First Commit of Voltha-Go-Controller from Radisys

Change-Id: I8e2e908e7ab09a4fe3d86849da18b6d69dcf4ab0
diff --git a/vendor/github.com/cespare/xxhash/v2/LICENSE.txt b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
new file mode 100644
index 0000000..24b5306
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/LICENSE.txt
@@ -0,0 +1,22 @@
+Copyright (c) 2016 Caleb Spare
+
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/cespare/xxhash/v2/README.md b/vendor/github.com/cespare/xxhash/v2/README.md
new file mode 100644
index 0000000..792b4a6
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/README.md
@@ -0,0 +1,69 @@
+# xxhash
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
+[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)
+
+xxhash is a Go implementation of the 64-bit
+[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+high-quality hashing algorithm that is much faster than anything in the Go
+standard library.
+
+This package provides a straightforward API:
+
+```
+func Sum64(b []byte) uint64
+func Sum64String(s string) uint64
+type Digest struct{ ... }
+    func New() *Digest
+```
+
+The `Digest` type implements hash.Hash64. Its key methods are:
+
+```
+func (*Digest) Write([]byte) (int, error)
+func (*Digest) WriteString(string) (int, error)
+func (*Digest) Sum64() uint64
+```
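+
+A minimal usage sketch based on the API above (the error returns from `Write`/`WriteString` are ignored here since they are always nil):
+
+```
+package main
+
+import (
+    "fmt"
+
+    "github.com/cespare/xxhash/v2"
+)
+
+func main() {
+    // One-shot hashing of a byte slice.
+    fmt.Println(xxhash.Sum64([]byte("hello")))
+
+    // Streaming hashing via Digest (which implements hash.Hash64).
+    d := xxhash.New()
+    d.WriteString("hello, ")
+    d.WriteString("world")
+    fmt.Println(d.Sum64())
+}
+```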
+
+This package provides a fast pure-Go implementation and an even faster
+assembly implementation for amd64.
+
+## Compatibility
+
+This package is in a module and the latest code is in version 2 of the module.
+You need a version of Go with at least "minimal module compatibility" to use
+github.com/cespare/xxhash/v2:
+
+* 1.9.7+ for Go 1.9
+* 1.10.3+ for Go 1.10
+* Go 1.11 or later
+
+I recommend using the latest release of Go.
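+
+For example, code that uses this package imports the /v2 module path directly (the package name remains `xxhash`):
+
+```
+import "github.com/cespare/xxhash/v2"
+```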
+
+## Benchmarks
+
+Here are some quick benchmarks comparing the pure-Go and assembly
+implementations of Sum64.
+
+| input size | purego | asm |
+| --- | --- | --- |
+| 5 B   |  979.66 MB/s |  1291.17 MB/s  |
+| 100 B | 7475.26 MB/s | 7973.40 MB/s  |
+| 4 KB  | 17573.46 MB/s | 17602.65 MB/s |
+| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+
+These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
+the following commands under Go 1.11.2:
+
+```
+$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
+$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+```
+
+## Projects using this package
+
+- [InfluxDB](https://github.com/influxdata/influxdb)
+- [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
+- [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
diff --git a/vendor/github.com/cespare/xxhash/v2/go.mod b/vendor/github.com/cespare/xxhash/v2/go.mod
new file mode 100644
index 0000000..49f6760
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/go.mod
@@ -0,0 +1,3 @@
+module github.com/cespare/xxhash/v2
+
+go 1.11
diff --git a/vendor/github.com/cespare/xxhash/v2/go.sum b/vendor/github.com/cespare/xxhash/v2/go.sum
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/go.sum
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash.go b/vendor/github.com/cespare/xxhash/v2/xxhash.go
new file mode 100644
index 0000000..15c835d
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash.go
@@ -0,0 +1,235 @@
+// Package xxhash implements the 64-bit variant of xxHash (XXH64) as described
+// at http://cyan4973.github.io/xxHash/.
+package xxhash
+
+import (
+	"encoding/binary"
+	"errors"
+	"math/bits"
+)
+
+const (
+	prime1 uint64 = 11400714785074694791
+	prime2 uint64 = 14029467366897019727
+	prime3 uint64 = 1609587929392839161
+	prime4 uint64 = 9650029242287828579
+	prime5 uint64 = 2870177450012600261
+)
+
+// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
+// possible in the Go code is worth a small (but measurable) performance boost
+// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
+// convenience in the Go code in a few places where we need to intentionally
+// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
+// result overflows a uint64).
+var (
+	prime1v = prime1
+	prime2v = prime2
+	prime3v = prime3
+	prime4v = prime4
+	prime5v = prime5
+)
+
+// Digest implements hash.Hash64.
+type Digest struct {
+	v1    uint64
+	v2    uint64
+	v3    uint64
+	v4    uint64
+	total uint64
+	mem   [32]byte
+	n     int // how much of mem is used
+}
+
+// New creates a new Digest that computes the 64-bit xxHash algorithm.
+func New() *Digest {
+	var d Digest
+	d.Reset()
+	return &d
+}
+
+// Reset clears the Digest's state so that it can be reused.
+func (d *Digest) Reset() {
+	d.v1 = prime1v + prime2
+	d.v2 = prime2
+	d.v3 = 0
+	d.v4 = -prime1v
+	d.total = 0
+	d.n = 0
+}
+
+// Size always returns 8 bytes.
+func (d *Digest) Size() int { return 8 }
+
+// BlockSize always returns 32 bytes.
+func (d *Digest) BlockSize() int { return 32 }
+
+// Write adds more data to d. It always returns len(b), nil.
+func (d *Digest) Write(b []byte) (n int, err error) {
+	n = len(b)
+	d.total += uint64(n)
+
+	if d.n+n < 32 {
+		// This new data doesn't even fill the current block.
+		copy(d.mem[d.n:], b)
+		d.n += n
+		return
+	}
+
+	if d.n > 0 {
+		// Finish off the partial block.
+		copy(d.mem[d.n:], b)
+		d.v1 = round(d.v1, u64(d.mem[0:8]))
+		d.v2 = round(d.v2, u64(d.mem[8:16]))
+		d.v3 = round(d.v3, u64(d.mem[16:24]))
+		d.v4 = round(d.v4, u64(d.mem[24:32]))
+		b = b[32-d.n:]
+		d.n = 0
+	}
+
+	if len(b) >= 32 {
+		// One or more full blocks left.
+		nw := writeBlocks(d, b)
+		b = b[nw:]
+	}
+
+	// Store any remaining partial block.
+	copy(d.mem[:], b)
+	d.n = len(b)
+
+	return
+}
+
+// Sum appends the current hash to b and returns the resulting slice.
+func (d *Digest) Sum(b []byte) []byte {
+	s := d.Sum64()
+	return append(
+		b,
+		byte(s>>56),
+		byte(s>>48),
+		byte(s>>40),
+		byte(s>>32),
+		byte(s>>24),
+		byte(s>>16),
+		byte(s>>8),
+		byte(s),
+	)
+}
+
+// Sum64 returns the current hash.
+func (d *Digest) Sum64() uint64 {
+	var h uint64
+
+	if d.total >= 32 {
+		v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		h = d.v3 + prime5
+	}
+
+	h += d.total
+
+	i, end := 0, d.n
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(d.mem[i:i+8]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(d.mem[i:i+4])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for i < end {
+		h ^= uint64(d.mem[i]) * prime5
+		h = rol11(h) * prime1
+		i++
+	}
+
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+const (
+	magic         = "xxh\x06"
+	marshaledSize = len(magic) + 8*5 + 32
+)
+
+// MarshalBinary implements the encoding.BinaryMarshaler interface.
+func (d *Digest) MarshalBinary() ([]byte, error) {
+	b := make([]byte, 0, marshaledSize)
+	b = append(b, magic...)
+	b = appendUint64(b, d.v1)
+	b = appendUint64(b, d.v2)
+	b = appendUint64(b, d.v3)
+	b = appendUint64(b, d.v4)
+	b = appendUint64(b, d.total)
+	b = append(b, d.mem[:d.n]...)
+	b = b[:len(b)+len(d.mem)-d.n]
+	return b, nil
+}
+
+// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface.
+func (d *Digest) UnmarshalBinary(b []byte) error {
+	if len(b) < len(magic) || string(b[:len(magic)]) != magic {
+		return errors.New("xxhash: invalid hash state identifier")
+	}
+	if len(b) != marshaledSize {
+		return errors.New("xxhash: invalid hash state size")
+	}
+	b = b[len(magic):]
+	b, d.v1 = consumeUint64(b)
+	b, d.v2 = consumeUint64(b)
+	b, d.v3 = consumeUint64(b)
+	b, d.v4 = consumeUint64(b)
+	b, d.total = consumeUint64(b)
+	copy(d.mem[:], b)
+	d.n = int(d.total % uint64(len(d.mem)))
+	return nil
+}
+
+func appendUint64(b []byte, x uint64) []byte {
+	var a [8]byte
+	binary.LittleEndian.PutUint64(a[:], x)
+	return append(b, a[:]...)
+}
+
+func consumeUint64(b []byte) ([]byte, uint64) {
+	x := u64(b)
+	return b[8:], x
+}
+
+func u64(b []byte) uint64 { return binary.LittleEndian.Uint64(b) }
+func u32(b []byte) uint32 { return binary.LittleEndian.Uint32(b) }
+
+func round(acc, input uint64) uint64 {
+	acc += input * prime2
+	acc = rol31(acc)
+	acc *= prime1
+	return acc
+}
+
+func mergeRound(acc, val uint64) uint64 {
+	val = round(0, val)
+	acc ^= val
+	acc = acc*prime1 + prime4
+	return acc
+}
+
+func rol1(x uint64) uint64  { return bits.RotateLeft64(x, 1) }
+func rol7(x uint64) uint64  { return bits.RotateLeft64(x, 7) }
+func rol11(x uint64) uint64 { return bits.RotateLeft64(x, 11) }
+func rol12(x uint64) uint64 { return bits.RotateLeft64(x, 12) }
+func rol18(x uint64) uint64 { return bits.RotateLeft64(x, 18) }
+func rol23(x uint64) uint64 { return bits.RotateLeft64(x, 23) }
+func rol27(x uint64) uint64 { return bits.RotateLeft64(x, 27) }
+func rol31(x uint64) uint64 { return bits.RotateLeft64(x, 31) }
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
new file mode 100644
index 0000000..ad14b80
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.go
@@ -0,0 +1,13 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+//
+//go:noescape
+func Sum64(b []byte) uint64
+
+//go:noescape
+func writeBlocks(d *Digest, b []byte) int
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
new file mode 100644
index 0000000..be8db5b
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
@@ -0,0 +1,215 @@
+// +build !appengine
+// +build gc
+// +build !purego
+
+#include "textflag.h"
+
+// Register allocation:
+// AX	h
+// SI	pointer to advance through b
+// DX	n
+// BX	loop end
+// R8	v1, k1
+// R9	v2
+// R10	v3
+// R11	v4
+// R12	tmp
+// R13	prime1v
+// R14	prime2v
+// DI	prime4v
+
+// round reads from and advances the buffer pointer in SI.
+// It assumes that R13 has prime1v and R14 has prime2v.
+#define round(r) \
+	MOVQ  (SI), R12 \
+	ADDQ  $8, SI    \
+	IMULQ R14, R12  \
+	ADDQ  R12, r    \
+	ROLQ  $31, r    \
+	IMULQ R13, r
+
+// mergeRound applies a merge round on the two registers acc and val.
+// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
+#define mergeRound(acc, val) \
+	IMULQ R14, val \
+	ROLQ  $31, val \
+	IMULQ R13, val \
+	XORQ  val, acc \
+	IMULQ R13, acc \
+	ADDQ  DI, acc
+
+// func Sum64(b []byte) uint64
+TEXT ·Sum64(SB), NOSPLIT, $0-32
+	// Load fixed primes.
+	MOVQ ·prime1v(SB), R13
+	MOVQ ·prime2v(SB), R14
+	MOVQ ·prime4v(SB), DI
+
+	// Load slice.
+	MOVQ b_base+0(FP), SI
+	MOVQ b_len+8(FP), DX
+	LEAQ (SI)(DX*1), BX
+
+	// The first loop limit will be len(b)-32.
+	SUBQ $32, BX
+
+	// Check whether we have at least one block.
+	CMPQ DX, $32
+	JLT  noBlocks
+
+	// Set up initial state (v1, v2, v3, v4).
+	MOVQ R13, R8
+	ADDQ R14, R8
+	MOVQ R14, R9
+	XORQ R10, R10
+	XORQ R11, R11
+	SUBQ R13, R11
+
+	// Loop until SI > BX.
+blockLoop:
+	round(R8)
+	round(R9)
+	round(R10)
+	round(R11)
+
+	CMPQ SI, BX
+	JLE  blockLoop
+
+	MOVQ R8, AX
+	ROLQ $1, AX
+	MOVQ R9, R12
+	ROLQ $7, R12
+	ADDQ R12, AX
+	MOVQ R10, R12
+	ROLQ $12, R12
+	ADDQ R12, AX
+	MOVQ R11, R12
+	ROLQ $18, R12
+	ADDQ R12, AX
+
+	mergeRound(AX, R8)
+	mergeRound(AX, R9)
+	mergeRound(AX, R10)
+	mergeRound(AX, R11)
+
+	JMP afterBlocks
+
+noBlocks:
+	MOVQ ·prime5v(SB), AX
+
+afterBlocks:
+	ADDQ DX, AX
+
+	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
+	ADDQ $24, BX
+
+	CMPQ SI, BX
+	JG   fourByte
+
+wordLoop:
+	// Calculate k1.
+	MOVQ  (SI), R8
+	ADDQ  $8, SI
+	IMULQ R14, R8
+	ROLQ  $31, R8
+	IMULQ R13, R8
+
+	XORQ  R8, AX
+	ROLQ  $27, AX
+	IMULQ R13, AX
+	ADDQ  DI, AX
+
+	CMPQ SI, BX
+	JLE  wordLoop
+
+fourByte:
+	ADDQ $4, BX
+	CMPQ SI, BX
+	JG   singles
+
+	MOVL  (SI), R8
+	ADDQ  $4, SI
+	IMULQ R13, R8
+	XORQ  R8, AX
+
+	ROLQ  $23, AX
+	IMULQ R14, AX
+	ADDQ  ·prime3v(SB), AX
+
+singles:
+	ADDQ $4, BX
+	CMPQ SI, BX
+	JGE  finalize
+
+singlesLoop:
+	MOVBQZX (SI), R12
+	ADDQ    $1, SI
+	IMULQ   ·prime5v(SB), R12
+	XORQ    R12, AX
+
+	ROLQ  $11, AX
+	IMULQ R13, AX
+
+	CMPQ SI, BX
+	JL   singlesLoop
+
+finalize:
+	MOVQ  AX, R12
+	SHRQ  $33, R12
+	XORQ  R12, AX
+	IMULQ R14, AX
+	MOVQ  AX, R12
+	SHRQ  $29, R12
+	XORQ  R12, AX
+	IMULQ ·prime3v(SB), AX
+	MOVQ  AX, R12
+	SHRQ  $32, R12
+	XORQ  R12, AX
+
+	MOVQ AX, ret+24(FP)
+	RET
+
+// writeBlocks uses the same registers as above except that it uses AX to store
+// the d pointer.
+
+// func writeBlocks(d *Digest, b []byte) int
+TEXT ·writeBlocks(SB), NOSPLIT, $0-40
+	// Load fixed primes needed for round.
+	MOVQ ·prime1v(SB), R13
+	MOVQ ·prime2v(SB), R14
+
+	// Load slice.
+	MOVQ b_base+8(FP), SI
+	MOVQ b_len+16(FP), DX
+	LEAQ (SI)(DX*1), BX
+	SUBQ $32, BX
+
+	// Load vN from d.
+	MOVQ d+0(FP), AX
+	MOVQ 0(AX), R8   // v1
+	MOVQ 8(AX), R9   // v2
+	MOVQ 16(AX), R10 // v3
+	MOVQ 24(AX), R11 // v4
+
+	// We don't need to check the loop condition here; this function is
+	// always called with at least one block of data to process.
+blockLoop:
+	round(R8)
+	round(R9)
+	round(R10)
+	round(R11)
+
+	CMPQ SI, BX
+	JLE  blockLoop
+
+	// Copy vN back to d.
+	MOVQ R8, 0(AX)
+	MOVQ R9, 8(AX)
+	MOVQ R10, 16(AX)
+	MOVQ R11, 24(AX)
+
+	// The number of bytes written is SI minus the old base pointer.
+	SUBQ b_base+8(FP), SI
+	MOVQ SI, ret+32(FP)
+
+	RET
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_other.go b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
new file mode 100644
index 0000000..4a5a821
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_other.go
@@ -0,0 +1,76 @@
+// +build !amd64 appengine !gc purego
+
+package xxhash
+
+// Sum64 computes the 64-bit xxHash digest of b.
+func Sum64(b []byte) uint64 {
+	// A simpler version would be
+	//   d := New()
+	//   d.Write(b)
+	//   return d.Sum64()
+	// but this is faster, particularly for small inputs.
+
+	n := len(b)
+	var h uint64
+
+	if n >= 32 {
+		v1 := prime1v + prime2
+		v2 := prime2
+		v3 := uint64(0)
+		v4 := -prime1v
+		for len(b) >= 32 {
+			v1 = round(v1, u64(b[0:8:len(b)]))
+			v2 = round(v2, u64(b[8:16:len(b)]))
+			v3 = round(v3, u64(b[16:24:len(b)]))
+			v4 = round(v4, u64(b[24:32:len(b)]))
+			b = b[32:len(b):len(b)]
+		}
+		h = rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+		h = mergeRound(h, v1)
+		h = mergeRound(h, v2)
+		h = mergeRound(h, v3)
+		h = mergeRound(h, v4)
+	} else {
+		h = prime5
+	}
+
+	h += uint64(n)
+
+	i, end := 0, len(b)
+	for ; i+8 <= end; i += 8 {
+		k1 := round(0, u64(b[i:i+8:len(b)]))
+		h ^= k1
+		h = rol27(h)*prime1 + prime4
+	}
+	if i+4 <= end {
+		h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
+		h = rol23(h)*prime2 + prime3
+		i += 4
+	}
+	for ; i < end; i++ {
+		h ^= uint64(b[i]) * prime5
+		h = rol11(h) * prime1
+	}
+
+	h ^= h >> 33
+	h *= prime2
+	h ^= h >> 29
+	h *= prime3
+	h ^= h >> 32
+
+	return h
+}
+
+func writeBlocks(d *Digest, b []byte) int {
+	v1, v2, v3, v4 := d.v1, d.v2, d.v3, d.v4
+	n := len(b)
+	for len(b) >= 32 {
+		v1 = round(v1, u64(b[0:8:len(b)]))
+		v2 = round(v2, u64(b[8:16:len(b)]))
+		v3 = round(v3, u64(b[16:24:len(b)]))
+		v4 = round(v4, u64(b[24:32:len(b)]))
+		b = b[32:len(b):len(b)]
+	}
+	d.v1, d.v2, d.v3, d.v4 = v1, v2, v3, v4
+	return n - len(b)
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
new file mode 100644
index 0000000..fc9bea7
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_safe.go
@@ -0,0 +1,15 @@
+// +build appengine
+
+// This file contains the safe implementations of otherwise unsafe-using code.
+
+package xxhash
+
+// Sum64String computes the 64-bit xxHash digest of s.
+func Sum64String(s string) uint64 {
+	return Sum64([]byte(s))
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+func (d *Digest) WriteString(s string) (n int, err error) {
+	return d.Write([]byte(s))
+}
diff --git a/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
new file mode 100644
index 0000000..376e0ca
--- /dev/null
+++ b/vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
@@ -0,0 +1,57 @@
+// +build !appengine
+
+// This file encapsulates usage of unsafe.
+// xxhash_safe.go contains the safe implementations.
+
+package xxhash
+
+import (
+	"unsafe"
+)
+
+// In the future it's possible that compiler optimizations will make these
+// XxxString functions unnecessary by realizing that calls such as
+// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
+// If that happens, even if we keep these functions they can be replaced with
+// the trivial safe code.
+
+// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
+//
+//   var b []byte
+//   bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+//   bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+//   bh.Len = len(s)
+//   bh.Cap = len(s)
+//
+// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
+// weight to this sequence of expressions that any function that uses it will
+// not be inlined. Instead, the functions below use a different unsafe
+// conversion designed to minimize the inliner weight and allow both to be
+// inlined. There is also a test (TestInlining) which verifies that these are
+// inlined.
+//
+// See https://github.com/golang/go/issues/42739 for discussion.
+
+// Sum64String computes the 64-bit xxHash digest of s.
+// It may be faster than Sum64([]byte(s)) by avoiding a copy.
+func Sum64String(s string) uint64 {
+	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
+	return Sum64(b)
+}
+
+// WriteString adds more data to d. It always returns len(s), nil.
+// It may be faster than Write([]byte(s)) by avoiding a copy.
+func (d *Digest) WriteString(s string) (n int, err error) {
+	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
+	// d.Write always returns len(s), nil.
+	// Ignoring the return output and returning these fixed values buys a
+	// savings of 6 in the inliner's cost model.
+	return len(s), nil
+}
+
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
+// of the first two words is the same as the layout of a string.
+type sliceHeader struct {
+	s   string
+	cap int
+}
diff --git a/vendor/github.com/cevaris/ordered_map/.gitignore b/vendor/github.com/cevaris/ordered_map/.gitignore
new file mode 100644
index 0000000..4af4e59
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/.gitignore
@@ -0,0 +1,5 @@
+*.test
+*~
+
+.idea
+*.iml
diff --git a/vendor/github.com/cevaris/ordered_map/.travis.yml b/vendor/github.com/cevaris/ordered_map/.travis.yml
new file mode 100644
index 0000000..193242f
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/.travis.yml
@@ -0,0 +1,19 @@
+---
+language: go
+
+go:
+  - tip
+  - 1.12
+  - 1.11
+  - 1.10
+  - 1.9
+  - 1.8
+  - 1.7
+  - 1.6
+  - 1.5
+  - 1.4
+  - 1.3
+
+install:
+  - make
+  - make test
diff --git a/vendor/github.com/cevaris/ordered_map/LICENSE.md b/vendor/github.com/cevaris/ordered_map/LICENSE.md
new file mode 100644
index 0000000..4cb9b14
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2015-2016 Adam Cardenas
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/cevaris/ordered_map/Makefile b/vendor/github.com/cevaris/ordered_map/Makefile
new file mode 100644
index 0000000..099e53b
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/Makefile
@@ -0,0 +1,10 @@
+all: build install 
+
+build:
+	go build
+
+install:
+	go install
+
+test:
+	go test -v *.go
diff --git a/vendor/github.com/cevaris/ordered_map/README.md b/vendor/github.com/cevaris/ordered_map/README.md
new file mode 100644
index 0000000..bc3e366
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/README.md
@@ -0,0 +1,113 @@
+# Ordered Map for golang
+
+[![Build Status](https://travis-ci.org/cevaris/ordered_map.svg?branch=master)](https://travis-ci.org/cevaris/ordered_map)
+
+**OrderedMap** is a Go implementation of Python's OrderedDict. Go's builtin `map` purposefully randomizes the iteration order of stored key/value pairs. **OrderedMap** preserves key/value pairs in insertion order, so that on iteration they are returned in the order they were inserted (first in, first out).
+
+
+## Features
+- Full Key/Value support for all data types
+- Exposes an Iterator that iterates in order of insertion
+- Full Get/Set/Delete map interface
+- Supports Golang v1.3 through v1.12
+
+## Download and Install 
+  
+`go get github.com/cevaris/ordered_map`
+
+
+## Examples
+
+### Create, Get, Set, Delete
+
+```go
+package main
+
+import (
+    "fmt"
+    "github.com/cevaris/ordered_map"
+)
+
+func main() {
+
+    // Init new OrderedMap
+    om := ordered_map.NewOrderedMap()
+
+    // Set key
+    om.Set("a", 1)
+    om.Set("b", 2)
+    om.Set("c", 3)
+    om.Set("d", 4)
+
+    // Same interface as builtin map
+    if val, ok := om.Get("b"); ok == true {
+        // Found key "b"
+        fmt.Println(val)
+    }
+
+    // Delete a key
+    om.Delete("c")
+
+    // Failed Get lookup because we deleted "c"
+    if _, ok := om.Get("c"); ok == false {
+        // Did not find key "c"
+        fmt.Println("c not found")
+    }
+    
+    fmt.Println(om)
+}
+```
+
+
+### Iterator
+
+```go
+n := 100
+om := ordered_map.NewOrderedMap()
+
+for i := 0; i < n; i++ {
+    // Insert data into OrderedMap
+    om.Set(i, fmt.Sprintf("%d", i * i))
+}
+
+// Iterate through values
+// - Values are iterated in insertion order
+// - Returned as a key/value pair struct
+iter := om.IterFunc()
+for kv, ok := iter(); ok; kv, ok = iter() {
+    fmt.Println(kv, kv.Key, kv.Value)
+}
+```
+
+### Custom Structs
+
+```go
+om := ordered_map.NewOrderedMap()
+om.Set("one", &MyStruct{1, 1.1})
+om.Set("two", &MyStruct{2, 2.2})
+om.Set("three", &MyStruct{3, 3.3})
+
+fmt.Println(om)
+// Output: OrderedMap[one:&{1 1.1},  two:&{2 2.2},  three:&{3 3.3}, ]
+```
+  
+## For Development
+
+Clone the project
+
+`git clone https://github.com/cevaris/ordered_map.git`  
+  
+Build and install project
+
+`make`
+
+Run tests 
+
+`make test`
+
diff --git a/vendor/github.com/cevaris/ordered_map/key_pair.go b/vendor/github.com/cevaris/ordered_map/key_pair.go
new file mode 100644
index 0000000..88afbcf
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/key_pair.go
@@ -0,0 +1,16 @@
+package ordered_map
+
+import "fmt"
+
+type KVPair struct {
+	Key   interface{}
+	Value interface{}
+}
+
+func (k *KVPair) String() string {
+	return fmt.Sprintf("%v:%v", k.Key, k.Value)
+}
+
+func (kv1 *KVPair) Compare(kv2 *KVPair) bool {
+	return kv1.Key == kv2.Key && kv1.Value == kv2.Value
+}
\ No newline at end of file
diff --git a/vendor/github.com/cevaris/ordered_map/node.go b/vendor/github.com/cevaris/ordered_map/node.go
new file mode 100644
index 0000000..ad0d142
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/node.go
@@ -0,0 +1,62 @@
+package ordered_map
+
+import (
+	"fmt"
+	"bytes"
+)
+
+type node struct {
+	Prev  *node
+	Next  *node
+	Value interface{}
+}
+
+func newRootNode() *node {
+	root := &node{}
+	root.Prev = root
+	root.Next = root
+	return root
+}
+
+func newNode(prev *node, next *node, key interface{}) *node {
+	return &node{Prev: prev, Next: next, Value: key}
+}
+
+func (n *node) Add(value string) {
+	root := n
+	last := root.Prev
+	last.Next = newNode(last, n, value)
+	root.Prev = last.Next
+}
+
+func (n *node) String() string {
+	var buffer bytes.Buffer
+	if n.Value == "" {
+		// n is the sentinel (root) node; walk the list and print each value
+		var curr *node
+		root := n
+		curr = root.Next
+		for curr != root {
+			buffer.WriteString(fmt.Sprintf("%s, ", curr.Value))
+			curr = curr.Next
+		}
+	} else {
+		// Else, print pointer value
+		buffer.WriteString(fmt.Sprintf("%p, ", &n))
+	}
+	return fmt.Sprintf("LinkList[%v]", buffer.String())
+}
+
+func (n *node) IterFunc() func() (string, bool) {
+	var curr *node
+	root := n
+	curr = root.Next
+	return func() (string, bool) {
+		for curr != root {
+			tmp := curr.Value.(string)
+			curr = curr.Next
+			return tmp, true
+		}
+		return "", false
+	}
+}
diff --git a/vendor/github.com/cevaris/ordered_map/ordered_map.go b/vendor/github.com/cevaris/ordered_map/ordered_map.go
new file mode 100644
index 0000000..4116cd8
--- /dev/null
+++ b/vendor/github.com/cevaris/ordered_map/ordered_map.go
@@ -0,0 +1,121 @@
+package ordered_map
+
+import (
+	"fmt"
+)
+
+type OrderedMap struct {
+	store  map[interface{}]interface{}
+	mapper map[interface{}]*node
+	root   *node
+}
+
+func NewOrderedMap() *OrderedMap {
+	om := &OrderedMap{
+		store:  make(map[interface{}]interface{}),
+		mapper: make(map[interface{}]*node),
+		root:   newRootNode(),
+	}
+	return om
+}
+
+func NewOrderedMapWithArgs(args []*KVPair) *OrderedMap {
+	om := NewOrderedMap()
+	om.update(args)
+	return om
+}
+
+func (om *OrderedMap) update(args []*KVPair) {
+	for _, pair := range args {
+		om.Set(pair.Key, pair.Value)
+	}
+}
+
+func (om *OrderedMap) Set(key interface{}, value interface{}) {
+	if _, ok := om.store[key]; ok == false {
+		root := om.root
+		last := root.Prev
+		last.Next = newNode(last, root, key)
+		root.Prev = last.Next
+		om.mapper[key] = last.Next
+	}
+	om.store[key] = value
+}
+
+func (om *OrderedMap) Get(key interface{}) (interface{}, bool) {
+	val, ok := om.store[key]
+	return val, ok
+}
+
+func (om *OrderedMap) Delete(key interface{}) {
+	_, ok := om.store[key]
+	if ok {
+		delete(om.store, key)
+	}
+	root, rootFound := om.mapper[key]
+	if rootFound {
+		prev := root.Prev
+		next := root.Next
+		prev.Next = next
+		next.Prev = prev
+		delete(om.mapper, key)
+	}
+}
+
+func (om *OrderedMap) String() string {
+	builder := make([]string, len(om.store))
+
+	var index int = 0
+	iter := om.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		val, _ := om.Get(kv.Key)
+		builder[index] = fmt.Sprintf("%v:%v, ", kv.Key, val)
+		index++
+	}
+	return fmt.Sprintf("OrderedMap%v", builder)
+}
+
+func (om *OrderedMap) Iter() <-chan *KVPair {
+	println("Iter() method is deprecated! Use IterFunc() instead.")
+	return om.UnsafeIter()
+}
+
+/*
+Beware: this iterator leaks a goroutine if the map is not fully traversed.
+For most cases, `IterFunc()` should be used as the iterator.
+*/
+func (om *OrderedMap) UnsafeIter() <-chan *KVPair {
+	keys := make(chan *KVPair)
+	go func() {
+		defer close(keys)
+		var curr *node
+		root := om.root
+		curr = root.Next
+		for curr != root {
+			v, _ := om.store[curr.Value]
+			keys <- &KVPair{curr.Value, v}
+			curr = curr.Next
+		}
+	}()
+	return keys
+}
+
+func (om *OrderedMap) IterFunc() func() (*KVPair, bool) {
+	var curr *node
+	root := om.root
+	curr = root.Next
+	return func() (*KVPair, bool) {
+		for curr != root {
+			tmp := curr
+			curr = curr.Next
+			v, _ := om.store[tmp.Value]
+			return &KVPair{tmp.Value, v}, true
+		}
+		return nil, false
+	}
+}
+
+func (om *OrderedMap) Len() int {
+	return len(om.store)
+}
+
diff --git a/vendor/github.com/coreos/etcd/LICENSE b/vendor/github.com/coreos/etcd/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/coreos/etcd/NOTICE b/vendor/github.com/coreos/etcd/NOTICE
new file mode 100644
index 0000000..b39ddfa
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
new file mode 100644
index 0000000..c5faf00
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.pb.go
@@ -0,0 +1,972 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: auth.proto
+
+package authpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Permission_Type int32
+
+const (
+	READ      Permission_Type = 0
+	WRITE     Permission_Type = 1
+	READWRITE Permission_Type = 2
+)
+
+var Permission_Type_name = map[int32]string{
+	0: "READ",
+	1: "WRITE",
+	2: "READWRITE",
+}
+
+var Permission_Type_value = map[string]int32{
+	"READ":      0,
+	"WRITE":     1,
+	"READWRITE": 2,
+}
+
+func (x Permission_Type) String() string {
+	return proto.EnumName(Permission_Type_name, int32(x))
+}
+
+func (Permission_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_8bbd6f3875b0e874, []int{1, 0}
+}
+
+// User is a single entry in the bucket authUsers
+type User struct {
+	Name                 []byte   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password             []byte   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	Roles                []string `protobuf:"bytes,3,rep,name=roles,proto3" json:"roles,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *User) Reset()         { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage()    {}
+func (*User) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8bbd6f3875b0e874, []int{0}
+}
+func (m *User) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *User) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_User.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *User) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_User.Merge(m, src)
+}
+func (m *User) XXX_Size() int {
+	return m.Size()
+}
+func (m *User) XXX_DiscardUnknown() {
+	xxx_messageInfo_User.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_User proto.InternalMessageInfo
+
+// Permission is a single entity
+type Permission struct {
+	PermType             Permission_Type `protobuf:"varint,1,opt,name=permType,proto3,enum=authpb.Permission_Type" json:"permType,omitempty"`
+	Key                  []byte          `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	RangeEnd             []byte          `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *Permission) Reset()         { *m = Permission{} }
+func (m *Permission) String() string { return proto.CompactTextString(m) }
+func (*Permission) ProtoMessage()    {}
+func (*Permission) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8bbd6f3875b0e874, []int{1}
+}
+func (m *Permission) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Permission) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Permission.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Permission) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Permission.Merge(m, src)
+}
+func (m *Permission) XXX_Size() int {
+	return m.Size()
+}
+func (m *Permission) XXX_DiscardUnknown() {
+	xxx_messageInfo_Permission.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Permission proto.InternalMessageInfo
+
+// Role is a single entry in the bucket authRoles
+type Role struct {
+	Name                 []byte        `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	KeyPermission        []*Permission `protobuf:"bytes,2,rep,name=keyPermission,proto3" json:"keyPermission,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *Role) Reset()         { *m = Role{} }
+func (m *Role) String() string { return proto.CompactTextString(m) }
+func (*Role) ProtoMessage()    {}
+func (*Role) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8bbd6f3875b0e874, []int{2}
+}
+func (m *Role) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Role) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Role.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Role) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Role.Merge(m, src)
+}
+func (m *Role) XXX_Size() int {
+	return m.Size()
+}
+func (m *Role) XXX_DiscardUnknown() {
+	xxx_messageInfo_Role.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Role proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterEnum("authpb.Permission_Type", Permission_Type_name, Permission_Type_value)
+	proto.RegisterType((*User)(nil), "authpb.User")
+	proto.RegisterType((*Permission)(nil), "authpb.Permission")
+	proto.RegisterType((*Role)(nil), "authpb.Role")
+}
+
+func init() { proto.RegisterFile("auth.proto", fileDescriptor_8bbd6f3875b0e874) }
+
+var fileDescriptor_8bbd6f3875b0e874 = []byte{
+	// 288 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0xc1, 0x4a, 0xc3, 0x30,
+	0x1c, 0xc6, 0x9b, 0xb6, 0x1b, 0xed, 0x5f, 0x27, 0x25, 0x0c, 0x0c, 0x13, 0x42, 0xe9, 0xa9, 0x78,
+	0xa8, 0xb0, 0x5d, 0xbc, 0x2a, 0xf6, 0x20, 0x78, 0x90, 0x50, 0xf1, 0x28, 0x1d, 0x0d, 0x75, 0x6c,
+	0x6d, 0x4a, 0x32, 0x91, 0xbe, 0x89, 0x07, 0x1f, 0x68, 0xc7, 0x3d, 0x82, 0xab, 0x2f, 0x22, 0x4d,
+	0x64, 0x43, 0xdc, 0xed, 0xfb, 0xbe, 0xff, 0x97, 0xe4, 0x97, 0x3f, 0x40, 0xfe, 0xb6, 0x7e, 0x4d,
+	0x1a, 0x29, 0xd6, 0x02, 0x0f, 0x7b, 0xdd, 0xcc, 0x27, 0xe3, 0x52, 0x94, 0x42, 0x47, 0x57, 0xbd,
+	0x32, 0xd3, 0xe8, 0x01, 0xdc, 0x27, 0xc5, 0x25, 0xc6, 0xe0, 0xd6, 0x79, 0xc5, 0x09, 0x0a, 0x51,
+	0x7c, 0xca, 0xb4, 0xc6, 0x13, 0xf0, 0x9a, 0x5c, 0xa9, 0x77, 0x21, 0x0b, 0x62, 0xeb, 0x7c, 0xef,
+	0xf1, 0x18, 0x06, 0x52, 0xac, 0xb8, 0x22, 0x4e, 0xe8, 0xc4, 0x3e, 0x33, 0x26, 0xfa, 0x44, 0x00,
+	0x8f, 0x5c, 0x56, 0x0b, 0xa5, 0x16, 0xa2, 0xc6, 0x33, 0xf0, 0x1a, 0x2e, 0xab, 0xac, 0x6d, 0xcc,
+	0xc5, 0x67, 0xd3, 0xf3, 0xc4, 0xd0, 0x24, 0x87, 0x56, 0xd2, 0x8f, 0xd9, 0xbe, 0x88, 0x03, 0x70,
+	0x96, 0xbc, 0xfd, 0x7d, 0xb0, 0x97, 0xf8, 0x02, 0x7c, 0x99, 0xd7, 0x25, 0x7f, 0xe1, 0x75, 0x41,
+	0x1c, 0x03, 0xa2, 0x83, 0xb4, 0x2e, 0xa2, 0x4b, 0x70, 0xf5, 0x31, 0x0f, 0x5c, 0x96, 0xde, 0xdc,
+	0x05, 0x16, 0xf6, 0x61, 0xf0, 0xcc, 0xee, 0xb3, 0x34, 0x40, 0x78, 0x04, 0x7e, 0x1f, 0x1a, 0x6b,
+	0x47, 0x19, 0xb8, 0x4c, 0xac, 0xf8, 0xd1, 0xcf, 0x5e, 0xc3, 0x68, 0xc9, 0xdb, 0x03, 0x16, 0xb1,
+	0x43, 0x27, 0x3e, 0x99, 0xe2, 0xff, 0xc0, 0xec, 0x6f, 0xf1, 0x96, 0x6c, 0x76, 0xd4, 0xda, 0xee,
+	0xa8, 0xb5, 0xe9, 0x28, 0xda, 0x76, 0x14, 0x7d, 0x75, 0x14, 0x7d, 0x7c, 0x53, 0x6b, 0x3e, 0xd4,
+	0x3b, 0x9e, 0xfd, 0x04, 0x00, 0x00, 0xff, 0xff, 0xcc, 0x76, 0x8d, 0x4f, 0x8f, 0x01, 0x00, 0x00,
+}
+
+func (m *User) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *User) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *User) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Roles) > 0 {
+		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Roles[iNdEx])
+			copy(dAtA[i:], m.Roles[iNdEx])
+			i = encodeVarintAuth(dAtA, i, uint64(len(m.Roles[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Permission) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Permission) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Permission) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.PermType != 0 {
+		i = encodeVarintAuth(dAtA, i, uint64(m.PermType))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Role) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Role) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Role) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.KeyPermission) > 0 {
+		for iNdEx := len(m.KeyPermission) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.KeyPermission[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintAuth(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintAuth(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintAuth(dAtA []byte, offset int, v uint64) int {
+	offset -= sovAuth(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *User) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	if len(m.Roles) > 0 {
+		for _, s := range m.Roles {
+			l = len(s)
+			n += 1 + l + sovAuth(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Permission) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.PermType != 0 {
+		n += 1 + sovAuth(uint64(m.PermType))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Role) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovAuth(uint64(l))
+	}
+	if len(m.KeyPermission) > 0 {
+		for _, e := range m.KeyPermission {
+			l = e.Size()
+			n += 1 + l + sovAuth(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovAuth(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozAuth(x uint64) (n int) {
+	return sovAuth(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *User) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAuth
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: User: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: User: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+			if m.Name == nil {
+				m.Name = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = append(m.Password[:0], dAtA[iNdEx:postIndex]...)
+			if m.Password == nil {
+				m.Password = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAuth(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Permission) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAuth
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Permission: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Permission: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PermType", wireType)
+			}
+			m.PermType = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PermType |= Permission_Type(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAuth(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Role) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowAuth
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Role: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Role: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = append(m.Name[:0], dAtA[iNdEx:postIndex]...)
+			if m.Name == nil {
+				m.Name = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KeyPermission", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthAuth
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.KeyPermission = append(m.KeyPermission, &Permission{})
+			if err := m.KeyPermission[len(m.KeyPermission)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipAuth(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthAuth
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipAuth(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowAuth
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowAuth
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthAuth
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthAuth
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowAuth
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipAuth(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthAuth
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthAuth = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowAuth   = fmt.Errorf("proto: integer overflow")
+)
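
The generated Size, Marshal, and Unmarshal functions above all lean on sovAuth, which computes how many bytes a value occupies as a protobuf varint (7 payload bits per byte). The following standalone sketch simply repeats the sovAuth/sozAuth formulas for illustration; the helper names here are not part of the vendored package:

```go
package main

import (
	"fmt"
	"math/bits"
)

// sov repeats the generated sovAuth formula: the byte count of x
// encoded as a protobuf varint (7 payload bits per byte).
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// soz repeats sozAuth: zig-zag encode first, so small negative
// numbers still encode compactly.
func soz(x int64) int { return sov(uint64((x << 1) ^ (x >> 63))) }

func main() {
	fmt.Println(sov(0))       // 1: values 0..127 fit in a single byte
	fmt.Println(sov(300))     // 2: 128..16383 need two bytes
	fmt.Println(sov(1 << 21)) // 4
	fmt.Println(soz(-1))      // 1: zig-zag maps -1 to 1
}
```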
diff --git a/vendor/github.com/coreos/etcd/auth/authpb/auth.proto b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto
new file mode 100644
index 0000000..001d334
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/auth/authpb/auth.proto
@@ -0,0 +1,37 @@
+syntax = "proto3";
+package authpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+// User is a single entry in the bucket authUsers
+message User {
+  bytes name = 1;
+  bytes password = 2;
+  repeated string roles = 3;
+}
+
+// Permission is a single entity
+message Permission {
+  enum Type {
+    READ = 0;
+    WRITE = 1;
+    READWRITE = 2;
+  }
+  Type permType = 1;
+
+  bytes key = 2;
+  bytes range_end = 3;
+}
+
+// Role is a single entry in the bucket authRoles
+message Role {
+  bytes name = 1;
+
+  repeated Permission keyPermission = 2;
+}
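
With the gogoproto options declared above (marshaler_all, sizer_all, unmarshaler_all), the generated Go types carry their own Marshal, Unmarshal, and Size methods. A hedged sketch of a round trip through the generated authpb.User, with field names taken from the message definition; the example values are placeholders:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/auth/authpb"
)

func main() {
	u := &authpb.User{
		Name:     []byte("alice"),
		Password: []byte("secret"),
		Roles:    []string{"root"},
	}

	// marshaler_all=true generates Marshal; Size reports the exact encoded length.
	b, err := u.Marshal()
	if err != nil {
		panic(err)
	}
	fmt.Println(len(b) == u.Size()) // true

	var out authpb.User
	if err := out.Unmarshal(b); err != nil {
		panic(err)
	}
	fmt.Printf("%s %v\n", out.Name, out.Roles) // alice [root]
}
```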
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go b/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go
new file mode 100644
index 0000000..9306385
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/balancer.go
@@ -0,0 +1,293 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package balancer implements client balancer.
+package balancer
+
+import (
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/coreos/etcd/clientv3/balancer/connectivity"
+	"github.com/coreos/etcd/clientv3/balancer/picker"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc/balancer"
+	grpcconnectivity "google.golang.org/grpc/connectivity"
+	"google.golang.org/grpc/resolver"
+	_ "google.golang.org/grpc/resolver/dns"         // register DNS resolver
+	_ "google.golang.org/grpc/resolver/passthrough" // register passthrough resolver
+)
+
+// Config defines balancer configurations.
+type Config struct {
+	// Policy configures balancer policy.
+	Policy picker.Policy
+
+	// Picker implements gRPC picker.
+	// Leave empty if "Policy" field is not custom.
+	// TODO: currently custom policy is not supported.
+	// Picker picker.Picker
+
+	// Name defines an additional name for balancer.
+	// Useful for balancer testing to avoid register conflicts.
+	// If empty, defaults to policy name.
+	Name string
+
+	// Logger configures balancer logging.
+	// If nil, logs are discarded.
+	Logger *zap.Logger
+}
+
+// RegisterBuilder creates and registers a builder. Since this function calls balancer.Register, it
+// must be invoked at initialization time.
+func RegisterBuilder(cfg Config) {
+	bb := &builder{cfg}
+	balancer.Register(bb)
+
+	bb.cfg.Logger.Debug(
+		"registered balancer",
+		zap.String("policy", bb.cfg.Policy.String()),
+		zap.String("name", bb.cfg.Name),
+	)
+}
+
+type builder struct {
+	cfg Config
+}
+
+// Build is called initially when creating "ccBalancerWrapper".
+// "grpc.Dial" creates the client connection that this balancer serves.
+// Then, resolved addresses will be handled via "HandleResolvedAddrs".
+func (b *builder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+	bb := &baseBalancer{
+		id:     strconv.FormatInt(time.Now().UnixNano(), 36),
+		policy: b.cfg.Policy,
+		name:   b.cfg.Name,
+		lg:     b.cfg.Logger,
+
+		addrToSc: make(map[resolver.Address]balancer.SubConn),
+		scToAddr: make(map[balancer.SubConn]resolver.Address),
+		scToSt:   make(map[balancer.SubConn]grpcconnectivity.State),
+
+		currentConn:          nil,
+		connectivityRecorder: connectivity.New(b.cfg.Logger),
+
+		// the initial picker always returns "ErrNoSubConnAvailable"
+		picker: picker.NewErr(balancer.ErrNoSubConnAvailable),
+	}
+
+	// TODO: support multiple connections
+	bb.mu.Lock()
+	bb.currentConn = cc
+	bb.mu.Unlock()
+
+	bb.lg.Info(
+		"built balancer",
+		zap.String("balancer-id", bb.id),
+		zap.String("policy", bb.policy.String()),
+		zap.String("resolver-target", cc.Target()),
+	)
+	return bb
+}
+
+// Name implements "grpc/balancer.Builder" interface.
+func (b *builder) Name() string { return b.cfg.Name }
+
+// Balancer defines client balancer interface.
+type Balancer interface {
+	// Balancer is called on specified client connection. Client initiates gRPC
+	// connection with "grpc.Dial(addr, grpc.WithBalancerName)", and then those resolved
+	// addresses are passed to "grpc/balancer.Balancer.HandleResolvedAddrs".
+	// For each resolved address, balancer calls "balancer.ClientConn.NewSubConn".
+	// "grpc/balancer.Balancer.HandleSubConnStateChange" is called when connectivity state
+	// changes, thus requires failover logic in this method.
+	balancer.Balancer
+
+	// Picker calls "Pick" for every client request.
+	picker.Picker
+}
+
+type baseBalancer struct {
+	id     string
+	policy picker.Policy
+	name   string
+	lg     *zap.Logger
+
+	mu sync.RWMutex
+
+	addrToSc map[resolver.Address]balancer.SubConn
+	scToAddr map[balancer.SubConn]resolver.Address
+	scToSt   map[balancer.SubConn]grpcconnectivity.State
+
+	currentConn          balancer.ClientConn
+	connectivityRecorder connectivity.Recorder
+
+	picker picker.Picker
+}
+
+// HandleResolvedAddrs implements "grpc/balancer.Balancer" interface.
+// gRPC sends initial or updated resolved addresses from "Build".
+func (bb *baseBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+	if err != nil {
+		bb.lg.Warn("HandleResolvedAddrs called with error", zap.String("balancer-id", bb.id), zap.Error(err))
+		return
+	}
+	bb.lg.Info("resolved",
+		zap.String("picker", bb.picker.String()),
+		zap.String("balancer-id", bb.id),
+		zap.Strings("addresses", addrsToStrings(addrs)),
+	)
+
+	bb.mu.Lock()
+	defer bb.mu.Unlock()
+
+	resolved := make(map[resolver.Address]struct{})
+	for _, addr := range addrs {
+		resolved[addr] = struct{}{}
+		if _, ok := bb.addrToSc[addr]; !ok {
+			sc, err := bb.currentConn.NewSubConn([]resolver.Address{addr}, balancer.NewSubConnOptions{})
+			if err != nil {
+				bb.lg.Warn("NewSubConn failed", zap.String("picker", bb.picker.String()), zap.String("balancer-id", bb.id), zap.Error(err), zap.String("address", addr.Addr))
+				continue
+			}
+			bb.lg.Info("created subconn", zap.String("address", addr.Addr))
+			bb.addrToSc[addr] = sc
+			bb.scToAddr[sc] = addr
+			bb.scToSt[sc] = grpcconnectivity.Idle
+			sc.Connect()
+		}
+	}
+
+	for addr, sc := range bb.addrToSc {
+		if _, ok := resolved[addr]; !ok {
+			// was removed by resolver or failed to create subconn
+			bb.currentConn.RemoveSubConn(sc)
+			delete(bb.addrToSc, addr)
+
+			bb.lg.Info(
+				"removed subconn",
+				zap.String("picker", bb.picker.String()),
+				zap.String("balancer-id", bb.id),
+				zap.String("address", addr.Addr),
+				zap.String("subconn", scToString(sc)),
+			)
+
+			// Keep the state of this sc in bb.scToSt until sc's state becomes Shutdown.
+			// The entry will be deleted in HandleSubConnStateChange.
+			// (DO NOT) delete(bb.scToAddr, sc)
+			// (DO NOT) delete(bb.scToSt, sc)
+		}
+	}
+}
+
+// HandleSubConnStateChange implements "grpc/balancer.Balancer" interface.
+func (bb *baseBalancer) HandleSubConnStateChange(sc balancer.SubConn, s grpcconnectivity.State) {
+	bb.mu.Lock()
+	defer bb.mu.Unlock()
+
+	old, ok := bb.scToSt[sc]
+	if !ok {
+		bb.lg.Warn(
+			"state change for an unknown subconn",
+			zap.String("picker", bb.picker.String()),
+			zap.String("balancer-id", bb.id),
+			zap.String("subconn", scToString(sc)),
+			zap.Int("subconn-size", len(bb.scToAddr)),
+			zap.String("state", s.String()),
+		)
+		return
+	}
+
+	bb.lg.Info(
+		"state changed",
+		zap.String("picker", bb.picker.String()),
+		zap.String("balancer-id", bb.id),
+		zap.Bool("connected", s == grpcconnectivity.Ready),
+		zap.String("subconn", scToString(sc)),
+		zap.Int("subconn-size", len(bb.scToAddr)),
+		zap.String("address", bb.scToAddr[sc].Addr),
+		zap.String("old-state", old.String()),
+		zap.String("new-state", s.String()),
+	)
+
+	bb.scToSt[sc] = s
+	switch s {
+	case grpcconnectivity.Idle:
+		sc.Connect()
+	case grpcconnectivity.Shutdown:
+		// When an address was removed by the resolver, the balancer called RemoveSubConn but
+		// kept the sc's state in scToSt. Remove state for this sc here.
+		delete(bb.scToAddr, sc)
+		delete(bb.scToSt, sc)
+	}
+
+	oldAggrState := bb.connectivityRecorder.GetCurrentState()
+	bb.connectivityRecorder.RecordTransition(old, s)
+
+	// Update balancer picker when one of the following happens:
+	//  - this sc became ready from not-ready
+	//  - this sc became not-ready from ready
+	//  - the aggregated state of balancer became TransientFailure from non-TransientFailure
+	//  - the aggregated state of balancer became non-TransientFailure from TransientFailure
+	if (s == grpcconnectivity.Ready) != (old == grpcconnectivity.Ready) ||
+		(bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure) != (oldAggrState == grpcconnectivity.TransientFailure) {
+		bb.updatePicker()
+	}
+
+	bb.currentConn.UpdateBalancerState(bb.connectivityRecorder.GetCurrentState(), bb.picker)
+}
+
+func (bb *baseBalancer) updatePicker() {
+	if bb.connectivityRecorder.GetCurrentState() == grpcconnectivity.TransientFailure {
+		bb.picker = picker.NewErr(balancer.ErrTransientFailure)
+		bb.lg.Info(
+			"updated picker to transient error picker",
+			zap.String("picker", bb.picker.String()),
+			zap.String("balancer-id", bb.id),
+			zap.String("policy", bb.policy.String()),
+		)
+		return
+	}
+
+	// only pass ready subconns to picker
+	scToAddr := make(map[balancer.SubConn]resolver.Address)
+	for addr, sc := range bb.addrToSc {
+		if st, ok := bb.scToSt[sc]; ok && st == grpcconnectivity.Ready {
+			scToAddr[sc] = addr
+		}
+	}
+
+	bb.picker = picker.New(picker.Config{
+		Policy:                   bb.policy,
+		Logger:                   bb.lg,
+		SubConnToResolverAddress: scToAddr,
+	})
+	bb.lg.Info(
+		"updated picker",
+		zap.String("picker", bb.picker.String()),
+		zap.String("balancer-id", bb.id),
+		zap.String("policy", bb.policy.String()),
+		zap.Strings("subconn-ready", scsToStrings(scToAddr)),
+		zap.Int("subconn-size", len(scToAddr)),
+	)
+}
+
+// Close implements "grpc/balancer.Balancer" interface.
+// Close is a nop because base balancer doesn't have internal state to clean up,
+// and it doesn't need to call RemoveSubConn for the SubConns.
+func (bb *baseBalancer) Close() {
+	// TODO
+}
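
For context, a hedged sketch of how a caller might register this balancer at process start. The policy, name, and logger values are assumptions for illustration; note that RegisterBuilder logs unconditionally, so the Logger must be non-nil (a zap.NewNop() logger works when no output is wanted):

```go
package main

import (
	"github.com/coreos/etcd/clientv3/balancer"
	"github.com/coreos/etcd/clientv3/balancer/picker"
	"go.uber.org/zap"
)

func init() {
	// RegisterBuilder wraps grpc/balancer.Register, so it must run before
	// any grpc.Dial that selects this balancer by name.
	balancer.RegisterBuilder(balancer.Config{
		Policy: picker.RoundrobinBalanced,
		Name:   "example-rr-balancer", // hypothetical name
		Logger: zap.NewNop(),          // must be non-nil; RegisterBuilder logs on registration
	})
}

func main() {}
```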
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go b/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go
new file mode 100644
index 0000000..4c4ad36
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/connectivity/connectivity.go
@@ -0,0 +1,93 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package connectivity implements client connectivity operations.
+package connectivity
+
+import (
+	"sync"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc/connectivity"
+)
+
+// Recorder records gRPC connectivity.
+type Recorder interface {
+	GetCurrentState() connectivity.State
+	RecordTransition(oldState, newState connectivity.State)
+}
+
+// New returns a new Recorder.
+func New(lg *zap.Logger) Recorder {
+	return &recorder{lg: lg}
+}
+
+// recorder takes the connectivity states of multiple SubConns
+// and returns one aggregated connectivity state.
+// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
+type recorder struct {
+	lg *zap.Logger
+
+	mu sync.RWMutex
+
+	cur connectivity.State
+
+	numReady            uint64 // Number of addrConns in ready state.
+	numConnecting       uint64 // Number of addrConns in connecting state.
+	numTransientFailure uint64 // Number of addrConns in transientFailure.
+}
+
+func (rc *recorder) GetCurrentState() (state connectivity.State) {
+	rc.mu.RLock()
+	defer rc.mu.RUnlock()
+	return rc.cur
+}
+
+// RecordTransition records the state change of a subConn and, based on that,
+// re-evaluates what the aggregated state should be.
+//
+//  - If at least one SubConn is in Ready, the aggregated state is Ready;
+//  - Else, if at least one SubConn is in Connecting, the aggregated state is Connecting;
+//  - Else, the aggregated state is TransientFailure.
+//
+// Idle and Shutdown are not considered.
+//
+// ref. https://github.com/grpc/grpc-go/blob/master/balancer/balancer.go
+func (rc *recorder) RecordTransition(oldState, newState connectivity.State) {
+	rc.mu.Lock()
+	defer rc.mu.Unlock()
+
+	for idx, state := range []connectivity.State{oldState, newState} {
+		updateVal := 2*uint64(idx) - 1 // -1 for oldState and +1 for new.
+		switch state {
+		case connectivity.Ready:
+			rc.numReady += updateVal
+		case connectivity.Connecting:
+			rc.numConnecting += updateVal
+		case connectivity.TransientFailure:
+			rc.numTransientFailure += updateVal
+		default:
+			rc.lg.Warn("connectivity recorder received unknown state", zap.String("connectivity-state", state.String()))
+		}
+	}
+
+	switch { // must be exclusive, no overlap
+	case rc.numReady > 0:
+		rc.cur = connectivity.Ready
+	case rc.numConnecting > 0:
+		rc.cur = connectivity.Connecting
+	default:
+		rc.cur = connectivity.TransientFailure
+	}
+}
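
The aggregation rule above (Ready wins over Connecting, which wins over TransientFailure) can be exercised directly through the Recorder. A small illustrative sketch, assuming a single sub-connection moving through the usual states:

```go
package main

import (
	"fmt"

	etcdconn "github.com/coreos/etcd/clientv3/balancer/connectivity"
	"go.uber.org/zap"
	grpcconn "google.golang.org/grpc/connectivity"
)

func main() {
	rec := etcdconn.New(zap.NewNop())

	// One sub-connection dials: Idle -> Connecting -> Ready.
	rec.RecordTransition(grpcconn.Idle, grpcconn.Connecting)
	fmt.Println(rec.GetCurrentState()) // CONNECTING
	rec.RecordTransition(grpcconn.Connecting, grpcconn.Ready)
	fmt.Println(rec.GetCurrentState()) // READY: at least one Ready sub-conn wins

	// The only Ready sub-connection fails, so the aggregate degrades.
	rec.RecordTransition(grpcconn.Ready, grpcconn.TransientFailure)
	fmt.Println(rec.GetCurrentState()) // TRANSIENT_FAILURE
}
```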
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go
new file mode 100644
index 0000000..35dabf5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package picker defines/implements client balancer picker policy.
+package picker
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go
new file mode 100644
index 0000000..9e04378
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/err.go
@@ -0,0 +1,39 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package picker
+
+import (
+	"context"
+
+	"google.golang.org/grpc/balancer"
+)
+
+// NewErr returns a picker that always returns err on "Pick".
+func NewErr(err error) Picker {
+	return &errPicker{p: Error, err: err}
+}
+
+type errPicker struct {
+	p   Policy
+	err error
+}
+
+func (ep *errPicker) String() string {
+	return ep.p.String()
+}
+
+func (ep *errPicker) Pick(context.Context, balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	return nil, nil, ep.err
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go
new file mode 100644
index 0000000..bd1a5d2
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/picker.go
@@ -0,0 +1,91 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package picker
+
+import (
+	"fmt"
+
+	"go.uber.org/zap"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/resolver"
+)
+
+// Picker defines balancer Picker methods.
+type Picker interface {
+	balancer.Picker
+	String() string
+}
+
+// Config defines picker configuration.
+type Config struct {
+	// Policy specifies etcd clientv3's built-in balancer policy.
+	Policy Policy
+
+	// Logger defines picker logging object.
+	Logger *zap.Logger
+
+	// SubConnToResolverAddress maps each gRPC sub-connection to an address.
+	// Basically, it is a list of addresses that the Picker can pick from.
+	SubConnToResolverAddress map[balancer.SubConn]resolver.Address
+}
+
+// Policy defines balancer picker policy.
+type Policy uint8
+
+const (
+	// Error is error picker policy.
+	Error Policy = iota
+
+	// RoundrobinBalanced balances loads over multiple endpoints
+	// and implements failover in roundrobin fashion.
+	RoundrobinBalanced
+
+	// Custom defines custom balancer picker.
+	// TODO: custom picker is not supported yet.
+	Custom
+)
+
+func (p Policy) String() string {
+	switch p {
+	case Error:
+		return "picker-error"
+
+	case RoundrobinBalanced:
+		return "picker-roundrobin-balanced"
+
+	case Custom:
+		panic("'custom' picker policy is not supported yet")
+
+	default:
+		panic(fmt.Errorf("invalid balancer picker policy (%d)", p))
+	}
+}
+
+// New creates a new Picker.
+func New(cfg Config) Picker {
+	switch cfg.Policy {
+	case Error:
+		panic("'error' picker policy is not supported here; use 'picker.NewErr'")
+
+	case RoundrobinBalanced:
+		return newRoundrobinBalanced(cfg)
+
+	case Custom:
+		panic("'custom' picker policy is not supported yet")
+
+	default:
+		panic(fmt.Errorf("invalid balancer picker policy (%d)", cfg.Policy))
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go
new file mode 100644
index 0000000..1b8b285
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/picker/roundrobin_balanced.go
@@ -0,0 +1,95 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package picker
+
+import (
+	"context"
+	"sync"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/resolver"
+)
+
+// newRoundrobinBalanced returns a new roundrobin balanced picker.
+func newRoundrobinBalanced(cfg Config) Picker {
+	scs := make([]balancer.SubConn, 0, len(cfg.SubConnToResolverAddress))
+	for sc := range cfg.SubConnToResolverAddress {
+		scs = append(scs, sc)
+	}
+	return &rrBalanced{
+		p:        RoundrobinBalanced,
+		lg:       cfg.Logger,
+		scs:      scs,
+		scToAddr: cfg.SubConnToResolverAddress,
+	}
+}
+
+type rrBalanced struct {
+	p Policy
+
+	lg *zap.Logger
+
+	mu       sync.RWMutex
+	next     int
+	scs      []balancer.SubConn
+	scToAddr map[balancer.SubConn]resolver.Address
+}
+
+func (rb *rrBalanced) String() string { return rb.p.String() }
+
+// Pick is called for every client request.
+func (rb *rrBalanced) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
+	rb.mu.RLock()
+	n := len(rb.scs)
+	rb.mu.RUnlock()
+	if n == 0 {
+		return nil, nil, balancer.ErrNoSubConnAvailable
+	}
+
+	rb.mu.Lock()
+	cur := rb.next
+	sc := rb.scs[cur]
+	picked := rb.scToAddr[sc].Addr
+	rb.next = (rb.next + 1) % len(rb.scs)
+	rb.mu.Unlock()
+
+	rb.lg.Debug(
+		"picked",
+		zap.String("picker", rb.p.String()),
+		zap.String("address", picked),
+		zap.Int("subconn-index", cur),
+		zap.Int("subconn-size", n),
+	)
+
+	doneFunc := func(info balancer.DoneInfo) {
+		// TODO: error handling?
+		fss := []zapcore.Field{
+			zap.Error(info.Err),
+			zap.String("picker", rb.p.String()),
+			zap.String("address", picked),
+			zap.Bool("success", info.Err == nil),
+			zap.Bool("bytes-sent", info.BytesSent),
+			zap.Bool("bytes-received", info.BytesReceived),
+		}
+		if info.Err == nil {
+			rb.lg.Debug("balancer done", fss...)
+		} else {
+			rb.lg.Warn("balancer failed", fss...)
+		}
+	}
+	return sc, doneFunc, nil
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go b/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
new file mode 100644
index 0000000..864b5df
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/resolver/endpoint/endpoint.go
@@ -0,0 +1,247 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package endpoint resolves etcd endpoints using grpc targets of the form 'endpoint://<id>/<endpoint>'.
+package endpoint
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"net/url"
+	"strings"
+	"sync"
+
+	"google.golang.org/grpc/resolver"
+)
+
+const scheme = "endpoint"
+
+var (
+	targetPrefix = fmt.Sprintf("%s://", scheme)
+
+	bldr *builder
+)
+
+func init() {
+	bldr = &builder{
+		resolverGroups: make(map[string]*ResolverGroup),
+	}
+	resolver.Register(bldr)
+}
+
+type builder struct {
+	mu             sync.RWMutex
+	resolverGroups map[string]*ResolverGroup
+}
+
+// NewResolverGroup creates a new ResolverGroup with the given id.
+func NewResolverGroup(id string) (*ResolverGroup, error) {
+	return bldr.newResolverGroup(id)
+}
+
+// ResolverGroup keeps all endpoints of resolvers using a common endpoint://<id>/ target
+// up-to-date.
+type ResolverGroup struct {
+	mu        sync.RWMutex
+	id        string
+	endpoints []string
+	resolvers []*Resolver
+}
+
+func (e *ResolverGroup) addResolver(r *Resolver) {
+	e.mu.Lock()
+	addrs := epsToAddrs(e.endpoints...)
+	e.resolvers = append(e.resolvers, r)
+	e.mu.Unlock()
+	r.cc.NewAddress(addrs)
+}
+
+func (e *ResolverGroup) removeResolver(r *Resolver) {
+	e.mu.Lock()
+	for i, er := range e.resolvers {
+		if er == r {
+			e.resolvers = append(e.resolvers[:i], e.resolvers[i+1:]...)
+			break
+		}
+	}
+	e.mu.Unlock()
+}
+
+// SetEndpoints updates the endpoints for the ResolverGroup. All registered resolvers are updated
+// immediately with the new endpoints.
+func (e *ResolverGroup) SetEndpoints(endpoints []string) {
+	addrs := epsToAddrs(endpoints...)
+	e.mu.Lock()
+	e.endpoints = endpoints
+	for _, r := range e.resolvers {
+		r.cc.NewAddress(addrs)
+	}
+	e.mu.Unlock()
+}
+
+// Target constructs an endpoint target using the endpoint id of the ResolverGroup.
+func (e *ResolverGroup) Target(endpoint string) string {
+	return Target(e.id, endpoint)
+}
+
+// Target constructs an endpoint resolver target.
+func Target(id, endpoint string) string {
+	return fmt.Sprintf("%s://%s/%s", scheme, id, endpoint)
+}
+
+// IsTarget checks whether a given target string is an endpoint resolver target.
+func IsTarget(target string) bool {
+	return strings.HasPrefix(target, "endpoint://")
+}
+
+func (e *ResolverGroup) Close() {
+	bldr.close(e.id)
+}
+
+// Build creates or reuses an etcd resolver for the etcd cluster name identified by the authority part of the target.
+func (b *builder) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOption) (resolver.Resolver, error) {
+	if len(target.Authority) < 1 {
+		return nil, fmt.Errorf("'etcd' target scheme requires non-empty authority identifying etcd cluster being routed to")
+	}
+	id := target.Authority
+	es, err := b.getResolverGroup(id)
+	if err != nil {
+		return nil, fmt.Errorf("failed to build resolver: %v", err)
+	}
+	r := &Resolver{
+		endpointID: id,
+		cc:         cc,
+	}
+	es.addResolver(r)
+	return r, nil
+}
+
+func (b *builder) newResolverGroup(id string) (*ResolverGroup, error) {
+	b.mu.RLock()
+	_, ok := b.resolverGroups[id]
+	b.mu.RUnlock()
+	if ok {
+		return nil, fmt.Errorf("Endpoint already exists for id: %s", id)
+	}
+
+	es := &ResolverGroup{id: id}
+	b.mu.Lock()
+	b.resolverGroups[id] = es
+	b.mu.Unlock()
+	return es, nil
+}
+
+func (b *builder) getResolverGroup(id string) (*ResolverGroup, error) {
+	b.mu.RLock()
+	es, ok := b.resolverGroups[id]
+	b.mu.RUnlock()
+	if !ok {
+		return nil, fmt.Errorf("ResolverGroup not found for id: %s", id)
+	}
+	return es, nil
+}
+
+func (b *builder) close(id string) {
+	b.mu.Lock()
+	delete(b.resolverGroups, id)
+	b.mu.Unlock()
+}
+
+func (b *builder) Scheme() string {
+	return scheme
+}
+
+// Resolver provides a resolver for a single etcd cluster, identified by name.
+type Resolver struct {
+	endpointID string
+	cc         resolver.ClientConn
+	sync.RWMutex
+}
+
+// TODO: use balancer.epsToAddrs
+func epsToAddrs(eps ...string) (addrs []resolver.Address) {
+	addrs = make([]resolver.Address, 0, len(eps))
+	for _, ep := range eps {
+		addrs = append(addrs, resolver.Address{Addr: ep})
+	}
+	return addrs
+}
+
+func (*Resolver) ResolveNow(o resolver.ResolveNowOption) {}
+
+func (r *Resolver) Close() {
+	es, err := bldr.getResolverGroup(r.endpointID)
+	if err != nil {
+		return
+	}
+	es.removeResolver(r)
+}
+
+// ParseEndpoint parses an endpoint of the form
+// (http|https)://<host> or (unix|unixs)://<path>
+// and returns a protocol ('tcp' or 'unix'),
+// host (or filepath if a unix socket),
+// scheme (http, https, unix, unixs).
+func ParseEndpoint(endpoint string) (proto string, host string, scheme string) {
+	proto = "tcp"
+	host = endpoint
+	url, uerr := url.Parse(endpoint)
+	if uerr != nil || !strings.Contains(endpoint, "://") {
+		return proto, host, scheme
+	}
+	scheme = url.Scheme
+
+	// strip scheme:// prefix since grpc dials by host
+	host = url.Host
+	switch url.Scheme {
+	case "http", "https":
+	case "unix", "unixs":
+		proto = "unix"
+		host = url.Host + url.Path
+	default:
+		proto, host = "", ""
+	}
+	return proto, host, scheme
+}
+
+// ParseTarget parses an endpoint://<id>/<endpoint> string and returns the parsed id and endpoint.
+// If the target is malformed, an error is returned.
+func ParseTarget(target string) (string, string, error) {
+	noPrefix := strings.TrimPrefix(target, targetPrefix)
+	if noPrefix == target {
+		return "", "", fmt.Errorf("malformed target, %s prefix is required: %s", targetPrefix, target)
+	}
+	parts := strings.SplitN(noPrefix, "/", 2)
+	if len(parts) != 2 {
+		return "", "", fmt.Errorf("malformed target, expected %s://<id>/<endpoint>, but got %s", scheme, target)
+	}
+	return parts[0], parts[1], nil
+}
+
+// Dialer dials an endpoint using net.Dialer.
+// Context cancellation and timeout are supported.
+func Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
+	proto, host, _ := ParseEndpoint(dialEp)
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	default:
+	}
+	dialer := &net.Dialer{}
+	if deadline, ok := ctx.Deadline(); ok {
+		dialer.Deadline = deadline
+	}
+	return dialer.DialContext(ctx, proto, host)
+}
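
Worked examples of the parsing helpers above, which may make the target and endpoint formats easier to see at a glance (the addresses are placeholders):

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
)

func main() {
	// Target builds an endpoint://<id>/<endpoint> string; ParseTarget inverts it.
	t := endpoint.Target("cluster-1", "https://10.0.0.1:2379")
	fmt.Println(t) // endpoint://cluster-1/https://10.0.0.1:2379

	id, ep, err := endpoint.ParseTarget(t)
	fmt.Println(id, ep, err) // cluster-1 https://10.0.0.1:2379 <nil>

	// ParseEndpoint maps URL schemes onto a (proto, host, scheme) triple.
	fmt.Println(endpoint.ParseEndpoint("https://10.0.0.1:2379")) // tcp 10.0.0.1:2379 https
	fmt.Println(endpoint.ParseEndpoint("unix:///tmp/etcd.sock")) // unix /tmp/etcd.sock unix
	fmt.Println(endpoint.ParseEndpoint("10.0.0.1:2379"))         // tcp 10.0.0.1:2379 (scheme empty)
}
```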
diff --git a/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go b/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go
new file mode 100644
index 0000000..48eb875
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/balancer/utils.go
@@ -0,0 +1,68 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package balancer
+
+import (
+	"fmt"
+	"net/url"
+	"sort"
+	"sync/atomic"
+	"time"
+
+	"google.golang.org/grpc/balancer"
+	"google.golang.org/grpc/resolver"
+)
+
+func scToString(sc balancer.SubConn) string {
+	return fmt.Sprintf("%p", sc)
+}
+
+func scsToStrings(scs map[balancer.SubConn]resolver.Address) (ss []string) {
+	ss = make([]string, 0, len(scs))
+	for sc, a := range scs {
+		ss = append(ss, fmt.Sprintf("%s (%s)", a.Addr, scToString(sc)))
+	}
+	sort.Strings(ss)
+	return ss
+}
+
+func addrsToStrings(addrs []resolver.Address) (ss []string) {
+	ss = make([]string, len(addrs))
+	for i := range addrs {
+		ss[i] = addrs[i].Addr
+	}
+	sort.Strings(ss)
+	return ss
+}
+
+func epsToAddrs(eps ...string) (addrs []resolver.Address) {
+	addrs = make([]resolver.Address, 0, len(eps))
+	for _, ep := range eps {
+		u, err := url.Parse(ep)
+		if err != nil {
+			addrs = append(addrs, resolver.Address{Addr: ep, Type: resolver.Backend})
+			continue
+		}
+		addrs = append(addrs, resolver.Address{Addr: u.Host, Type: resolver.Backend})
+	}
+	return addrs
+}
+
+var genN = new(uint32)
+
+func genName() string {
+	now := time.Now().UnixNano()
+	return fmt.Sprintf("%X%X", now, atomic.AddUint32(genN, 1))
+}
diff --git a/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go b/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go
new file mode 100644
index 0000000..2dc2012
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/clientv3/credentials/credentials.go
@@ -0,0 +1,173 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package credentials implements gRPC credential interface with etcd specific logic.
+// e.g., client handshake with custom authority parameter
+package credentials
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"sync"
+
+	"github.com/coreos/etcd/clientv3/balancer/resolver/endpoint"
+	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
+	grpccredentials "google.golang.org/grpc/credentials"
+)
+
+// Config defines gRPC credential configuration.
+type Config struct {
+	TLSConfig *tls.Config
+}
+
+// Bundle defines gRPC credential interface.
+type Bundle interface {
+	grpccredentials.Bundle
+	UpdateAuthToken(token string)
+}
+
+// NewBundle constructs a new gRPC credential bundle.
+func NewBundle(cfg Config) Bundle {
+	return &bundle{
+		tc: newTransportCredential(cfg.TLSConfig),
+		rc: newPerRPCCredential(),
+	}
+}
+
+// bundle implements "grpccredentials.Bundle" interface.
+type bundle struct {
+	tc *transportCredential
+	rc *perRPCCredential
+}
+
+func (b *bundle) TransportCredentials() grpccredentials.TransportCredentials {
+	return b.tc
+}
+
+func (b *bundle) PerRPCCredentials() grpccredentials.PerRPCCredentials {
+	return b.rc
+}
+
+func (b *bundle) NewWithMode(mode string) (grpccredentials.Bundle, error) {
+	// no-op
+	return nil, nil
+}
+
+// transportCredential implements "grpccredentials.TransportCredentials" interface.
+// transportCredential wraps TransportCredentials to track which
+// addresses are dialed for which endpoints, and then sets the authority when checking the endpoint's cert to the
+// hostname or IP of the dialed endpoint.
+// This is a workaround of a gRPC load balancer issue. gRPC uses the dialed target's service name as the authority when
+// checking all endpoint certs, which does not work for etcd servers using their hostname or IP as the Subject Alternative Name
+// in their TLS certs.
+// To enable, include both WithTransportCredentials(creds) and WithContextDialer(creds.Dialer)
+// when dialing.
+type transportCredential struct {
+	gtc grpccredentials.TransportCredentials
+	mu  sync.Mutex
+	// addrToEndpoint maps from the connection addresses that are dialed to the hostname or IP of the
+	// endpoint provided to the dialer when dialing
+	addrToEndpoint map[string]string
+}
+
+func newTransportCredential(cfg *tls.Config) *transportCredential {
+	return &transportCredential{
+		gtc:            grpccredentials.NewTLS(cfg),
+		addrToEndpoint: map[string]string{},
+	}
+}
+
+func (tc *transportCredential) ClientHandshake(ctx context.Context, authority string, rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
+	// Set the authority when checking the endpoint's cert to the hostname or IP of the dialed endpoint
+	tc.mu.Lock()
+	dialEp, ok := tc.addrToEndpoint[rawConn.RemoteAddr().String()]
+	tc.mu.Unlock()
+	if ok {
+		_, host, _ := endpoint.ParseEndpoint(dialEp)
+		authority = host
+	}
+	return tc.gtc.ClientHandshake(ctx, authority, rawConn)
+}
+
+// return true if given string is an IP.
+func isIP(ep string) bool {
+	return net.ParseIP(ep) != nil
+}
+
+func (tc *transportCredential) ServerHandshake(rawConn net.Conn) (net.Conn, grpccredentials.AuthInfo, error) {
+	return tc.gtc.ServerHandshake(rawConn)
+}
+
+func (tc *transportCredential) Info() grpccredentials.ProtocolInfo {
+	return tc.gtc.Info()
+}
+
+func (tc *transportCredential) Clone() grpccredentials.TransportCredentials {
+	copy := map[string]string{}
+	tc.mu.Lock()
+	for k, v := range tc.addrToEndpoint {
+		copy[k] = v
+	}
+	tc.mu.Unlock()
+	return &transportCredential{
+		gtc:            tc.gtc.Clone(),
+		addrToEndpoint: copy,
+	}
+}
+
+func (tc *transportCredential) OverrideServerName(serverNameOverride string) error {
+	return tc.gtc.OverrideServerName(serverNameOverride)
+}
+
+func (tc *transportCredential) Dialer(ctx context.Context, dialEp string) (net.Conn, error) {
+	// Keep track of which addresses are dialed for which endpoints
+	conn, err := endpoint.Dialer(ctx, dialEp)
+	if conn != nil {
+		tc.mu.Lock()
+		tc.addrToEndpoint[conn.RemoteAddr().String()] = dialEp
+		tc.mu.Unlock()
+	}
+	return conn, err
+}
+
+// perRPCCredential implements "grpccredentials.PerRPCCredentials" interface.
+type perRPCCredential struct {
+	authToken   string
+	authTokenMu sync.RWMutex
+}
+
+func newPerRPCCredential() *perRPCCredential { return &perRPCCredential{} }
+
+func (rc *perRPCCredential) RequireTransportSecurity() bool { return false }
+
+func (rc *perRPCCredential) GetRequestMetadata(ctx context.Context, s ...string) (map[string]string, error) {
+	rc.authTokenMu.RLock()
+	authToken := rc.authToken
+	rc.authTokenMu.RUnlock()
+	return map[string]string{rpctypes.TokenFieldNameGRPC: authToken}, nil
+}
+
+func (b *bundle) UpdateAuthToken(token string) {
+	if b.rc == nil {
+		return
+	}
+	b.rc.UpdateAuthToken(token)
+}
+
+func (rc *perRPCCredential) UpdateAuthToken(token string) {
+	rc.authTokenMu.Lock()
+	rc.authToken = token
+	rc.authTokenMu.Unlock()
+}
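
A hedged sketch of how the bundle could be wired into a gRPC dial. The target and token are placeholders; the real clientv3 dial path also installs the credential's dialer (via grpc.WithContextDialer) so the authority override in ClientHandshake can take effect, and that plumbing is omitted here:

```go
package main

import (
	"crypto/tls"
	"log"

	"github.com/coreos/etcd/clientv3/credentials"
	"google.golang.org/grpc"
)

func main() {
	bundle := credentials.NewBundle(credentials.Config{TLSConfig: &tls.Config{}})

	conn, err := grpc.Dial("127.0.0.1:2379", // placeholder address
		grpc.WithTransportCredentials(bundle.TransportCredentials()),
		grpc.WithPerRPCCredentials(bundle.PerRPCCredentials()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// After authenticating against etcd, rotate the token carried on every RPC.
	bundle.UpdateAuthToken("example-token")
}
```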
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
new file mode 100644
index 0000000..f72c6a6
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rpctypes has types and values shared by the etcd server and client for v3 RPC interaction.
+package rpctypes
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
new file mode 100644
index 0000000..bc1ad7b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/error.go
@@ -0,0 +1,217 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+import (
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// server-side error
+var (
+	ErrGRPCEmptyKey      = status.New(codes.InvalidArgument, "etcdserver: key is not provided").Err()
+	ErrGRPCKeyNotFound   = status.New(codes.InvalidArgument, "etcdserver: key not found").Err()
+	ErrGRPCValueProvided = status.New(codes.InvalidArgument, "etcdserver: value is provided").Err()
+	ErrGRPCLeaseProvided = status.New(codes.InvalidArgument, "etcdserver: lease is provided").Err()
+	ErrGRPCTooManyOps    = status.New(codes.InvalidArgument, "etcdserver: too many operations in txn request").Err()
+	ErrGRPCDuplicateKey  = status.New(codes.InvalidArgument, "etcdserver: duplicate key given in txn request").Err()
+	ErrGRPCCompacted     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision has been compacted").Err()
+	ErrGRPCFutureRev     = status.New(codes.OutOfRange, "etcdserver: mvcc: required revision is a future revision").Err()
+	ErrGRPCNoSpace       = status.New(codes.ResourceExhausted, "etcdserver: mvcc: database space exceeded").Err()
+
+	ErrGRPCLeaseNotFound    = status.New(codes.NotFound, "etcdserver: requested lease not found").Err()
+	ErrGRPCLeaseExist       = status.New(codes.FailedPrecondition, "etcdserver: lease already exists").Err()
+	ErrGRPCLeaseTTLTooLarge = status.New(codes.OutOfRange, "etcdserver: too large lease TTL").Err()
+
+	ErrGRPCMemberExist            = status.New(codes.FailedPrecondition, "etcdserver: member ID already exist").Err()
+	ErrGRPCPeerURLExist           = status.New(codes.FailedPrecondition, "etcdserver: Peer URLs already exists").Err()
+	ErrGRPCMemberNotEnoughStarted = status.New(codes.FailedPrecondition, "etcdserver: re-configuration failed due to not enough started members").Err()
+	ErrGRPCMemberBadURLs          = status.New(codes.InvalidArgument, "etcdserver: given member URLs are invalid").Err()
+	ErrGRPCMemberNotFound         = status.New(codes.NotFound, "etcdserver: member not found").Err()
+
+	ErrGRPCRequestTooLarge        = status.New(codes.InvalidArgument, "etcdserver: request is too large").Err()
+	ErrGRPCRequestTooManyRequests = status.New(codes.ResourceExhausted, "etcdserver: too many requests").Err()
+
+	ErrGRPCRootUserNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not exist").Err()
+	ErrGRPCRootRoleNotExist     = status.New(codes.FailedPrecondition, "etcdserver: root user does not have root role").Err()
+	ErrGRPCUserAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: user name already exists").Err()
+	ErrGRPCUserEmpty            = status.New(codes.InvalidArgument, "etcdserver: user name is empty").Err()
+	ErrGRPCUserNotFound         = status.New(codes.FailedPrecondition, "etcdserver: user name not found").Err()
+	ErrGRPCRoleAlreadyExist     = status.New(codes.FailedPrecondition, "etcdserver: role name already exists").Err()
+	ErrGRPCRoleNotFound         = status.New(codes.FailedPrecondition, "etcdserver: role name not found").Err()
+	ErrGRPCAuthFailed           = status.New(codes.InvalidArgument, "etcdserver: authentication failed, invalid user ID or password").Err()
+	ErrGRPCPermissionDenied     = status.New(codes.PermissionDenied, "etcdserver: permission denied").Err()
+	ErrGRPCRoleNotGranted       = status.New(codes.FailedPrecondition, "etcdserver: role is not granted to the user").Err()
+	ErrGRPCPermissionNotGranted = status.New(codes.FailedPrecondition, "etcdserver: permission is not granted to the role").Err()
+	ErrGRPCAuthNotEnabled       = status.New(codes.FailedPrecondition, "etcdserver: authentication is not enabled").Err()
+	ErrGRPCInvalidAuthToken     = status.New(codes.Unauthenticated, "etcdserver: invalid auth token").Err()
+	ErrGRPCInvalidAuthMgmt      = status.New(codes.InvalidArgument, "etcdserver: invalid auth management").Err()
+
+	ErrGRPCNoLeader                   = status.New(codes.Unavailable, "etcdserver: no leader").Err()
+	ErrGRPCNotLeader                  = status.New(codes.FailedPrecondition, "etcdserver: not leader").Err()
+	ErrGRPCLeaderChanged              = status.New(codes.Unavailable, "etcdserver: leader changed").Err()
+	ErrGRPCNotCapable                 = status.New(codes.Unavailable, "etcdserver: not capable").Err()
+	ErrGRPCStopped                    = status.New(codes.Unavailable, "etcdserver: server stopped").Err()
+	ErrGRPCTimeout                    = status.New(codes.Unavailable, "etcdserver: request timed out").Err()
+	ErrGRPCTimeoutDueToLeaderFail     = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to previous leader failure").Err()
+	ErrGRPCTimeoutDueToConnectionLost = status.New(codes.Unavailable, "etcdserver: request timed out, possibly due to connection lost").Err()
+	ErrGRPCUnhealthy                  = status.New(codes.Unavailable, "etcdserver: unhealthy cluster").Err()
+	ErrGRPCCorrupt                    = status.New(codes.DataLoss, "etcdserver: corrupt cluster").Err()
+
+	errStringToError = map[string]error{
+		ErrorDesc(ErrGRPCEmptyKey):      ErrGRPCEmptyKey,
+		ErrorDesc(ErrGRPCKeyNotFound):   ErrGRPCKeyNotFound,
+		ErrorDesc(ErrGRPCValueProvided): ErrGRPCValueProvided,
+		ErrorDesc(ErrGRPCLeaseProvided): ErrGRPCLeaseProvided,
+
+		ErrorDesc(ErrGRPCTooManyOps):   ErrGRPCTooManyOps,
+		ErrorDesc(ErrGRPCDuplicateKey): ErrGRPCDuplicateKey,
+		ErrorDesc(ErrGRPCCompacted):    ErrGRPCCompacted,
+		ErrorDesc(ErrGRPCFutureRev):    ErrGRPCFutureRev,
+		ErrorDesc(ErrGRPCNoSpace):      ErrGRPCNoSpace,
+
+		ErrorDesc(ErrGRPCLeaseNotFound):    ErrGRPCLeaseNotFound,
+		ErrorDesc(ErrGRPCLeaseExist):       ErrGRPCLeaseExist,
+		ErrorDesc(ErrGRPCLeaseTTLTooLarge): ErrGRPCLeaseTTLTooLarge,
+
+		ErrorDesc(ErrGRPCMemberExist):            ErrGRPCMemberExist,
+		ErrorDesc(ErrGRPCPeerURLExist):           ErrGRPCPeerURLExist,
+		ErrorDesc(ErrGRPCMemberNotEnoughStarted): ErrGRPCMemberNotEnoughStarted,
+		ErrorDesc(ErrGRPCMemberBadURLs):          ErrGRPCMemberBadURLs,
+		ErrorDesc(ErrGRPCMemberNotFound):         ErrGRPCMemberNotFound,
+
+		ErrorDesc(ErrGRPCRequestTooLarge):        ErrGRPCRequestTooLarge,
+		ErrorDesc(ErrGRPCRequestTooManyRequests): ErrGRPCRequestTooManyRequests,
+
+		ErrorDesc(ErrGRPCRootUserNotExist):     ErrGRPCRootUserNotExist,
+		ErrorDesc(ErrGRPCRootRoleNotExist):     ErrGRPCRootRoleNotExist,
+		ErrorDesc(ErrGRPCUserAlreadyExist):     ErrGRPCUserAlreadyExist,
+		ErrorDesc(ErrGRPCUserEmpty):            ErrGRPCUserEmpty,
+		ErrorDesc(ErrGRPCUserNotFound):         ErrGRPCUserNotFound,
+		ErrorDesc(ErrGRPCRoleAlreadyExist):     ErrGRPCRoleAlreadyExist,
+		ErrorDesc(ErrGRPCRoleNotFound):         ErrGRPCRoleNotFound,
+		ErrorDesc(ErrGRPCAuthFailed):           ErrGRPCAuthFailed,
+		ErrorDesc(ErrGRPCPermissionDenied):     ErrGRPCPermissionDenied,
+		ErrorDesc(ErrGRPCRoleNotGranted):       ErrGRPCRoleNotGranted,
+		ErrorDesc(ErrGRPCPermissionNotGranted): ErrGRPCPermissionNotGranted,
+		ErrorDesc(ErrGRPCAuthNotEnabled):       ErrGRPCAuthNotEnabled,
+		ErrorDesc(ErrGRPCInvalidAuthToken):     ErrGRPCInvalidAuthToken,
+		ErrorDesc(ErrGRPCInvalidAuthMgmt):      ErrGRPCInvalidAuthMgmt,
+
+		ErrorDesc(ErrGRPCNoLeader):                   ErrGRPCNoLeader,
+		ErrorDesc(ErrGRPCNotLeader):                  ErrGRPCNotLeader,
+		ErrorDesc(ErrGRPCNotCapable):                 ErrGRPCNotCapable,
+		ErrorDesc(ErrGRPCStopped):                    ErrGRPCStopped,
+		ErrorDesc(ErrGRPCTimeout):                    ErrGRPCTimeout,
+		ErrorDesc(ErrGRPCTimeoutDueToLeaderFail):     ErrGRPCTimeoutDueToLeaderFail,
+		ErrorDesc(ErrGRPCTimeoutDueToConnectionLost): ErrGRPCTimeoutDueToConnectionLost,
+		ErrorDesc(ErrGRPCUnhealthy):                  ErrGRPCUnhealthy,
+		ErrorDesc(ErrGRPCCorrupt):                    ErrGRPCCorrupt,
+	}
+)
+
+// client-side error
+var (
+	ErrEmptyKey      = Error(ErrGRPCEmptyKey)
+	ErrKeyNotFound   = Error(ErrGRPCKeyNotFound)
+	ErrValueProvided = Error(ErrGRPCValueProvided)
+	ErrLeaseProvided = Error(ErrGRPCLeaseProvided)
+	ErrTooManyOps    = Error(ErrGRPCTooManyOps)
+	ErrDuplicateKey  = Error(ErrGRPCDuplicateKey)
+	ErrCompacted     = Error(ErrGRPCCompacted)
+	ErrFutureRev     = Error(ErrGRPCFutureRev)
+	ErrNoSpace       = Error(ErrGRPCNoSpace)
+
+	ErrLeaseNotFound    = Error(ErrGRPCLeaseNotFound)
+	ErrLeaseExist       = Error(ErrGRPCLeaseExist)
+	ErrLeaseTTLTooLarge = Error(ErrGRPCLeaseTTLTooLarge)
+
+	ErrMemberExist            = Error(ErrGRPCMemberExist)
+	ErrPeerURLExist           = Error(ErrGRPCPeerURLExist)
+	ErrMemberNotEnoughStarted = Error(ErrGRPCMemberNotEnoughStarted)
+	ErrMemberBadURLs          = Error(ErrGRPCMemberBadURLs)
+	ErrMemberNotFound         = Error(ErrGRPCMemberNotFound)
+
+	ErrRequestTooLarge = Error(ErrGRPCRequestTooLarge)
+	ErrTooManyRequests = Error(ErrGRPCRequestTooManyRequests)
+
+	ErrRootUserNotExist     = Error(ErrGRPCRootUserNotExist)
+	ErrRootRoleNotExist     = Error(ErrGRPCRootRoleNotExist)
+	ErrUserAlreadyExist     = Error(ErrGRPCUserAlreadyExist)
+	ErrUserEmpty            = Error(ErrGRPCUserEmpty)
+	ErrUserNotFound         = Error(ErrGRPCUserNotFound)
+	ErrRoleAlreadyExist     = Error(ErrGRPCRoleAlreadyExist)
+	ErrRoleNotFound         = Error(ErrGRPCRoleNotFound)
+	ErrAuthFailed           = Error(ErrGRPCAuthFailed)
+	ErrPermissionDenied     = Error(ErrGRPCPermissionDenied)
+	ErrRoleNotGranted       = Error(ErrGRPCRoleNotGranted)
+	ErrPermissionNotGranted = Error(ErrGRPCPermissionNotGranted)
+	ErrAuthNotEnabled       = Error(ErrGRPCAuthNotEnabled)
+	ErrInvalidAuthToken     = Error(ErrGRPCInvalidAuthToken)
+	ErrInvalidAuthMgmt      = Error(ErrGRPCInvalidAuthMgmt)
+
+	ErrNoLeader                   = Error(ErrGRPCNoLeader)
+	ErrNotLeader                  = Error(ErrGRPCNotLeader)
+	ErrLeaderChanged              = Error(ErrGRPCLeaderChanged)
+	ErrNotCapable                 = Error(ErrGRPCNotCapable)
+	ErrStopped                    = Error(ErrGRPCStopped)
+	ErrTimeout                    = Error(ErrGRPCTimeout)
+	ErrTimeoutDueToLeaderFail     = Error(ErrGRPCTimeoutDueToLeaderFail)
+	ErrTimeoutDueToConnectionLost = Error(ErrGRPCTimeoutDueToConnectionLost)
+	ErrUnhealthy                  = Error(ErrGRPCUnhealthy)
+	ErrCorrupt                    = Error(ErrGRPCCorrupt)
+)
+
+// EtcdError defines gRPC server errors.
+// (https://github.com/grpc/grpc-go/blob/master/rpc_util.go#L319-L323)
+type EtcdError struct {
+	code codes.Code
+	desc string
+}
+
+// Code returns grpc/codes.Code.
+// TODO: define clientv3/codes.Code.
+func (e EtcdError) Code() codes.Code {
+	return e.code
+}
+
+func (e EtcdError) Error() string {
+	return e.desc
+}
+
+func Error(err error) error {
+	if err == nil {
+		return nil
+	}
+	verr, ok := errStringToError[ErrorDesc(err)]
+	if !ok { // not gRPC error
+		return err
+	}
+	ev, ok := status.FromError(verr)
+	var desc string
+	if ok {
+		desc = ev.Message()
+	} else {
+		desc = verr.Error()
+	}
+	return EtcdError{code: ev.Code(), desc: desc}
+}
+
+func ErrorDesc(err error) string {
+	if s, ok := status.FromError(err); ok {
+		return s.Message()
+	}
+	return err.Error()
+}
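
A minimal usage sketch, assuming only the vendored rpctypes package above: rpctypes.Error looks the gRPC status message up in errStringToError and returns the matching client-side EtcdError, so callers can compare against the sentinel Err* values. The classify helper and the particular cases are illustrative, not part of the vendored code.

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
)

// classify maps an error returned by an etcd RPC onto a short description.
// rpctypes.Error normalizes gRPC status errors; anything it does not
// recognize is passed through unchanged and lands in the default case.
func classify(err error) string {
	switch rpctypes.Error(err) {
	case nil:
		return "ok"
	case rpctypes.ErrEmptyKey:
		return "request used an empty key"
	case rpctypes.ErrCompacted:
		return "requested revision was already compacted"
	case rpctypes.ErrNoLeader:
		return "cluster has no leader; retry later"
	default:
		return fmt.Sprintf("unclassified error: %v", err)
	}
}

func main() {
	fmt.Println(classify(rpctypes.ErrGRPCEmptyKey))  // request used an empty key
	fmt.Println(classify(fmt.Errorf("dial failed"))) // unclassified error: dial failed
}
```
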
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
new file mode 100644
index 0000000..90b8b83
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/md.go
@@ -0,0 +1,22 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+var (
+	MetadataRequireLeaderKey = "hasleader"
+	MetadataHasLeader        = "true"
+
+	MetadataClientAPIVersionKey = "client-api-version"
+)
diff --git a/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
new file mode 100644
index 0000000..8f8ac60
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes/metadatafields.go
@@ -0,0 +1,20 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpctypes
+
+var (
+	TokenFieldNameGRPC    = "token"
+	TokenFieldNameSwagger = "authorization"
+)
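
A minimal sketch, assuming the keys defined in md.go and metadatafields.go above, of how they are typically attached to an outgoing gRPC context: the require-leader flag and the auth token field. The withEtcdMetadata helper and the token value are hypothetical, for illustration only.

```go
package main

import (
	"context"

	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"google.golang.org/grpc/metadata"
)

// withEtcdMetadata returns a context whose outgoing gRPC metadata asks the
// server to fail fast when it has no leader and, if provided, carries an
// auth token under the "token" field.
func withEtcdMetadata(ctx context.Context, token string) context.Context {
	ctx = metadata.AppendToOutgoingContext(ctx,
		rpctypes.MetadataRequireLeaderKey, rpctypes.MetadataHasLeader)
	if token != "" {
		ctx = metadata.AppendToOutgoingContext(ctx,
			rpctypes.TokenFieldNameGRPC, token)
	}
	return ctx
}

func main() {
	ctx := withEtcdMetadata(context.Background(), "example-token")
	_ = ctx // ready to be passed to a gRPC client call
}
```
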
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
new file mode 100644
index 0000000..12b6763
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.pb.go
@@ -0,0 +1,1034 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: etcdserver.proto
+
+package etcdserverpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Request struct {
+	ID                   uint64   `protobuf:"varint,1,opt,name=ID" json:"ID"`
+	Method               string   `protobuf:"bytes,2,opt,name=Method" json:"Method"`
+	Path                 string   `protobuf:"bytes,3,opt,name=Path" json:"Path"`
+	Val                  string   `protobuf:"bytes,4,opt,name=Val" json:"Val"`
+	Dir                  bool     `protobuf:"varint,5,opt,name=Dir" json:"Dir"`
+	PrevValue            string   `protobuf:"bytes,6,opt,name=PrevValue" json:"PrevValue"`
+	PrevIndex            uint64   `protobuf:"varint,7,opt,name=PrevIndex" json:"PrevIndex"`
+	PrevExist            *bool    `protobuf:"varint,8,opt,name=PrevExist" json:"PrevExist,omitempty"`
+	Expiration           int64    `protobuf:"varint,9,opt,name=Expiration" json:"Expiration"`
+	Wait                 bool     `protobuf:"varint,10,opt,name=Wait" json:"Wait"`
+	Since                uint64   `protobuf:"varint,11,opt,name=Since" json:"Since"`
+	Recursive            bool     `protobuf:"varint,12,opt,name=Recursive" json:"Recursive"`
+	Sorted               bool     `protobuf:"varint,13,opt,name=Sorted" json:"Sorted"`
+	Quorum               bool     `protobuf:"varint,14,opt,name=Quorum" json:"Quorum"`
+	Time                 int64    `protobuf:"varint,15,opt,name=Time" json:"Time"`
+	Stream               bool     `protobuf:"varint,16,opt,name=Stream" json:"Stream"`
+	Refresh              *bool    `protobuf:"varint,17,opt,name=Refresh" json:"Refresh,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Request) Reset()         { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage()    {}
+func (*Request) Descriptor() ([]byte, []int) {
+	return fileDescriptor_09ffbeb3bebbce7e, []int{0}
+}
+func (m *Request) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Request.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Request) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Request.Merge(m, src)
+}
+func (m *Request) XXX_Size() int {
+	return m.Size()
+}
+func (m *Request) XXX_DiscardUnknown() {
+	xxx_messageInfo_Request.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Request proto.InternalMessageInfo
+
+type Metadata struct {
+	NodeID               uint64   `protobuf:"varint,1,opt,name=NodeID" json:"NodeID"`
+	ClusterID            uint64   `protobuf:"varint,2,opt,name=ClusterID" json:"ClusterID"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Metadata) Reset()         { *m = Metadata{} }
+func (m *Metadata) String() string { return proto.CompactTextString(m) }
+func (*Metadata) ProtoMessage()    {}
+func (*Metadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_09ffbeb3bebbce7e, []int{1}
+}
+func (m *Metadata) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Metadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Metadata.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Metadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Metadata.Merge(m, src)
+}
+func (m *Metadata) XXX_Size() int {
+	return m.Size()
+}
+func (m *Metadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_Metadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Metadata proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*Request)(nil), "etcdserverpb.Request")
+	proto.RegisterType((*Metadata)(nil), "etcdserverpb.Metadata")
+}
+
+func init() { proto.RegisterFile("etcdserver.proto", fileDescriptor_09ffbeb3bebbce7e) }
+
+var fileDescriptor_09ffbeb3bebbce7e = []byte{
+	// 380 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0xd2, 0xdd, 0x6e, 0xda, 0x30,
+	0x14, 0x07, 0x70, 0x0c, 0xe1, 0xcb, 0x63, 0x1b, 0xb3, 0xd0, 0x74, 0x84, 0xa6, 0x2c, 0x42, 0xbb,
+	0xc8, 0xd5, 0xf6, 0x0e, 0x2c, 0x5c, 0x44, 0x2a, 0x15, 0x0d, 0x15, 0xbd, 0x76, 0xc9, 0x29, 0x58,
+	0x02, 0x4c, 0x1d, 0x07, 0xf1, 0x06, 0x7d, 0x85, 0x3e, 0x12, 0x97, 0x7d, 0x82, 0xaa, 0xa5, 0x2f,
+	0x52, 0x39, 0x24, 0xc4, 0xed, 0x5d, 0xf4, 0xfb, 0x9f, 0x1c, 0x1f, 0x7f, 0xd0, 0x2e, 0xea, 0x79,
+	0x9c, 0xa0, 0xda, 0xa1, 0xfa, 0xbb, 0x55, 0x52, 0x4b, 0xd6, 0x29, 0x65, 0x7b, 0xdb, 0xef, 0x2d,
+	0xe4, 0x42, 0x66, 0xc1, 0x3f, 0xf3, 0x75, 0xaa, 0x19, 0x3c, 0x38, 0xb4, 0x19, 0xe1, 0x7d, 0x8a,
+	0x89, 0x66, 0x3d, 0x5a, 0x0d, 0x03, 0x20, 0x1e, 0xf1, 0x9d, 0xa1, 0x73, 0x78, 0xfe, 0x5d, 0x89,
+	0xaa, 0x61, 0xc0, 0x7e, 0xd1, 0xc6, 0x18, 0xf5, 0x52, 0xc6, 0x50, 0xf5, 0x88, 0xdf, 0xce, 0x93,
+	0xdc, 0x18, 0x50, 0x67, 0xc2, 0xf5, 0x12, 0x6a, 0x56, 0x96, 0x09, 0xfb, 0x49, 0x6b, 0x33, 0xbe,
+	0x02, 0xc7, 0x0a, 0x0c, 0x18, 0x0f, 0x84, 0x82, 0xba, 0x47, 0xfc, 0x56, 0xe1, 0x81, 0x50, 0x6c,
+	0x40, 0xdb, 0x13, 0x85, 0xbb, 0x19, 0x5f, 0xa5, 0x08, 0x0d, 0xeb, 0xaf, 0x92, 0x8b, 0x9a, 0x70,
+	0x13, 0xe3, 0x1e, 0x9a, 0xd6, 0xa0, 0x25, 0x17, 0x35, 0xa3, 0xbd, 0x48, 0x34, 0xb4, 0xce, 0xab,
+	0x90, 0xa8, 0x64, 0xf6, 0x87, 0xd2, 0xd1, 0x7e, 0x2b, 0x14, 0xd7, 0x42, 0x6e, 0xa0, 0xed, 0x11,
+	0xbf, 0x96, 0x37, 0xb2, 0xdc, 0xec, 0xed, 0x86, 0x0b, 0x0d, 0xd4, 0x1a, 0x35, 0x13, 0xd6, 0xa7,
+	0xf5, 0xa9, 0xd8, 0xcc, 0x11, 0xbe, 0x58, 0x33, 0x9c, 0xc8, 0xac, 0x1f, 0xe1, 0x3c, 0x55, 0x89,
+	0xd8, 0x21, 0x74, 0xac, 0x5f, 0x4b, 0x36, 0x67, 0x3a, 0x95, 0x4a, 0x63, 0x0c, 0x5f, 0xad, 0x82,
+	0xdc, 0x4c, 0x7a, 0x95, 0x4a, 0x95, 0xae, 0xe1, 0x9b, 0x9d, 0x9e, 0xcc, 0x4c, 0x75, 0x2d, 0xd6,
+	0x08, 0xdf, 0xad, 0xa9, 0x33, 0xc9, 0xba, 0x6a, 0x85, 0x7c, 0x0d, 0xdd, 0x0f, 0x5d, 0x33, 0x63,
+	0xae, 0xb9, 0xe8, 0x3b, 0x85, 0xc9, 0x12, 0x7e, 0x58, 0xa7, 0x52, 0xe0, 0xe0, 0x82, 0xb6, 0xc6,
+	0xa8, 0x79, 0xcc, 0x35, 0x37, 0x9d, 0x2e, 0x65, 0x8c, 0x9f, 0x5e, 0x43, 0x6e, 0x66, 0x87, 0xff,
+	0x57, 0x69, 0xa2, 0x51, 0x85, 0x41, 0xf6, 0x28, 0xce, 0xb7, 0x70, 0xe6, 0x61, 0xef, 0xf0, 0xea,
+	0x56, 0x0e, 0x47, 0x97, 0x3c, 0x1d, 0x5d, 0xf2, 0x72, 0x74, 0xc9, 0xe3, 0x9b, 0x5b, 0x79, 0x0f,
+	0x00, 0x00, 0xff, 0xff, 0xee, 0x40, 0xba, 0xd6, 0xa4, 0x02, 0x00, 0x00,
+}
+
+func (m *Request) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Request) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Request) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Refresh != nil {
+		i--
+		if *m.Refresh {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x1
+		i--
+		dAtA[i] = 0x88
+	}
+	i--
+	if m.Stream {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x1
+	i--
+	dAtA[i] = 0x80
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Time))
+	i--
+	dAtA[i] = 0x78
+	i--
+	if m.Quorum {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x70
+	i--
+	if m.Sorted {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x68
+	i--
+	if m.Recursive {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x60
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Since))
+	i--
+	dAtA[i] = 0x58
+	i--
+	if m.Wait {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x50
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.Expiration))
+	i--
+	dAtA[i] = 0x48
+	if m.PrevExist != nil {
+		i--
+		if *m.PrevExist {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x40
+	}
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.PrevIndex))
+	i--
+	dAtA[i] = 0x38
+	i -= len(m.PrevValue)
+	copy(dAtA[i:], m.PrevValue)
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.PrevValue)))
+	i--
+	dAtA[i] = 0x32
+	i--
+	if m.Dir {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x28
+	i -= len(m.Val)
+	copy(dAtA[i:], m.Val)
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Val)))
+	i--
+	dAtA[i] = 0x22
+	i -= len(m.Path)
+	copy(dAtA[i:], m.Path)
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Path)))
+	i--
+	dAtA[i] = 0x1a
+	i -= len(m.Method)
+	copy(dAtA[i:], m.Method)
+	i = encodeVarintEtcdserver(dAtA, i, uint64(len(m.Method)))
+	i--
+	dAtA[i] = 0x12
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ID))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *Metadata) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Metadata) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Metadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.ClusterID))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintEtcdserver(dAtA, i, uint64(m.NodeID))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintEtcdserver(dAtA []byte, offset int, v uint64) int {
+	offset -= sovEtcdserver(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *Request) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovEtcdserver(uint64(m.ID))
+	l = len(m.Method)
+	n += 1 + l + sovEtcdserver(uint64(l))
+	l = len(m.Path)
+	n += 1 + l + sovEtcdserver(uint64(l))
+	l = len(m.Val)
+	n += 1 + l + sovEtcdserver(uint64(l))
+	n += 2
+	l = len(m.PrevValue)
+	n += 1 + l + sovEtcdserver(uint64(l))
+	n += 1 + sovEtcdserver(uint64(m.PrevIndex))
+	if m.PrevExist != nil {
+		n += 2
+	}
+	n += 1 + sovEtcdserver(uint64(m.Expiration))
+	n += 2
+	n += 1 + sovEtcdserver(uint64(m.Since))
+	n += 2
+	n += 2
+	n += 2
+	n += 1 + sovEtcdserver(uint64(m.Time))
+	n += 3
+	if m.Refresh != nil {
+		n += 3
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Metadata) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovEtcdserver(uint64(m.NodeID))
+	n += 1 + sovEtcdserver(uint64(m.ClusterID))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovEtcdserver(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozEtcdserver(x uint64) (n int) {
+	return sovEtcdserver(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Request) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowEtcdserver
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Request: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Request: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Method = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Path = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Val = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Dir", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Dir = bool(v != 0)
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevValue", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PrevValue = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevIndex", wireType)
+			}
+			m.PrevIndex = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.PrevIndex |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevExist", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.PrevExist = &b
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Expiration", wireType)
+			}
+			m.Expiration = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Expiration |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Wait", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Wait = bool(v != 0)
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Since", wireType)
+			}
+			m.Since = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Since |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 12:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Recursive", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Recursive = bool(v != 0)
+		case 13:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Sorted", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Sorted = bool(v != 0)
+		case 14:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Quorum", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Quorum = bool(v != 0)
+		case 15:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Time", wireType)
+			}
+			m.Time = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Time |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 16:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Stream", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Stream = bool(v != 0)
+		case 17:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Refresh", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			b := bool(v != 0)
+			m.Refresh = &b
+		default:
+			iNdEx = preIndex
+			skippy, err := skipEtcdserver(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Metadata) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowEtcdserver
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Metadata: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Metadata: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+			}
+			m.NodeID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.NodeID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClusterID", wireType)
+			}
+			m.ClusterID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ClusterID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipEtcdserver(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthEtcdserver
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipEtcdserver(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowEtcdserver
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowEtcdserver
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthEtcdserver
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthEtcdserver
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowEtcdserver
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipEtcdserver(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthEtcdserver
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthEtcdserver = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowEtcdserver   = fmt.Errorf("proto: integer overflow")
+)
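
A minimal round-trip sketch, assuming the generated etcdserverpb package above: it exercises the Marshal/Unmarshal pair on the Metadata message. The field values are arbitrary.

```go
package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	// Encode a Metadata message; NodeID and ClusterID are written as
	// varint fields 1 and 2 by the generated MarshalToSizedBuffer.
	in := pb.Metadata{NodeID: 0x1234, ClusterID: 0x5678}
	buf, err := in.Marshal()
	if err != nil {
		panic(err)
	}

	// Decode it back and confirm the round trip.
	var out pb.Metadata
	if err := out.Unmarshal(buf); err != nil {
		panic(err)
	}
	fmt.Printf("NodeID=%#x ClusterID=%#x\n", out.NodeID, out.ClusterID)
}
```
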
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto
new file mode 100644
index 0000000..25e0aca
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/etcdserver.proto
@@ -0,0 +1,34 @@
+syntax = "proto2";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message Request {
+	optional uint64 ID         =  1 [(gogoproto.nullable) = false];
+	optional string Method     =  2 [(gogoproto.nullable) = false];
+	optional string Path       =  3 [(gogoproto.nullable) = false];
+	optional string Val        =  4 [(gogoproto.nullable) = false];
+	optional bool   Dir        =  5 [(gogoproto.nullable) = false];
+	optional string PrevValue  =  6 [(gogoproto.nullable) = false];
+	optional uint64 PrevIndex  =  7 [(gogoproto.nullable) = false];
+	optional bool   PrevExist  =  8 [(gogoproto.nullable) = true];
+	optional int64  Expiration =  9 [(gogoproto.nullable) = false];
+	optional bool   Wait       = 10 [(gogoproto.nullable) = false];
+	optional uint64 Since      = 11 [(gogoproto.nullable) = false];
+	optional bool   Recursive  = 12 [(gogoproto.nullable) = false];
+	optional bool   Sorted     = 13 [(gogoproto.nullable) = false];
+	optional bool   Quorum     = 14 [(gogoproto.nullable) = false];
+	optional int64  Time       = 15 [(gogoproto.nullable) = false];
+	optional bool   Stream     = 16 [(gogoproto.nullable) = false];
+	optional bool   Refresh    = 17 [(gogoproto.nullable) = true];
+}
+
+message Metadata {
+	optional uint64 NodeID    = 1 [(gogoproto.nullable) = false];
+	optional uint64 ClusterID = 2 [(gogoproto.nullable) = false];
+}
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
new file mode 100644
index 0000000..b3a199e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.pb.go
@@ -0,0 +1,2427 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: raft_internal.proto
+
+package etcdserverpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type RequestHeader struct {
+	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// username is a username that is associated with an auth token of the gRPC connection
+	Username string `protobuf:"bytes,2,opt,name=username,proto3" json:"username,omitempty"`
+	// auth_revision is a revision number of auth.authStore. It is not related to mvcc
+	AuthRevision         uint64   `protobuf:"varint,3,opt,name=auth_revision,json=authRevision,proto3" json:"auth_revision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *RequestHeader) Reset()         { *m = RequestHeader{} }
+func (m *RequestHeader) String() string { return proto.CompactTextString(m) }
+func (*RequestHeader) ProtoMessage()    {}
+func (*RequestHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b4c9a9be0cfca103, []int{0}
+}
+func (m *RequestHeader) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RequestHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RequestHeader.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RequestHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RequestHeader.Merge(m, src)
+}
+func (m *RequestHeader) XXX_Size() int {
+	return m.Size()
+}
+func (m *RequestHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_RequestHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestHeader proto.InternalMessageInfo
+
+// An InternalRaftRequest is the union of all requests which can be
+// sent via raft.
+type InternalRaftRequest struct {
+	Header                   *RequestHeader                   `protobuf:"bytes,100,opt,name=header,proto3" json:"header,omitempty"`
+	ID                       uint64                           `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	V2                       *Request                         `protobuf:"bytes,2,opt,name=v2,proto3" json:"v2,omitempty"`
+	Range                    *RangeRequest                    `protobuf:"bytes,3,opt,name=range,proto3" json:"range,omitempty"`
+	Put                      *PutRequest                      `protobuf:"bytes,4,opt,name=put,proto3" json:"put,omitempty"`
+	DeleteRange              *DeleteRangeRequest              `protobuf:"bytes,5,opt,name=delete_range,json=deleteRange,proto3" json:"delete_range,omitempty"`
+	Txn                      *TxnRequest                      `protobuf:"bytes,6,opt,name=txn,proto3" json:"txn,omitempty"`
+	Compaction               *CompactionRequest               `protobuf:"bytes,7,opt,name=compaction,proto3" json:"compaction,omitempty"`
+	LeaseGrant               *LeaseGrantRequest               `protobuf:"bytes,8,opt,name=lease_grant,json=leaseGrant,proto3" json:"lease_grant,omitempty"`
+	LeaseRevoke              *LeaseRevokeRequest              `protobuf:"bytes,9,opt,name=lease_revoke,json=leaseRevoke,proto3" json:"lease_revoke,omitempty"`
+	Alarm                    *AlarmRequest                    `protobuf:"bytes,10,opt,name=alarm,proto3" json:"alarm,omitempty"`
+	AuthEnable               *AuthEnableRequest               `protobuf:"bytes,1000,opt,name=auth_enable,json=authEnable,proto3" json:"auth_enable,omitempty"`
+	AuthDisable              *AuthDisableRequest              `protobuf:"bytes,1011,opt,name=auth_disable,json=authDisable,proto3" json:"auth_disable,omitempty"`
+	Authenticate             *InternalAuthenticateRequest     `protobuf:"bytes,1012,opt,name=authenticate,proto3" json:"authenticate,omitempty"`
+	AuthUserAdd              *AuthUserAddRequest              `protobuf:"bytes,1100,opt,name=auth_user_add,json=authUserAdd,proto3" json:"auth_user_add,omitempty"`
+	AuthUserDelete           *AuthUserDeleteRequest           `protobuf:"bytes,1101,opt,name=auth_user_delete,json=authUserDelete,proto3" json:"auth_user_delete,omitempty"`
+	AuthUserGet              *AuthUserGetRequest              `protobuf:"bytes,1102,opt,name=auth_user_get,json=authUserGet,proto3" json:"auth_user_get,omitempty"`
+	AuthUserChangePassword   *AuthUserChangePasswordRequest   `protobuf:"bytes,1103,opt,name=auth_user_change_password,json=authUserChangePassword,proto3" json:"auth_user_change_password,omitempty"`
+	AuthUserGrantRole        *AuthUserGrantRoleRequest        `protobuf:"bytes,1104,opt,name=auth_user_grant_role,json=authUserGrantRole,proto3" json:"auth_user_grant_role,omitempty"`
+	AuthUserRevokeRole       *AuthUserRevokeRoleRequest       `protobuf:"bytes,1105,opt,name=auth_user_revoke_role,json=authUserRevokeRole,proto3" json:"auth_user_revoke_role,omitempty"`
+	AuthUserList             *AuthUserListRequest             `protobuf:"bytes,1106,opt,name=auth_user_list,json=authUserList,proto3" json:"auth_user_list,omitempty"`
+	AuthRoleList             *AuthRoleListRequest             `protobuf:"bytes,1107,opt,name=auth_role_list,json=authRoleList,proto3" json:"auth_role_list,omitempty"`
+	AuthRoleAdd              *AuthRoleAddRequest              `protobuf:"bytes,1200,opt,name=auth_role_add,json=authRoleAdd,proto3" json:"auth_role_add,omitempty"`
+	AuthRoleDelete           *AuthRoleDeleteRequest           `protobuf:"bytes,1201,opt,name=auth_role_delete,json=authRoleDelete,proto3" json:"auth_role_delete,omitempty"`
+	AuthRoleGet              *AuthRoleGetRequest              `protobuf:"bytes,1202,opt,name=auth_role_get,json=authRoleGet,proto3" json:"auth_role_get,omitempty"`
+	AuthRoleGrantPermission  *AuthRoleGrantPermissionRequest  `protobuf:"bytes,1203,opt,name=auth_role_grant_permission,json=authRoleGrantPermission,proto3" json:"auth_role_grant_permission,omitempty"`
+	AuthRoleRevokePermission *AuthRoleRevokePermissionRequest `protobuf:"bytes,1204,opt,name=auth_role_revoke_permission,json=authRoleRevokePermission,proto3" json:"auth_role_revoke_permission,omitempty"`
+	XXX_NoUnkeyedLiteral     struct{}                         `json:"-"`
+	XXX_unrecognized         []byte                           `json:"-"`
+	XXX_sizecache            int32                            `json:"-"`
+}
+
+func (m *InternalRaftRequest) Reset()         { *m = InternalRaftRequest{} }
+func (m *InternalRaftRequest) String() string { return proto.CompactTextString(m) }
+func (*InternalRaftRequest) ProtoMessage()    {}
+func (*InternalRaftRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b4c9a9be0cfca103, []int{1}
+}
+func (m *InternalRaftRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *InternalRaftRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_InternalRaftRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *InternalRaftRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InternalRaftRequest.Merge(m, src)
+}
+func (m *InternalRaftRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *InternalRaftRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_InternalRaftRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InternalRaftRequest proto.InternalMessageInfo
+
+type EmptyResponse struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EmptyResponse) Reset()         { *m = EmptyResponse{} }
+func (m *EmptyResponse) String() string { return proto.CompactTextString(m) }
+func (*EmptyResponse) ProtoMessage()    {}
+func (*EmptyResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b4c9a9be0cfca103, []int{2}
+}
+func (m *EmptyResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *EmptyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_EmptyResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *EmptyResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EmptyResponse.Merge(m, src)
+}
+func (m *EmptyResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *EmptyResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_EmptyResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EmptyResponse proto.InternalMessageInfo
+
+// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
+// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
+// To avoid misuse of that field, we keep an internal version of AuthenticateRequest.
+type InternalAuthenticateRequest struct {
+	Name     string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	// simple_token is generated in API layer (etcdserver/v3_server.go)
+	SimpleToken          string   `protobuf:"bytes,3,opt,name=simple_token,json=simpleToken,proto3" json:"simple_token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *InternalAuthenticateRequest) Reset()         { *m = InternalAuthenticateRequest{} }
+func (m *InternalAuthenticateRequest) String() string { return proto.CompactTextString(m) }
+func (*InternalAuthenticateRequest) ProtoMessage()    {}
+func (*InternalAuthenticateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b4c9a9be0cfca103, []int{3}
+}
+func (m *InternalAuthenticateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *InternalAuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_InternalAuthenticateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *InternalAuthenticateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_InternalAuthenticateRequest.Merge(m, src)
+}
+func (m *InternalAuthenticateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *InternalAuthenticateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_InternalAuthenticateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_InternalAuthenticateRequest proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterType((*RequestHeader)(nil), "etcdserverpb.RequestHeader")
+	proto.RegisterType((*InternalRaftRequest)(nil), "etcdserverpb.InternalRaftRequest")
+	proto.RegisterType((*EmptyResponse)(nil), "etcdserverpb.EmptyResponse")
+	proto.RegisterType((*InternalAuthenticateRequest)(nil), "etcdserverpb.InternalAuthenticateRequest")
+}
+
+func init() { proto.RegisterFile("raft_internal.proto", fileDescriptor_b4c9a9be0cfca103) }
+
+var fileDescriptor_b4c9a9be0cfca103 = []byte{
+	// 840 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x96, 0xdb, 0x4e, 0xdb, 0x48,
+	0x18, 0xc7, 0x71, 0x38, 0x66, 0x12, 0xb2, 0xec, 0x00, 0xbb, 0xb3, 0x41, 0xca, 0x86, 0xa0, 0xdd,
+	0x65, 0x77, 0x5b, 0x5a, 0x85, 0x07, 0x68, 0x53, 0x82, 0x00, 0x09, 0x21, 0x64, 0x51, 0xa9, 0x52,
+	0x2f, 0xdc, 0x21, 0xfe, 0x48, 0x5c, 0x1c, 0xdb, 0x1d, 0x4f, 0x52, 0xfa, 0x26, 0x7d, 0x8c, 0x9e,
+	0x1e, 0x82, 0x8b, 0x1e, 0x68, 0xfb, 0x02, 0x2d, 0xbd, 0xe9, 0x55, 0x6f, 0xda, 0x07, 0xa8, 0xe6,
+	0x60, 0x3b, 0x4e, 0x1c, 0xee, 0xec, 0x6f, 0xfe, 0xdf, 0xef, 0xfb, 0x0f, 0xf3, 0x37, 0x13, 0xb4,
+	0xc8, 0xe8, 0x09, 0xb7, 0x1c, 0x8f, 0x03, 0xf3, 0xa8, 0xbb, 0x11, 0x30, 0x9f, 0xfb, 0xb8, 0x08,
+	0xbc, 0x65, 0x87, 0xc0, 0xfa, 0xc0, 0x82, 0xe3, 0xf2, 0x52, 0xdb, 0x6f, 0xfb, 0x72, 0xe1, 0x86,
+	0x78, 0x52, 0x9a, 0xf2, 0x42, 0xa2, 0xd1, 0x95, 0x3c, 0x0b, 0x5a, 0xea, 0xb1, 0xf6, 0x00, 0xcd,
+	0x9b, 0xf0, 0xa8, 0x07, 0x21, 0xdf, 0x05, 0x6a, 0x03, 0xc3, 0x25, 0x94, 0xdb, 0x6b, 0x12, 0xa3,
+	0x6a, 0xac, 0x4f, 0x99, 0xb9, 0xbd, 0x26, 0x2e, 0xa3, 0xb9, 0x5e, 0x28, 0x46, 0x76, 0x81, 0xe4,
+	0xaa, 0xc6, 0x7a, 0xde, 0x8c, 0xdf, 0xf1, 0x1a, 0x9a, 0xa7, 0x3d, 0xde, 0xb1, 0x18, 0xf4, 0x9d,
+	0xd0, 0xf1, 0x3d, 0x32, 0x29, 0xdb, 0x8a, 0xa2, 0x68, 0xea, 0x5a, 0xed, 0x5b, 0x09, 0x2d, 0xee,
+	0x69, 0xd7, 0x26, 0x3d, 0xe1, 0x7a, 0x1c, 0xde, 0x44, 0x33, 0x1d, 0x39, 0x92, 0xd8, 0x55, 0x63,
+	0xbd, 0x50, 0x5f, 0xd9, 0x18, 0xdc, 0xcb, 0x46, 0xca, 0x95, 0xa9, 0xa5, 0x23, 0xee, 0xfe, 0x42,
+	0xb9, 0x7e, 0x5d, 0xfa, 0x2a, 0xd4, 0x97, 0x33, 0x01, 0x66, 0xae, 0x5f, 0xc7, 0x37, 0xd1, 0x34,
+	0xa3, 0x5e, 0x1b, 0xa4, 0xc1, 0x42, 0xbd, 0x3c, 0xa4, 0x14, 0x4b, 0x91, 0x5c, 0x09, 0xf1, 0x7f,
+	0x68, 0x32, 0xe8, 0x71, 0x32, 0x25, 0xf5, 0x24, 0xad, 0x3f, 0xec, 0x45, 0x9b, 0x30, 0x85, 0x08,
+	0x6f, 0xa1, 0xa2, 0x0d, 0x2e, 0x70, 0xb0, 0xd4, 0x90, 0x69, 0xd9, 0x54, 0x4d, 0x37, 0x35, 0xa5,
+	0x22, 0x35, 0xaa, 0x60, 0x27, 0x35, 0x31, 0x90, 0x9f, 0x79, 0x64, 0x26, 0x6b, 0xe0, 0xd1, 0x99,
+	0x17, 0x0f, 0xe4, 0x67, 0x1e, 0xbe, 0x85, 0x50, 0xcb, 0xef, 0x06, 0xb4, 0xc5, 0xc5, 0x1f, 0x7d,
+	0x56, 0xb6, 0xfc, 0x99, 0x6e, 0xd9, 0x8a, 0xd7, 0xa3, 0xce, 0x81, 0x16, 0x7c, 0x1b, 0x15, 0x5c,
+	0xa0, 0x21, 0x58, 0x6d, 0x46, 0x3d, 0x4e, 0xe6, 0xb2, 0x08, 0xfb, 0x42, 0xb0, 0x23, 0xd6, 0x63,
+	0x82, 0x1b, 0x97, 0xc4, 0x9e, 0x15, 0x81, 0x41, 0xdf, 0x3f, 0x05, 0x92, 0xcf, 0xda, 0xb3, 0x44,
+	0x98, 0x52, 0x10, 0xef, 0xd9, 0x4d, 0x6a, 0xe2, 0x58, 0xa8, 0x4b, 0x59, 0x97, 0xa0, 0xac, 0x63,
+	0x69, 0x88, 0xa5, 0xf8, 0x58, 0xa4, 0x10, 0x37, 0x50, 0x41, 0x26, 0x0e, 0x3c, 0x7a, 0xec, 0x02,
+	0xf9, 0x9a, 0xb9, 0xf7, 0x46, 0x8f, 0x77, 0xb6, 0xa5, 0x20, 0x76, 0x4e, 0xe3, 0x12, 0x6e, 0x22,
+	0x99, 0x4f, 0xcb, 0x76, 0x42, 0xc9, 0xf8, 0x3e, 0x9b, 0x65, 0x5d, 0x30, 0x9a, 0x4a, 0x11, 0x5b,
+	0xa7, 0x49, 0x0d, 0x1f, 0x28, 0x0a, 0x78, 0xdc, 0x69, 0x51, 0x0e, 0xe4, 0x87, 0xa2, 0xfc, 0x9b,
+	0xa6, 0x44, 0xb9, 0x6f, 0x0c, 0x48, 0x23, 0x5c, 0xaa, 0x1f, 0x6f, 0xeb, 0x4f, 0x49, 0x7c, 0x5b,
+	0x16, 0xb5, 0x6d, 0xf2, 0x7a, 0x6e, 0x9c, 0xad, 0xbb, 0x21, 0xb0, 0x86, 0x6d, 0xa7, 0x6c, 0xe9,
+	0x1a, 0x3e, 0x40, 0x0b, 0x09, 0x46, 0xc5, 0x8b, 0xbc, 0x51, 0xa4, 0xb5, 0x6c, 0x92, 0xce, 0xa5,
+	0x86, 0x95, 0x68, 0xaa, 0x9c, 0xb6, 0xd5, 0x06, 0x4e, 0xde, 0x5e, 0x69, 0x6b, 0x07, 0xf8, 0x88,
+	0xad, 0x1d, 0xe0, 0xb8, 0x8d, 0xfe, 0x48, 0x30, 0xad, 0x8e, 0x08, 0xbc, 0x15, 0xd0, 0x30, 0x7c,
+	0xec, 0x33, 0x9b, 0xbc, 0x53, 0xc8, 0xff, 0xb3, 0x91, 0x5b, 0x52, 0x7d, 0xa8, 0xc5, 0x11, 0xfd,
+	0x37, 0x9a, 0xb9, 0x8c, 0xef, 0xa1, 0xa5, 0x01, 0xbf, 0x22, 0xa9, 0x16, 0xf3, 0x5d, 0x20, 0x17,
+	0x6a, 0xc6, 0xdf, 0x63, 0x6c, 0xcb, 0x94, 0xfb, 0xc9, 0x51, 0xff, 0x4a, 0x87, 0x57, 0xf0, 0x7d,
+	0xb4, 0x9c, 0x90, 0x55, 0xe8, 0x15, 0xfa, 0xbd, 0x42, 0xff, 0x93, 0x8d, 0xd6, 0xe9, 0x1f, 0x60,
+	0x63, 0x3a, 0xb2, 0x84, 0x77, 0x51, 0x29, 0x81, 0xbb, 0x4e, 0xc8, 0xc9, 0x07, 0x45, 0x5d, 0xcd,
+	0xa6, 0xee, 0x3b, 0x21, 0x4f, 0xe5, 0x28, 0x2a, 0xc6, 0x24, 0x61, 0x4d, 0x91, 0x3e, 0x8e, 0x25,
+	0x89, 0xd1, 0x23, 0xa4, 0xa8, 0x18, 0x1f, 0xbd, 0x24, 0x89, 0x44, 0x3e, 0xcb, 0x8f, 0x3b, 0x7a,
+	0xd1, 0x33, 0x9c, 0x48, 0x5d, 0x8b, 0x13, 0x29, 0x31, 0x3a, 0x91, 0xcf, 0xf3, 0xe3, 0x12, 0x29,
+	0xba, 0x32, 0x12, 0x99, 0x94, 0xd3, 0xb6, 0x44, 0x22, 0x5f, 0x5c, 0x69, 0x6b, 0x38, 0x91, 0xba,
+	0x86, 0x1f, 0xa2, 0xf2, 0x00, 0x46, 0x06, 0x25, 0x00, 0xd6, 0x75, 0x42, 0x79, 0x8f, 0xbd, 0x54,
+	0xcc, 0x6b, 0x63, 0x98, 0x42, 0x7e, 0x18, 0xab, 0x23, 0xfe, 0xef, 0x34, 0x7b, 0x1d, 0x77, 0xd1,
+	0x4a, 0x32, 0x4b, 0x47, 0x67, 0x60, 0xd8, 0x2b, 0x35, 0xec, 0x7a, 0xf6, 0x30, 0x95, 0x92, 0xd1,
+	0x69, 0x84, 0x8e, 0x11, 0xd4, 0x7e, 0x41, 0xf3, 0xdb, 0xdd, 0x80, 0x3f, 0x31, 0x21, 0x0c, 0x7c,
+	0x2f, 0x84, 0x5a, 0x80, 0x56, 0xae, 0xf8, 0x47, 0x84, 0x31, 0x9a, 0x92, 0xb7, 0xbb, 0x21, 0x6f,
+	0x77, 0xf9, 0x2c, 0x6e, 0xfd, 0xf8, 0xfb, 0xd4, 0xb7, 0x7e, 0xf4, 0x8e, 0x57, 0x51, 0x31, 0x74,
+	0xba, 0x81, 0x0b, 0x16, 0xf7, 0x4f, 0x41, 0x5d, 0xfa, 0x79, 0xb3, 0xa0, 0x6a, 0x47, 0xa2, 0x74,
+	0x67, 0xe9, 0xfc, 0x73, 0x65, 0xe2, 0xfc, 0xb2, 0x62, 0x5c, 0x5c, 0x56, 0x8c, 0x4f, 0x97, 0x15,
+	0xe3, 0xe9, 0x97, 0xca, 0xc4, 0xf1, 0x8c, 0xfc, 0xc9, 0xb1, 0xf9, 0x33, 0x00, 0x00, 0xff, 0xff,
+	0xa0, 0xbb, 0x20, 0x2c, 0xca, 0x08, 0x00, 0x00,
+}
+
+func (m *RequestHeader) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RequestHeader) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.AuthRevision != 0 {
+		i = encodeVarintRaftInternal(dAtA, i, uint64(m.AuthRevision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Username) > 0 {
+		i -= len(m.Username)
+		copy(dAtA[i:], m.Username)
+		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Username)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.ID != 0 {
+		i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *InternalRaftRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *InternalRaftRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *InternalRaftRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.AuthRoleRevokePermission != nil {
+		{
+			size, err := m.AuthRoleRevokePermission.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0xa2
+	}
+	if m.AuthRoleGrantPermission != nil {
+		{
+			size, err := m.AuthRoleGrantPermission.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0x9a
+	}
+	if m.AuthRoleGet != nil {
+		{
+			size, err := m.AuthRoleGet.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0x92
+	}
+	if m.AuthRoleDelete != nil {
+		{
+			size, err := m.AuthRoleDelete.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0x8a
+	}
+	if m.AuthRoleAdd != nil {
+		{
+			size, err := m.AuthRoleAdd.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4b
+		i--
+		dAtA[i] = 0x82
+	}
+	if m.AuthRoleList != nil {
+		{
+			size, err := m.AuthRoleList.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x45
+		i--
+		dAtA[i] = 0x9a
+	}
+	if m.AuthUserList != nil {
+		{
+			size, err := m.AuthUserList.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x45
+		i--
+		dAtA[i] = 0x92
+	}
+	if m.AuthUserRevokeRole != nil {
+		{
+			size, err := m.AuthUserRevokeRole.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x45
+		i--
+		dAtA[i] = 0x8a
+	}
+	if m.AuthUserGrantRole != nil {
+		{
+			size, err := m.AuthUserGrantRole.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x45
+		i--
+		dAtA[i] = 0x82
+	}
+	if m.AuthUserChangePassword != nil {
+		{
+			size, err := m.AuthUserChangePassword.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x44
+		i--
+		dAtA[i] = 0xfa
+	}
+	if m.AuthUserGet != nil {
+		{
+			size, err := m.AuthUserGet.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x44
+		i--
+		dAtA[i] = 0xf2
+	}
+	if m.AuthUserDelete != nil {
+		{
+			size, err := m.AuthUserDelete.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x44
+		i--
+		dAtA[i] = 0xea
+	}
+	if m.AuthUserAdd != nil {
+		{
+			size, err := m.AuthUserAdd.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x44
+		i--
+		dAtA[i] = 0xe2
+	}
+	if m.Authenticate != nil {
+		{
+			size, err := m.Authenticate.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3f
+		i--
+		dAtA[i] = 0xa2
+	}
+	if m.AuthDisable != nil {
+		{
+			size, err := m.AuthDisable.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3f
+		i--
+		dAtA[i] = 0x9a
+	}
+	if m.AuthEnable != nil {
+		{
+			size, err := m.AuthEnable.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3e
+		i--
+		dAtA[i] = 0xc2
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x6
+		i--
+		dAtA[i] = 0xa2
+	}
+	if m.Alarm != nil {
+		{
+			size, err := m.Alarm.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x52
+	}
+	if m.LeaseRevoke != nil {
+		{
+			size, err := m.LeaseRevoke.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x4a
+	}
+	if m.LeaseGrant != nil {
+		{
+			size, err := m.LeaseGrant.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x42
+	}
+	if m.Compaction != nil {
+		{
+			size, err := m.Compaction.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x3a
+	}
+	if m.Txn != nil {
+		{
+			size, err := m.Txn.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.DeleteRange != nil {
+		{
+			size, err := m.DeleteRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.Put != nil {
+		{
+			size, err := m.Put.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.Range != nil {
+		{
+			size, err := m.Range.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.V2 != nil {
+		{
+			size, err := m.V2.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRaftInternal(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.ID != 0 {
+		i = encodeVarintRaftInternal(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *EmptyResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *EmptyResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *EmptyResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *InternalAuthenticateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *InternalAuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *InternalAuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.SimpleToken) > 0 {
+		i -= len(m.SimpleToken)
+		copy(dAtA[i:], m.SimpleToken)
+		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.SimpleToken)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRaftInternal(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintRaftInternal(dAtA []byte, offset int, v uint64) int {
+	offset -= sovRaftInternal(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *RequestHeader) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRaftInternal(uint64(m.ID))
+	}
+	l = len(m.Username)
+	if l > 0 {
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRevision != 0 {
+		n += 1 + sovRaftInternal(uint64(m.AuthRevision))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *InternalRaftRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRaftInternal(uint64(m.ID))
+	}
+	if m.V2 != nil {
+		l = m.V2.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Range != nil {
+		l = m.Range.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Put != nil {
+		l = m.Put.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.DeleteRange != nil {
+		l = m.DeleteRange.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Txn != nil {
+		l = m.Txn.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Compaction != nil {
+		l = m.Compaction.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.LeaseGrant != nil {
+		l = m.LeaseGrant.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.LeaseRevoke != nil {
+		l = m.LeaseRevoke.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Alarm != nil {
+		l = m.Alarm.Size()
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthEnable != nil {
+		l = m.AuthEnable.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthDisable != nil {
+		l = m.AuthDisable.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.Authenticate != nil {
+		l = m.Authenticate.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserAdd != nil {
+		l = m.AuthUserAdd.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserDelete != nil {
+		l = m.AuthUserDelete.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserGet != nil {
+		l = m.AuthUserGet.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserChangePassword != nil {
+		l = m.AuthUserChangePassword.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserGrantRole != nil {
+		l = m.AuthUserGrantRole.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserRevokeRole != nil {
+		l = m.AuthUserRevokeRole.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthUserList != nil {
+		l = m.AuthUserList.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleList != nil {
+		l = m.AuthRoleList.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleAdd != nil {
+		l = m.AuthRoleAdd.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleDelete != nil {
+		l = m.AuthRoleDelete.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleGet != nil {
+		l = m.AuthRoleGet.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleGrantPermission != nil {
+		l = m.AuthRoleGrantPermission.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.AuthRoleRevokePermission != nil {
+		l = m.AuthRoleRevokePermission.Size()
+		n += 2 + l + sovRaftInternal(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *EmptyResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *InternalAuthenticateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	l = len(m.SimpleToken)
+	if l > 0 {
+		n += 1 + l + sovRaftInternal(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovRaftInternal(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozRaftInternal(x uint64) (n int) {
+	return sovRaftInternal(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *RequestHeader) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RequestHeader: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RequestHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Username = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRevision", wireType)
+			}
+			m.AuthRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.AuthRevision |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaftInternal(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *InternalRaftRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: InternalRaftRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: InternalRaftRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field V2", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.V2 == nil {
+				m.V2 = &Request{}
+			}
+			if err := m.V2.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Range", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Range == nil {
+				m.Range = &RangeRequest{}
+			}
+			if err := m.Range.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Put", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Put == nil {
+				m.Put = &PutRequest{}
+			}
+			if err := m.Put.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DeleteRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.DeleteRange == nil {
+				m.DeleteRange = &DeleteRangeRequest{}
+			}
+			if err := m.DeleteRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Txn", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Txn == nil {
+				m.Txn = &TxnRequest{}
+			}
+			if err := m.Txn.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Compaction", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Compaction == nil {
+				m.Compaction = &CompactionRequest{}
+			}
+			if err := m.Compaction.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LeaseGrant", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LeaseGrant == nil {
+				m.LeaseGrant = &LeaseGrantRequest{}
+			}
+			if err := m.LeaseGrant.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LeaseRevoke", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.LeaseRevoke == nil {
+				m.LeaseRevoke = &LeaseRevokeRequest{}
+			}
+			if err := m.LeaseRevoke.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Alarm == nil {
+				m.Alarm = &AlarmRequest{}
+			}
+			if err := m.Alarm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 100:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &RequestHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1000:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthEnable", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthEnable == nil {
+				m.AuthEnable = &AuthEnableRequest{}
+			}
+			if err := m.AuthEnable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1011:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthDisable", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthDisable == nil {
+				m.AuthDisable = &AuthDisableRequest{}
+			}
+			if err := m.AuthDisable.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1012:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Authenticate", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Authenticate == nil {
+				m.Authenticate = &InternalAuthenticateRequest{}
+			}
+			if err := m.Authenticate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1100:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserAdd", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserAdd == nil {
+				m.AuthUserAdd = &AuthUserAddRequest{}
+			}
+			if err := m.AuthUserAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1101:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserDelete", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserDelete == nil {
+				m.AuthUserDelete = &AuthUserDeleteRequest{}
+			}
+			if err := m.AuthUserDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1102:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGet", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserGet == nil {
+				m.AuthUserGet = &AuthUserGetRequest{}
+			}
+			if err := m.AuthUserGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1103:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserChangePassword", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserChangePassword == nil {
+				m.AuthUserChangePassword = &AuthUserChangePasswordRequest{}
+			}
+			if err := m.AuthUserChangePassword.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1104:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserGrantRole", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserGrantRole == nil {
+				m.AuthUserGrantRole = &AuthUserGrantRoleRequest{}
+			}
+			if err := m.AuthUserGrantRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1105:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserRevokeRole", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserRevokeRole == nil {
+				m.AuthUserRevokeRole = &AuthUserRevokeRoleRequest{}
+			}
+			if err := m.AuthUserRevokeRole.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1106:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthUserList", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthUserList == nil {
+				m.AuthUserList = &AuthUserListRequest{}
+			}
+			if err := m.AuthUserList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1107:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleList", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleList == nil {
+				m.AuthRoleList = &AuthRoleListRequest{}
+			}
+			if err := m.AuthRoleList.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1200:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleAdd", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleAdd == nil {
+				m.AuthRoleAdd = &AuthRoleAddRequest{}
+			}
+			if err := m.AuthRoleAdd.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1201:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleDelete", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleDelete == nil {
+				m.AuthRoleDelete = &AuthRoleDeleteRequest{}
+			}
+			if err := m.AuthRoleDelete.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1202:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGet", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleGet == nil {
+				m.AuthRoleGet = &AuthRoleGetRequest{}
+			}
+			if err := m.AuthRoleGet.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1203:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleGrantPermission", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleGrantPermission == nil {
+				m.AuthRoleGrantPermission = &AuthRoleGrantPermissionRequest{}
+			}
+			if err := m.AuthRoleGrantPermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 1204:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field AuthRoleRevokePermission", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.AuthRoleRevokePermission == nil {
+				m.AuthRoleRevokePermission = &AuthRoleRevokePermissionRequest{}
+			}
+			if err := m.AuthRoleRevokePermission.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaftInternal(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *EmptyResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: EmptyResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: EmptyResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaftInternal(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *InternalAuthenticateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: InternalAuthenticateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: InternalAuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SimpleToken", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.SimpleToken = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaftInternal(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaftInternal
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipRaftInternal(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRaftInternal
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaftInternal
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthRaftInternal
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthRaftInternal
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowRaftInternal
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipRaftInternal(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthRaftInternal
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthRaftInternal = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRaftInternal   = fmt.Errorf("proto: integer overflow")
+)
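
The generated marshal and size methods in this file all bottom out in the two varint helpers defined above: `sovRaftInternal` computes how many bytes a value needs at 7 payload bits per byte, and `encodeVarintRaftInternal` writes the value so that it ends immediately before the given offset, which is how `MarshalToSizedBuffer` fills the buffer from the end backwards. The standalone sketch below reproduces that logic outside the vendored package; the names `varintSize`/`putVarint` and the demo in `main` are illustrative only, not part of the vendored file.

```
// Minimal sketch of protobuf base-128 varint sizing and backwards encoding,
// mirroring sovRaftInternal / encodeVarintRaftInternal above.
package main

import (
	"fmt"
	"math/bits"
)

// varintSize reports how many bytes the varint encoding of v occupies
// (7 payload bits per byte).
func varintSize(v uint64) int {
	return (bits.Len64(v|1) + 6) / 7
}

// putVarint writes v so that it ends just before offset in buf and returns
// the index where the encoding starts.
func putVarint(buf []byte, offset int, v uint64) int {
	offset -= varintSize(v)
	base := offset
	for v >= 1<<7 {
		buf[offset] = uint8(v&0x7f | 0x80) // low 7 bits + continuation bit
		v >>= 7
		offset++
	}
	buf[offset] = uint8(v)
	return base
}

func main() {
	buf := make([]byte, 16)
	start := putVarint(buf, len(buf), 300) // 300 encodes as 0xAC 0x02
	fmt.Printf("% x\n", buf[start:])
}
```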
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
new file mode 100644
index 0000000..25d45d3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal.proto
@@ -0,0 +1,74 @@
+syntax = "proto3";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+import "etcdserver.proto";
+import "rpc.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+
+message RequestHeader {
+  uint64 ID = 1;
+  // username is a username that is associated with an auth token of the gRPC connection
+  string username = 2;
+  // auth_revision is a revision number of auth.authStore. It is not related to the mvcc revision
+  uint64 auth_revision = 3;
+}
+
+// An InternalRaftRequest is the union of all requests which can be
+// sent via raft.
+message InternalRaftRequest {
+  RequestHeader header = 100;
+  uint64 ID = 1;
+
+  Request v2 = 2;
+
+  RangeRequest range = 3;
+  PutRequest put = 4;
+  DeleteRangeRequest delete_range = 5;
+  TxnRequest txn = 6;
+  CompactionRequest compaction = 7;
+
+  LeaseGrantRequest lease_grant = 8;
+  LeaseRevokeRequest lease_revoke = 9;
+
+  AlarmRequest alarm = 10;
+
+  AuthEnableRequest auth_enable = 1000;
+  AuthDisableRequest auth_disable = 1011;
+
+  InternalAuthenticateRequest authenticate = 1012;
+
+  AuthUserAddRequest auth_user_add = 1100;
+  AuthUserDeleteRequest auth_user_delete = 1101;
+  AuthUserGetRequest auth_user_get = 1102;
+  AuthUserChangePasswordRequest auth_user_change_password = 1103;
+  AuthUserGrantRoleRequest auth_user_grant_role = 1104;
+  AuthUserRevokeRoleRequest auth_user_revoke_role = 1105;
+  AuthUserListRequest auth_user_list = 1106;
+  AuthRoleListRequest auth_role_list = 1107;
+
+  AuthRoleAddRequest auth_role_add = 1200;
+  AuthRoleDeleteRequest auth_role_delete = 1201;
+  AuthRoleGetRequest auth_role_get = 1202;
+  AuthRoleGrantPermissionRequest auth_role_grant_permission = 1203;
+  AuthRoleRevokePermissionRequest auth_role_revoke_permission = 1204;
+}
+
+message EmptyResponse {
+}
+
+// What is the difference between AuthenticateRequest (defined in rpc.proto) and InternalAuthenticateRequest?
+// InternalAuthenticateRequest has a member that is filled by etcdserver and shouldn't be user-facing.
+// To avoid misuse of the field, we have an internal version of AuthenticateRequest.
+message InternalAuthenticateRequest {
+  string name = 1;
+  string password = 2;
+
+  // simple_token is generated in the API layer (etcdserver/v3_server.go)
+  string simple_token = 3;
+}
+
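
As the comment above the message notes, InternalRaftRequest is a union: the proposer populates exactly one of the request fields (plus the ID and header), marshals the message into a raft entry, and the applier on the receiving side unmarshals it and dispatches on whichever pointer is non-nil. A hedged sketch of that round trip, using only the generated types and methods from this vendored package; the field values are illustrative and error handling is trimmed.

```
// Sketch: populate one member of the InternalRaftRequest union, marshal it,
// unmarshal it, and dispatch on the non-nil field.
package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	req := pb.InternalRaftRequest{
		ID:     42,
		Header: &pb.RequestHeader{Username: "alice", AuthRevision: 7},
		Put:    &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")},
	}

	data, err := req.Marshal() // uses the generated Marshal/Size code
	if err != nil {
		panic(err)
	}

	var decoded pb.InternalRaftRequest
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}

	// Only the field set by the sender is non-nil after unmarshaling.
	switch {
	case decoded.Put != nil:
		fmt.Printf("put %q=%q\n", decoded.Put.Key, decoded.Put.Value)
	case decoded.Range != nil:
		fmt.Println("range request")
	default:
		fmt.Println("other request")
	}
}
```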
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
new file mode 100644
index 0000000..31e121e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/raft_internal_stringer.go
@@ -0,0 +1,183 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package etcdserverpb
+
+import (
+	"fmt"
+	"strings"
+
+	proto "github.com/golang/protobuf/proto"
+)
+
+// InternalRaftStringer implements a custom proto Stringer:
+// it redacts passwords and replaces value fields with value_size fields.
+type InternalRaftStringer struct {
+	Request *InternalRaftRequest
+}
+
+func (as *InternalRaftStringer) String() string {
+	switch {
+	case as.Request.LeaseGrant != nil:
+		return fmt.Sprintf("header:<%s> lease_grant:<ttl:%d-second id:%016x>",
+			as.Request.Header.String(),
+			as.Request.LeaseGrant.TTL,
+			as.Request.LeaseGrant.ID,
+		)
+	case as.Request.LeaseRevoke != nil:
+		return fmt.Sprintf("header:<%s> lease_revoke:<id:%016x>",
+			as.Request.Header.String(),
+			as.Request.LeaseRevoke.ID,
+		)
+	case as.Request.Authenticate != nil:
+		return fmt.Sprintf("header:<%s> authenticate:<name:%s simple_token:%s>",
+			as.Request.Header.String(),
+			as.Request.Authenticate.Name,
+			as.Request.Authenticate.SimpleToken,
+		)
+	case as.Request.AuthUserAdd != nil:
+		return fmt.Sprintf("header:<%s> auth_user_add:<name:%s>",
+			as.Request.Header.String(),
+			as.Request.AuthUserAdd.Name,
+		)
+	case as.Request.AuthUserChangePassword != nil:
+		return fmt.Sprintf("header:<%s> auth_user_change_password:<name:%s>",
+			as.Request.Header.String(),
+			as.Request.AuthUserChangePassword.Name,
+		)
+	case as.Request.Put != nil:
+		return fmt.Sprintf("header:<%s> put:<%s>",
+			as.Request.Header.String(),
+			NewLoggablePutRequest(as.Request.Put).String(),
+		)
+	case as.Request.Txn != nil:
+		return fmt.Sprintf("header:<%s> txn:<%s>",
+			as.Request.Header.String(),
+			NewLoggableTxnRequest(as.Request.Txn).String(),
+		)
+	default:
+		// nothing to redact
+	}
+	return as.Request.String()
+}
+
+// txnRequestStringer implements a custom proto String to replace value bytes fields with value size
+// fields in any nested txn and put operations.
+type txnRequestStringer struct {
+	Request *TxnRequest
+}
+
+func NewLoggableTxnRequest(request *TxnRequest) *txnRequestStringer {
+	return &txnRequestStringer{request}
+}
+
+func (as *txnRequestStringer) String() string {
+	var compare []string
+	for _, c := range as.Request.Compare {
+		switch cv := c.TargetUnion.(type) {
+		case *Compare_Value:
+			compare = append(compare, newLoggableValueCompare(c, cv).String())
+		default:
+			// nothing to redact
+			compare = append(compare, c.String())
+		}
+	}
+	var success []string
+	for _, s := range as.Request.Success {
+		success = append(success, newLoggableRequestOp(s).String())
+	}
+	var failure []string
+	for _, f := range as.Request.Failure {
+		failure = append(failure, newLoggableRequestOp(f).String())
+	}
+	return fmt.Sprintf("compare:<%s> success:<%s> failure:<%s>",
+		strings.Join(compare, " "),
+		strings.Join(success, " "),
+		strings.Join(failure, " "),
+	)
+}
+
+// requestOpStringer implements a custom proto String to replace value bytes fields with value
+// size fields in any nested txn and put operations.
+type requestOpStringer struct {
+	Op *RequestOp
+}
+
+func newLoggableRequestOp(op *RequestOp) *requestOpStringer {
+	return &requestOpStringer{op}
+}
+
+func (as *requestOpStringer) String() string {
+	switch op := as.Op.Request.(type) {
+	case *RequestOp_RequestPut:
+		return fmt.Sprintf("request_put:<%s>", NewLoggablePutRequest(op.RequestPut).String())
+	case *RequestOp_RequestTxn:
+		return fmt.Sprintf("request_txn:<%s>", NewLoggableTxnRequest(op.RequestTxn).String())
+	default:
+		// nothing to redact
+	}
+	return as.Op.String()
+}
+
+// loggableValueCompare implements a custom proto String for Compare.Value union member types to
+// replace the value bytes field with a value size field.
+// To preserve proto encoding of the key and range_end bytes, a faked out proto type is used here.
+type loggableValueCompare struct {
+	Result    Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult"`
+	Target    Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget"`
+	Key       []byte                `protobuf:"bytes,3,opt,name=key,proto3"`
+	ValueSize int64                 `protobuf:"varint,7,opt,name=value_size,proto3"`
+	RangeEnd  []byte                `protobuf:"bytes,64,opt,name=range_end,proto3"`
+}
+
+func newLoggableValueCompare(c *Compare, cv *Compare_Value) *loggableValueCompare {
+	return &loggableValueCompare{
+		c.Result,
+		c.Target,
+		c.Key,
+		int64(len(cv.Value)),
+		c.RangeEnd,
+	}
+}
+
+func (m *loggableValueCompare) Reset()         { *m = loggableValueCompare{} }
+func (m *loggableValueCompare) String() string { return proto.CompactTextString(m) }
+func (*loggableValueCompare) ProtoMessage()    {}
+
+// loggablePutRequest implements a custom proto String to replace value bytes field with a value
+// size field.
+// To preserve proto encoding of the key bytes, a faked out proto type is used here.
+type loggablePutRequest struct {
+	Key         []byte `protobuf:"bytes,1,opt,name=key,proto3"`
+	ValueSize   int64  `protobuf:"varint,2,opt,name=value_size,proto3"`
+	Lease       int64  `protobuf:"varint,3,opt,name=lease,proto3"`
+	PrevKv      bool   `protobuf:"varint,4,opt,name=prev_kv,proto3"`
+	IgnoreValue bool   `protobuf:"varint,5,opt,name=ignore_value,proto3"`
+	IgnoreLease bool   `protobuf:"varint,6,opt,name=ignore_lease,proto3"`
+}
+
+func NewLoggablePutRequest(request *PutRequest) *loggablePutRequest {
+	return &loggablePutRequest{
+		request.Key,
+		int64(len(request.Value)),
+		request.Lease,
+		request.PrevKv,
+		request.IgnoreValue,
+		request.IgnoreLease,
+	}
+}
+
+func (m *loggablePutRequest) Reset()         { *m = loggablePutRequest{} }
+func (m *loggablePutRequest) String() string { return proto.CompactTextString(m) }
+func (*loggablePutRequest) ProtoMessage()    {}
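
The stringer helpers above exist so that log lines built from an InternalRaftRequest neither leak the authentication password nor dump large value payloads; values are reported as a `value_size` instead. A brief usage sketch, assuming the vendored import path above; the request contents are illustrative only.

```
// Sketch: log a put request through InternalRaftStringer so the 1 MiB value
// is rendered as value_size rather than raw bytes.
package main

import (
	"fmt"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

func main() {
	req := &pb.InternalRaftRequest{
		Header: &pb.RequestHeader{Username: "alice"},
		Put:    &pb.PutRequest{Key: []byte("k"), Value: make([]byte, 1<<20)},
	}
	fmt.Println((&pb.InternalRaftStringer{Request: req}).String())
}
```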
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
new file mode 100644
index 0000000..a0cff8f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.pb.go
@@ -0,0 +1,24010 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: rpc.proto
+
+package etcdserverpb
+
+import (
+	context "context"
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	authpb "github.com/coreos/etcd/auth/authpb"
+	mvccpb "github.com/coreos/etcd/mvcc/mvccpb"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type AlarmType int32
+
+const (
+	AlarmType_NONE    AlarmType = 0
+	AlarmType_NOSPACE AlarmType = 1
+	AlarmType_CORRUPT AlarmType = 2
+)
+
+var AlarmType_name = map[int32]string{
+	0: "NONE",
+	1: "NOSPACE",
+	2: "CORRUPT",
+}
+
+var AlarmType_value = map[string]int32{
+	"NONE":    0,
+	"NOSPACE": 1,
+	"CORRUPT": 2,
+}
+
+func (x AlarmType) String() string {
+	return proto.EnumName(AlarmType_name, int32(x))
+}
+
+func (AlarmType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{0}
+}
+
+type RangeRequest_SortOrder int32
+
+const (
+	RangeRequest_NONE    RangeRequest_SortOrder = 0
+	RangeRequest_ASCEND  RangeRequest_SortOrder = 1
+	RangeRequest_DESCEND RangeRequest_SortOrder = 2
+)
+
+var RangeRequest_SortOrder_name = map[int32]string{
+	0: "NONE",
+	1: "ASCEND",
+	2: "DESCEND",
+}
+
+var RangeRequest_SortOrder_value = map[string]int32{
+	"NONE":    0,
+	"ASCEND":  1,
+	"DESCEND": 2,
+}
+
+func (x RangeRequest_SortOrder) String() string {
+	return proto.EnumName(RangeRequest_SortOrder_name, int32(x))
+}
+
+func (RangeRequest_SortOrder) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{1, 0}
+}
+
+type RangeRequest_SortTarget int32
+
+const (
+	RangeRequest_KEY     RangeRequest_SortTarget = 0
+	RangeRequest_VERSION RangeRequest_SortTarget = 1
+	RangeRequest_CREATE  RangeRequest_SortTarget = 2
+	RangeRequest_MOD     RangeRequest_SortTarget = 3
+	RangeRequest_VALUE   RangeRequest_SortTarget = 4
+)
+
+var RangeRequest_SortTarget_name = map[int32]string{
+	0: "KEY",
+	1: "VERSION",
+	2: "CREATE",
+	3: "MOD",
+	4: "VALUE",
+}
+
+var RangeRequest_SortTarget_value = map[string]int32{
+	"KEY":     0,
+	"VERSION": 1,
+	"CREATE":  2,
+	"MOD":     3,
+	"VALUE":   4,
+}
+
+func (x RangeRequest_SortTarget) String() string {
+	return proto.EnumName(RangeRequest_SortTarget_name, int32(x))
+}
+
+func (RangeRequest_SortTarget) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{1, 1}
+}
+
+type Compare_CompareResult int32
+
+const (
+	Compare_EQUAL     Compare_CompareResult = 0
+	Compare_GREATER   Compare_CompareResult = 1
+	Compare_LESS      Compare_CompareResult = 2
+	Compare_NOT_EQUAL Compare_CompareResult = 3
+)
+
+var Compare_CompareResult_name = map[int32]string{
+	0: "EQUAL",
+	1: "GREATER",
+	2: "LESS",
+	3: "NOT_EQUAL",
+}
+
+var Compare_CompareResult_value = map[string]int32{
+	"EQUAL":     0,
+	"GREATER":   1,
+	"LESS":      2,
+	"NOT_EQUAL": 3,
+}
+
+func (x Compare_CompareResult) String() string {
+	return proto.EnumName(Compare_CompareResult_name, int32(x))
+}
+
+func (Compare_CompareResult) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{9, 0}
+}
+
+type Compare_CompareTarget int32
+
+const (
+	Compare_VERSION Compare_CompareTarget = 0
+	Compare_CREATE  Compare_CompareTarget = 1
+	Compare_MOD     Compare_CompareTarget = 2
+	Compare_VALUE   Compare_CompareTarget = 3
+	Compare_LEASE   Compare_CompareTarget = 4
+)
+
+var Compare_CompareTarget_name = map[int32]string{
+	0: "VERSION",
+	1: "CREATE",
+	2: "MOD",
+	3: "VALUE",
+	4: "LEASE",
+}
+
+var Compare_CompareTarget_value = map[string]int32{
+	"VERSION": 0,
+	"CREATE":  1,
+	"MOD":     2,
+	"VALUE":   3,
+	"LEASE":   4,
+}
+
+func (x Compare_CompareTarget) String() string {
+	return proto.EnumName(Compare_CompareTarget_name, int32(x))
+}
+
+func (Compare_CompareTarget) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{9, 1}
+}
+
+type WatchCreateRequest_FilterType int32
+
+const (
+	// filter out put event.
+	WatchCreateRequest_NOPUT WatchCreateRequest_FilterType = 0
+	// filter out delete event.
+	WatchCreateRequest_NODELETE WatchCreateRequest_FilterType = 1
+)
+
+var WatchCreateRequest_FilterType_name = map[int32]string{
+	0: "NOPUT",
+	1: "NODELETE",
+}
+
+var WatchCreateRequest_FilterType_value = map[string]int32{
+	"NOPUT":    0,
+	"NODELETE": 1,
+}
+
+func (x WatchCreateRequest_FilterType) String() string {
+	return proto.EnumName(WatchCreateRequest_FilterType_name, int32(x))
+}
+
+func (WatchCreateRequest_FilterType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{21, 0}
+}
+
+type AlarmRequest_AlarmAction int32
+
+const (
+	AlarmRequest_GET        AlarmRequest_AlarmAction = 0
+	AlarmRequest_ACTIVATE   AlarmRequest_AlarmAction = 1
+	AlarmRequest_DEACTIVATE AlarmRequest_AlarmAction = 2
+)
+
+var AlarmRequest_AlarmAction_name = map[int32]string{
+	0: "GET",
+	1: "ACTIVATE",
+	2: "DEACTIVATE",
+}
+
+var AlarmRequest_AlarmAction_value = map[string]int32{
+	"GET":        0,
+	"ACTIVATE":   1,
+	"DEACTIVATE": 2,
+}
+
+func (x AlarmRequest_AlarmAction) String() string {
+	return proto.EnumName(AlarmRequest_AlarmAction_name, int32(x))
+}
+
+func (AlarmRequest_AlarmAction) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{49, 0}
+}
+
+type ResponseHeader struct {
+	// cluster_id is the ID of the cluster which sent the response.
+	ClusterId uint64 `protobuf:"varint,1,opt,name=cluster_id,json=clusterId,proto3" json:"cluster_id,omitempty"`
+	// member_id is the ID of the member which sent the response.
+	MemberId uint64 `protobuf:"varint,2,opt,name=member_id,json=memberId,proto3" json:"member_id,omitempty"`
+	// revision is the key-value store revision when the request was applied.
+	// For watch progress responses, the header.revision indicates progress. All future events
+	// received in this stream are guaranteed to have a higher revision number than the
+	// header.revision number.
+	Revision int64 `protobuf:"varint,3,opt,name=revision,proto3" json:"revision,omitempty"`
+	// raft_term is the raft term when the request was applied.
+	RaftTerm             uint64   `protobuf:"varint,4,opt,name=raft_term,json=raftTerm,proto3" json:"raft_term,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ResponseHeader) Reset()         { *m = ResponseHeader{} }
+func (m *ResponseHeader) String() string { return proto.CompactTextString(m) }
+func (*ResponseHeader) ProtoMessage()    {}
+func (*ResponseHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{0}
+}
+func (m *ResponseHeader) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResponseHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ResponseHeader.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ResponseHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResponseHeader.Merge(m, src)
+}
+func (m *ResponseHeader) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResponseHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResponseHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResponseHeader proto.InternalMessageInfo
+
+func (m *ResponseHeader) GetClusterId() uint64 {
+	if m != nil {
+		return m.ClusterId
+	}
+	return 0
+}
+
+func (m *ResponseHeader) GetMemberId() uint64 {
+	if m != nil {
+		return m.MemberId
+	}
+	return 0
+}
+
+func (m *ResponseHeader) GetRevision() int64 {
+	if m != nil {
+		return m.Revision
+	}
+	return 0
+}
+
+func (m *ResponseHeader) GetRaftTerm() uint64 {
+	if m != nil {
+		return m.RaftTerm
+	}
+	return 0
+}
+
+type RangeRequest struct {
+	// key is the first key for the range. If range_end is not given, the request only looks up key.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// range_end is the upper bound on the requested range [key, range_end).
+	// If range_end is '\0', the range is all keys >= key.
+	// If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+	// then the range request gets all keys prefixed with key.
+	// If both key and range_end are '\0', then the range request returns all keys.
+	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	// limit is a limit on the number of keys returned for the request. When limit is set to 0,
+	// it is treated as no limit.
+	Limit int64 `protobuf:"varint,3,opt,name=limit,proto3" json:"limit,omitempty"`
+	// revision is the point-in-time of the key-value store to use for the range.
+	// If revision is less or equal to zero, the range is over the newest key-value store.
+	// If the revision has been compacted, ErrCompacted is returned as a response.
+	Revision int64 `protobuf:"varint,4,opt,name=revision,proto3" json:"revision,omitempty"`
+	// sort_order is the order for returned sorted results.
+	SortOrder RangeRequest_SortOrder `protobuf:"varint,5,opt,name=sort_order,json=sortOrder,proto3,enum=etcdserverpb.RangeRequest_SortOrder" json:"sort_order,omitempty"`
+	// sort_target is the key-value field to use for sorting.
+	SortTarget RangeRequest_SortTarget `protobuf:"varint,6,opt,name=sort_target,json=sortTarget,proto3,enum=etcdserverpb.RangeRequest_SortTarget" json:"sort_target,omitempty"`
+	// serializable sets the range request to use serializable member-local reads.
+	// Range requests are linearizable by default; linearizable requests have higher
+	// latency and lower throughput than serializable requests but reflect the current
+	// consensus of the cluster. For better performance, in exchange for possible stale reads,
+	// a serializable range request is served locally without needing to reach consensus
+	// with other nodes in the cluster.
+	Serializable bool `protobuf:"varint,7,opt,name=serializable,proto3" json:"serializable,omitempty"`
+	// keys_only when set returns only the keys and not the values.
+	KeysOnly bool `protobuf:"varint,8,opt,name=keys_only,json=keysOnly,proto3" json:"keys_only,omitempty"`
+	// count_only when set returns only the count of the keys in the range.
+	CountOnly bool `protobuf:"varint,9,opt,name=count_only,json=countOnly,proto3" json:"count_only,omitempty"`
+	// min_mod_revision is the lower bound for returned key mod revisions; all keys with
+	// lesser mod revisions will be filtered away.
+	MinModRevision int64 `protobuf:"varint,10,opt,name=min_mod_revision,json=minModRevision,proto3" json:"min_mod_revision,omitempty"`
+	// max_mod_revision is the upper bound for returned key mod revisions; all keys with
+	// greater mod revisions will be filtered away.
+	MaxModRevision int64 `protobuf:"varint,11,opt,name=max_mod_revision,json=maxModRevision,proto3" json:"max_mod_revision,omitempty"`
+	// min_create_revision is the lower bound for returned key create revisions; all keys with
+	// lesser create revisions will be filtered away.
+	MinCreateRevision int64 `protobuf:"varint,12,opt,name=min_create_revision,json=minCreateRevision,proto3" json:"min_create_revision,omitempty"`
+	// max_create_revision is the upper bound for returned key create revisions; all keys with
+	// greater create revisions will be filtered away.
+	MaxCreateRevision    int64    `protobuf:"varint,13,opt,name=max_create_revision,json=maxCreateRevision,proto3" json:"max_create_revision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
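+
+// NOTE: the sketch below is editorial illustration only and is not part of the
+// generated etcdserverpb API. It shows one way to derive a range_end that covers
+// every key sharing a prefix, following the "key plus one" rule documented on
+// the RangeEnd field above; the helper name prefixRangeEnd is hypothetical.
+//
+//	func prefixRangeEnd(prefix []byte) []byte {
+//		end := append([]byte(nil), prefix...)
+//		for i := len(end) - 1; i >= 0; i-- {
+//			if end[i] < 0xff {
+//				end[i]++ // e.g. "aa" -> "ab"
+//				return end[:i+1]
+//			}
+//		}
+//		// all bytes are 0xff: range_end "\0" means "all keys >= prefix".
+//		return []byte{0}
+//	}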
+
+func (m *RangeRequest) Reset()         { *m = RangeRequest{} }
+func (m *RangeRequest) String() string { return proto.CompactTextString(m) }
+func (*RangeRequest) ProtoMessage()    {}
+func (*RangeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{1}
+}
+func (m *RangeRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RangeRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RangeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RangeRequest.Merge(m, src)
+}
+func (m *RangeRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *RangeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_RangeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RangeRequest proto.InternalMessageInfo
+
+func (m *RangeRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *RangeRequest) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
+func (m *RangeRequest) GetLimit() int64 {
+	if m != nil {
+		return m.Limit
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetRevision() int64 {
+	if m != nil {
+		return m.Revision
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetSortOrder() RangeRequest_SortOrder {
+	if m != nil {
+		return m.SortOrder
+	}
+	return RangeRequest_NONE
+}
+
+func (m *RangeRequest) GetSortTarget() RangeRequest_SortTarget {
+	if m != nil {
+		return m.SortTarget
+	}
+	return RangeRequest_KEY
+}
+
+func (m *RangeRequest) GetSerializable() bool {
+	if m != nil {
+		return m.Serializable
+	}
+	return false
+}
+
+func (m *RangeRequest) GetKeysOnly() bool {
+	if m != nil {
+		return m.KeysOnly
+	}
+	return false
+}
+
+func (m *RangeRequest) GetCountOnly() bool {
+	if m != nil {
+		return m.CountOnly
+	}
+	return false
+}
+
+func (m *RangeRequest) GetMinModRevision() int64 {
+	if m != nil {
+		return m.MinModRevision
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetMaxModRevision() int64 {
+	if m != nil {
+		return m.MaxModRevision
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetMinCreateRevision() int64 {
+	if m != nil {
+		return m.MinCreateRevision
+	}
+	return 0
+}
+
+func (m *RangeRequest) GetMaxCreateRevision() int64 {
+	if m != nil {
+		return m.MaxCreateRevision
+	}
+	return 0
+}
+
+type RangeResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// kvs is the list of key-value pairs matched by the range request.
+	// kvs is empty when count is requested.
+	Kvs []*mvccpb.KeyValue `protobuf:"bytes,2,rep,name=kvs,proto3" json:"kvs,omitempty"`
+	// more indicates if there are more keys to return in the requested range.
+	More bool `protobuf:"varint,3,opt,name=more,proto3" json:"more,omitempty"`
+	// count is set to the number of keys within the range when requested.
+	Count                int64    `protobuf:"varint,4,opt,name=count,proto3" json:"count,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *RangeResponse) Reset()         { *m = RangeResponse{} }
+func (m *RangeResponse) String() string { return proto.CompactTextString(m) }
+func (*RangeResponse) ProtoMessage()    {}
+func (*RangeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{2}
+}
+func (m *RangeResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RangeResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RangeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RangeResponse.Merge(m, src)
+}
+func (m *RangeResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *RangeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_RangeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RangeResponse proto.InternalMessageInfo
+
+func (m *RangeResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *RangeResponse) GetKvs() []*mvccpb.KeyValue {
+	if m != nil {
+		return m.Kvs
+	}
+	return nil
+}
+
+func (m *RangeResponse) GetMore() bool {
+	if m != nil {
+		return m.More
+	}
+	return false
+}
+
+func (m *RangeResponse) GetCount() int64 {
+	if m != nil {
+		return m.Count
+	}
+	return 0
+}
+
+type PutRequest struct {
+	// key is the key, in bytes, to put into the key-value store.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// value is the value, in bytes, to associate with the key in the key-value store.
+	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	// lease is the lease ID to associate with the key in the key-value store. A lease
+	// value of 0 indicates no lease.
+	Lease int64 `protobuf:"varint,3,opt,name=lease,proto3" json:"lease,omitempty"`
+	// If prev_kv is set, etcd gets the previous key-value pair before changing it.
+	// The previous key-value pair will be returned in the put response.
+	PrevKv bool `protobuf:"varint,4,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	// If ignore_value is set, etcd updates the key using its current value.
+	// Returns an error if the key does not exist.
+	IgnoreValue bool `protobuf:"varint,5,opt,name=ignore_value,json=ignoreValue,proto3" json:"ignore_value,omitempty"`
+	// If ignore_lease is set, etcd updates the key using its current lease.
+	// Returns an error if the key does not exist.
+	IgnoreLease          bool     `protobuf:"varint,6,opt,name=ignore_lease,json=ignoreLease,proto3" json:"ignore_lease,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *PutRequest) Reset()         { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage()    {}
+func (*PutRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{3}
+}
+func (m *PutRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PutRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PutRequest.Merge(m, src)
+}
+func (m *PutRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *PutRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_PutRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutRequest proto.InternalMessageInfo
+
+func (m *PutRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *PutRequest) GetValue() []byte {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *PutRequest) GetLease() int64 {
+	if m != nil {
+		return m.Lease
+	}
+	return 0
+}
+
+func (m *PutRequest) GetPrevKv() bool {
+	if m != nil {
+		return m.PrevKv
+	}
+	return false
+}
+
+func (m *PutRequest) GetIgnoreValue() bool {
+	if m != nil {
+		return m.IgnoreValue
+	}
+	return false
+}
+
+func (m *PutRequest) GetIgnoreLease() bool {
+	if m != nil {
+		return m.IgnoreLease
+	}
+	return false
+}
+
+type PutResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// if prev_kv is set in the request, the previous key-value pair will be returned.
+	PrevKv               *mvccpb.KeyValue `protobuf:"bytes,2,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *PutResponse) Reset()         { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage()    {}
+func (*PutResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{4}
+}
+func (m *PutResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *PutResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PutResponse.Merge(m, src)
+}
+func (m *PutResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *PutResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_PutResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PutResponse proto.InternalMessageInfo
+
+func (m *PutResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *PutResponse) GetPrevKv() *mvccpb.KeyValue {
+	if m != nil {
+		return m.PrevKv
+	}
+	return nil
+}
+
+type DeleteRangeRequest struct {
+	// key is the first key to delete in the range.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// range_end is the key following the last key to delete for the range [key, range_end).
+	// If range_end is not given, the range is defined to contain only the key argument.
+	// If range_end is one bit larger than the given key, then the range is all the keys
+	// with the prefix (the given key).
+	// If range_end is '\0', the range is all keys greater than or equal to the key argument.
+	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	// If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
+	// The previous key-value pairs will be returned in the delete response.
+	PrevKv               bool     `protobuf:"varint,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeleteRangeRequest) Reset()         { *m = DeleteRangeRequest{} }
+func (m *DeleteRangeRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRangeRequest) ProtoMessage()    {}
+func (*DeleteRangeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{5}
+}
+func (m *DeleteRangeRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteRangeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteRangeRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteRangeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteRangeRequest.Merge(m, src)
+}
+func (m *DeleteRangeRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteRangeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteRangeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteRangeRequest proto.InternalMessageInfo
+
+func (m *DeleteRangeRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *DeleteRangeRequest) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
+func (m *DeleteRangeRequest) GetPrevKv() bool {
+	if m != nil {
+		return m.PrevKv
+	}
+	return false
+}
+
+type DeleteRangeResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// deleted is the number of keys deleted by the delete range request.
+	Deleted int64 `protobuf:"varint,2,opt,name=deleted,proto3" json:"deleted,omitempty"`
+	// if prev_kv is set in the request, the previous key-value pairs will be returned.
+	PrevKvs              []*mvccpb.KeyValue `protobuf:"bytes,3,rep,name=prev_kvs,json=prevKvs,proto3" json:"prev_kvs,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *DeleteRangeResponse) Reset()         { *m = DeleteRangeResponse{} }
+func (m *DeleteRangeResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteRangeResponse) ProtoMessage()    {}
+func (*DeleteRangeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{6}
+}
+func (m *DeleteRangeResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DeleteRangeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DeleteRangeResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DeleteRangeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeleteRangeResponse.Merge(m, src)
+}
+func (m *DeleteRangeResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *DeleteRangeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeleteRangeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeleteRangeResponse proto.InternalMessageInfo
+
+func (m *DeleteRangeResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *DeleteRangeResponse) GetDeleted() int64 {
+	if m != nil {
+		return m.Deleted
+	}
+	return 0
+}
+
+func (m *DeleteRangeResponse) GetPrevKvs() []*mvccpb.KeyValue {
+	if m != nil {
+		return m.PrevKvs
+	}
+	return nil
+}
+
+type RequestOp struct {
+	// request is a union of request types accepted by a transaction.
+	//
+	// Types that are valid to be assigned to Request:
+	//	*RequestOp_RequestRange
+	//	*RequestOp_RequestPut
+	//	*RequestOp_RequestDeleteRange
+	//	*RequestOp_RequestTxn
+	Request              isRequestOp_Request `protobuf_oneof:"request"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *RequestOp) Reset()         { *m = RequestOp{} }
+func (m *RequestOp) String() string { return proto.CompactTextString(m) }
+func (*RequestOp) ProtoMessage()    {}
+func (*RequestOp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{7}
+}
+func (m *RequestOp) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *RequestOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_RequestOp.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *RequestOp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RequestOp.Merge(m, src)
+}
+func (m *RequestOp) XXX_Size() int {
+	return m.Size()
+}
+func (m *RequestOp) XXX_DiscardUnknown() {
+	xxx_messageInfo_RequestOp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RequestOp proto.InternalMessageInfo
+
+type isRequestOp_Request interface {
+	isRequestOp_Request()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type RequestOp_RequestRange struct {
+	RequestRange *RangeRequest `protobuf:"bytes,1,opt,name=request_range,json=requestRange,proto3,oneof"`
+}
+type RequestOp_RequestPut struct {
+	RequestPut *PutRequest `protobuf:"bytes,2,opt,name=request_put,json=requestPut,proto3,oneof"`
+}
+type RequestOp_RequestDeleteRange struct {
+	RequestDeleteRange *DeleteRangeRequest `protobuf:"bytes,3,opt,name=request_delete_range,json=requestDeleteRange,proto3,oneof"`
+}
+type RequestOp_RequestTxn struct {
+	RequestTxn *TxnRequest `protobuf:"bytes,4,opt,name=request_txn,json=requestTxn,proto3,oneof"`
+}
+
+func (*RequestOp_RequestRange) isRequestOp_Request()       {}
+func (*RequestOp_RequestPut) isRequestOp_Request()         {}
+func (*RequestOp_RequestDeleteRange) isRequestOp_Request() {}
+func (*RequestOp_RequestTxn) isRequestOp_Request()         {}
+
+func (m *RequestOp) GetRequest() isRequestOp_Request {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+func (m *RequestOp) GetRequestRange() *RangeRequest {
+	if x, ok := m.GetRequest().(*RequestOp_RequestRange); ok {
+		return x.RequestRange
+	}
+	return nil
+}
+
+func (m *RequestOp) GetRequestPut() *PutRequest {
+	if x, ok := m.GetRequest().(*RequestOp_RequestPut); ok {
+		return x.RequestPut
+	}
+	return nil
+}
+
+func (m *RequestOp) GetRequestDeleteRange() *DeleteRangeRequest {
+	if x, ok := m.GetRequest().(*RequestOp_RequestDeleteRange); ok {
+		return x.RequestDeleteRange
+	}
+	return nil
+}
+
+func (m *RequestOp) GetRequestTxn() *TxnRequest {
+	if x, ok := m.GetRequest().(*RequestOp_RequestTxn); ok {
+		return x.RequestTxn
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*RequestOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _RequestOp_OneofMarshaler, _RequestOp_OneofUnmarshaler, _RequestOp_OneofSizer, []interface{}{
+		(*RequestOp_RequestRange)(nil),
+		(*RequestOp_RequestPut)(nil),
+		(*RequestOp_RequestDeleteRange)(nil),
+		(*RequestOp_RequestTxn)(nil),
+	}
+}
+
+func _RequestOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*RequestOp)
+	// request
+	switch x := m.Request.(type) {
+	case *RequestOp_RequestRange:
+		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.RequestRange); err != nil {
+			return err
+		}
+	case *RequestOp_RequestPut:
+		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.RequestPut); err != nil {
+			return err
+		}
+	case *RequestOp_RequestDeleteRange:
+		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.RequestDeleteRange); err != nil {
+			return err
+		}
+	case *RequestOp_RequestTxn:
+		_ = b.EncodeVarint(4<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.RequestTxn); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("RequestOp.Request has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _RequestOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*RequestOp)
+	switch tag {
+	case 1: // request.request_range
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(RangeRequest)
+		err := b.DecodeMessage(msg)
+		m.Request = &RequestOp_RequestRange{msg}
+		return true, err
+	case 2: // request.request_put
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(PutRequest)
+		err := b.DecodeMessage(msg)
+		m.Request = &RequestOp_RequestPut{msg}
+		return true, err
+	case 3: // request.request_delete_range
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(DeleteRangeRequest)
+		err := b.DecodeMessage(msg)
+		m.Request = &RequestOp_RequestDeleteRange{msg}
+		return true, err
+	case 4: // request.request_txn
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(TxnRequest)
+		err := b.DecodeMessage(msg)
+		m.Request = &RequestOp_RequestTxn{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _RequestOp_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*RequestOp)
+	// request
+	switch x := m.Request.(type) {
+	case *RequestOp_RequestRange:
+		s := proto.Size(x.RequestRange)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *RequestOp_RequestPut:
+		s := proto.Size(x.RequestPut)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *RequestOp_RequestDeleteRange:
+		s := proto.Size(x.RequestDeleteRange)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *RequestOp_RequestTxn:
+		s := proto.Size(x.RequestTxn)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type ResponseOp struct {
+	// response is a union of response types returned by a transaction.
+	//
+	// Types that are valid to be assigned to Response:
+	//	*ResponseOp_ResponseRange
+	//	*ResponseOp_ResponsePut
+	//	*ResponseOp_ResponseDeleteRange
+	//	*ResponseOp_ResponseTxn
+	Response             isResponseOp_Response `protobuf_oneof:"response"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *ResponseOp) Reset()         { *m = ResponseOp{} }
+func (m *ResponseOp) String() string { return proto.CompactTextString(m) }
+func (*ResponseOp) ProtoMessage()    {}
+func (*ResponseOp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{8}
+}
+func (m *ResponseOp) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ResponseOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ResponseOp.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ResponseOp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ResponseOp.Merge(m, src)
+}
+func (m *ResponseOp) XXX_Size() int {
+	return m.Size()
+}
+func (m *ResponseOp) XXX_DiscardUnknown() {
+	xxx_messageInfo_ResponseOp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ResponseOp proto.InternalMessageInfo
+
+type isResponseOp_Response interface {
+	isResponseOp_Response()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type ResponseOp_ResponseRange struct {
+	ResponseRange *RangeResponse `protobuf:"bytes,1,opt,name=response_range,json=responseRange,proto3,oneof"`
+}
+type ResponseOp_ResponsePut struct {
+	ResponsePut *PutResponse `protobuf:"bytes,2,opt,name=response_put,json=responsePut,proto3,oneof"`
+}
+type ResponseOp_ResponseDeleteRange struct {
+	ResponseDeleteRange *DeleteRangeResponse `protobuf:"bytes,3,opt,name=response_delete_range,json=responseDeleteRange,proto3,oneof"`
+}
+type ResponseOp_ResponseTxn struct {
+	ResponseTxn *TxnResponse `protobuf:"bytes,4,opt,name=response_txn,json=responseTxn,proto3,oneof"`
+}
+
+func (*ResponseOp_ResponseRange) isResponseOp_Response()       {}
+func (*ResponseOp_ResponsePut) isResponseOp_Response()         {}
+func (*ResponseOp_ResponseDeleteRange) isResponseOp_Response() {}
+func (*ResponseOp_ResponseTxn) isResponseOp_Response()         {}
+
+func (m *ResponseOp) GetResponse() isResponseOp_Response {
+	if m != nil {
+		return m.Response
+	}
+	return nil
+}
+
+func (m *ResponseOp) GetResponseRange() *RangeResponse {
+	if x, ok := m.GetResponse().(*ResponseOp_ResponseRange); ok {
+		return x.ResponseRange
+	}
+	return nil
+}
+
+func (m *ResponseOp) GetResponsePut() *PutResponse {
+	if x, ok := m.GetResponse().(*ResponseOp_ResponsePut); ok {
+		return x.ResponsePut
+	}
+	return nil
+}
+
+func (m *ResponseOp) GetResponseDeleteRange() *DeleteRangeResponse {
+	if x, ok := m.GetResponse().(*ResponseOp_ResponseDeleteRange); ok {
+		return x.ResponseDeleteRange
+	}
+	return nil
+}
+
+func (m *ResponseOp) GetResponseTxn() *TxnResponse {
+	if x, ok := m.GetResponse().(*ResponseOp_ResponseTxn); ok {
+		return x.ResponseTxn
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*ResponseOp) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _ResponseOp_OneofMarshaler, _ResponseOp_OneofUnmarshaler, _ResponseOp_OneofSizer, []interface{}{
+		(*ResponseOp_ResponseRange)(nil),
+		(*ResponseOp_ResponsePut)(nil),
+		(*ResponseOp_ResponseDeleteRange)(nil),
+		(*ResponseOp_ResponseTxn)(nil),
+	}
+}
+
+func _ResponseOp_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*ResponseOp)
+	// response
+	switch x := m.Response.(type) {
+	case *ResponseOp_ResponseRange:
+		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ResponseRange); err != nil {
+			return err
+		}
+	case *ResponseOp_ResponsePut:
+		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ResponsePut); err != nil {
+			return err
+		}
+	case *ResponseOp_ResponseDeleteRange:
+		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ResponseDeleteRange); err != nil {
+			return err
+		}
+	case *ResponseOp_ResponseTxn:
+		_ = b.EncodeVarint(4<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ResponseTxn); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("ResponseOp.Response has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _ResponseOp_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*ResponseOp)
+	switch tag {
+	case 1: // response.response_range
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(RangeResponse)
+		err := b.DecodeMessage(msg)
+		m.Response = &ResponseOp_ResponseRange{msg}
+		return true, err
+	case 2: // response.response_put
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(PutResponse)
+		err := b.DecodeMessage(msg)
+		m.Response = &ResponseOp_ResponsePut{msg}
+		return true, err
+	case 3: // response.response_delete_range
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(DeleteRangeResponse)
+		err := b.DecodeMessage(msg)
+		m.Response = &ResponseOp_ResponseDeleteRange{msg}
+		return true, err
+	case 4: // response.response_txn
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(TxnResponse)
+		err := b.DecodeMessage(msg)
+		m.Response = &ResponseOp_ResponseTxn{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _ResponseOp_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*ResponseOp)
+	// response
+	switch x := m.Response.(type) {
+	case *ResponseOp_ResponseRange:
+		s := proto.Size(x.ResponseRange)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *ResponseOp_ResponsePut:
+		s := proto.Size(x.ResponsePut)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *ResponseOp_ResponseDeleteRange:
+		s := proto.Size(x.ResponseDeleteRange)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *ResponseOp_ResponseTxn:
+		s := proto.Size(x.ResponseTxn)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type Compare struct {
+	// result is logical comparison operation for this comparison.
+	Result Compare_CompareResult `protobuf:"varint,1,opt,name=result,proto3,enum=etcdserverpb.Compare_CompareResult" json:"result,omitempty"`
+	// target is the key-value field to inspect for the comparison.
+	Target Compare_CompareTarget `protobuf:"varint,2,opt,name=target,proto3,enum=etcdserverpb.Compare_CompareTarget" json:"target,omitempty"`
+	// key is the subject key for the comparison operation.
+	Key []byte `protobuf:"bytes,3,opt,name=key,proto3" json:"key,omitempty"`
+	// Types that are valid to be assigned to TargetUnion:
+	//	*Compare_Version
+	//	*Compare_CreateRevision
+	//	*Compare_ModRevision
+	//	*Compare_Value
+	//	*Compare_Lease
+	TargetUnion isCompare_TargetUnion `protobuf_oneof:"target_union"`
+	// range_end compares the given target to all keys in the range [key, range_end).
+	// See RangeRequest for more details on key ranges.
+	RangeEnd             []byte   `protobuf:"bytes,64,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Compare) Reset()         { *m = Compare{} }
+func (m *Compare) String() string { return proto.CompactTextString(m) }
+func (*Compare) ProtoMessage()    {}
+func (*Compare) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{9}
+}
+func (m *Compare) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Compare) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Compare.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Compare) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Compare.Merge(m, src)
+}
+func (m *Compare) XXX_Size() int {
+	return m.Size()
+}
+func (m *Compare) XXX_DiscardUnknown() {
+	xxx_messageInfo_Compare.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Compare proto.InternalMessageInfo
+
+type isCompare_TargetUnion interface {
+	isCompare_TargetUnion()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type Compare_Version struct {
+	Version int64 `protobuf:"varint,4,opt,name=version,proto3,oneof"`
+}
+type Compare_CreateRevision struct {
+	CreateRevision int64 `protobuf:"varint,5,opt,name=create_revision,json=createRevision,proto3,oneof"`
+}
+type Compare_ModRevision struct {
+	ModRevision int64 `protobuf:"varint,6,opt,name=mod_revision,json=modRevision,proto3,oneof"`
+}
+type Compare_Value struct {
+	Value []byte `protobuf:"bytes,7,opt,name=value,proto3,oneof"`
+}
+type Compare_Lease struct {
+	Lease int64 `protobuf:"varint,8,opt,name=lease,proto3,oneof"`
+}
+
+func (*Compare_Version) isCompare_TargetUnion()        {}
+func (*Compare_CreateRevision) isCompare_TargetUnion() {}
+func (*Compare_ModRevision) isCompare_TargetUnion()    {}
+func (*Compare_Value) isCompare_TargetUnion()          {}
+func (*Compare_Lease) isCompare_TargetUnion()          {}
+
+func (m *Compare) GetTargetUnion() isCompare_TargetUnion {
+	if m != nil {
+		return m.TargetUnion
+	}
+	return nil
+}
+
+func (m *Compare) GetResult() Compare_CompareResult {
+	if m != nil {
+		return m.Result
+	}
+	return Compare_EQUAL
+}
+
+func (m *Compare) GetTarget() Compare_CompareTarget {
+	if m != nil {
+		return m.Target
+	}
+	return Compare_VERSION
+}
+
+func (m *Compare) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *Compare) GetVersion() int64 {
+	if x, ok := m.GetTargetUnion().(*Compare_Version); ok {
+		return x.Version
+	}
+	return 0
+}
+
+func (m *Compare) GetCreateRevision() int64 {
+	if x, ok := m.GetTargetUnion().(*Compare_CreateRevision); ok {
+		return x.CreateRevision
+	}
+	return 0
+}
+
+func (m *Compare) GetModRevision() int64 {
+	if x, ok := m.GetTargetUnion().(*Compare_ModRevision); ok {
+		return x.ModRevision
+	}
+	return 0
+}
+
+func (m *Compare) GetValue() []byte {
+	if x, ok := m.GetTargetUnion().(*Compare_Value); ok {
+		return x.Value
+	}
+	return nil
+}
+
+func (m *Compare) GetLease() int64 {
+	if x, ok := m.GetTargetUnion().(*Compare_Lease); ok {
+		return x.Lease
+	}
+	return 0
+}
+
+func (m *Compare) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*Compare) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _Compare_OneofMarshaler, _Compare_OneofUnmarshaler, _Compare_OneofSizer, []interface{}{
+		(*Compare_Version)(nil),
+		(*Compare_CreateRevision)(nil),
+		(*Compare_ModRevision)(nil),
+		(*Compare_Value)(nil),
+		(*Compare_Lease)(nil),
+	}
+}
+
+func _Compare_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*Compare)
+	// target_union
+	switch x := m.TargetUnion.(type) {
+	case *Compare_Version:
+		_ = b.EncodeVarint(4<<3 | proto.WireVarint)
+		_ = b.EncodeVarint(uint64(x.Version))
+	case *Compare_CreateRevision:
+		_ = b.EncodeVarint(5<<3 | proto.WireVarint)
+		_ = b.EncodeVarint(uint64(x.CreateRevision))
+	case *Compare_ModRevision:
+		_ = b.EncodeVarint(6<<3 | proto.WireVarint)
+		_ = b.EncodeVarint(uint64(x.ModRevision))
+	case *Compare_Value:
+		_ = b.EncodeVarint(7<<3 | proto.WireBytes)
+		_ = b.EncodeRawBytes(x.Value)
+	case *Compare_Lease:
+		_ = b.EncodeVarint(8<<3 | proto.WireVarint)
+		_ = b.EncodeVarint(uint64(x.Lease))
+	case nil:
+	default:
+		return fmt.Errorf("Compare.TargetUnion has unexpected type %T", x)
+	}
+	return nil
+}
+
+func _Compare_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*Compare)
+	switch tag {
+	case 4: // target_union.version
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.TargetUnion = &Compare_Version{int64(x)}
+		return true, err
+	case 5: // target_union.create_revision
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.TargetUnion = &Compare_CreateRevision{int64(x)}
+		return true, err
+	case 6: // target_union.mod_revision
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.TargetUnion = &Compare_ModRevision{int64(x)}
+		return true, err
+	case 7: // target_union.value
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeRawBytes(true)
+		m.TargetUnion = &Compare_Value{x}
+		return true, err
+	case 8: // target_union.lease
+		if wire != proto.WireVarint {
+			return true, proto.ErrInternalBadWireType
+		}
+		x, err := b.DecodeVarint()
+		m.TargetUnion = &Compare_Lease{int64(x)}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
+func _Compare_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*Compare)
+	// target_union
+	switch x := m.TargetUnion.(type) {
+	case *Compare_Version:
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(x.Version))
+	case *Compare_CreateRevision:
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(x.CreateRevision))
+	case *Compare_ModRevision:
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(x.ModRevision))
+	case *Compare_Value:
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(len(x.Value)))
+		n += len(x.Value)
+	case *Compare_Lease:
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(x.Lease))
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+// From the Google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
+// and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
+// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
+// may apply to the same or different entries in the database. All tests in the guard are applied and
+// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
+// it executes f op (see item 3 below).
+// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed if guard evaluates to
+// true.
+// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
+type TxnRequest struct {
+	// compare is a list of predicates representing a conjunction of terms.
+	// If the comparisons succeed, then the success requests will be processed in order,
+	// and the response will contain their respective responses in order.
+	// If the comparisons fail, then the failure requests will be processed in order,
+	// and the response will contain their respective responses in order.
+	Compare []*Compare `protobuf:"bytes,1,rep,name=compare,proto3" json:"compare,omitempty"`
+	// success is a list of requests which will be applied when compare evaluates to true.
+	Success []*RequestOp `protobuf:"bytes,2,rep,name=success,proto3" json:"success,omitempty"`
+	// failure is a list of requests which will be applied when compare evaluates to false.
+	Failure              []*RequestOp `protobuf:"bytes,3,rep,name=failure,proto3" json:"failure,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *TxnRequest) Reset()         { *m = TxnRequest{} }
+func (m *TxnRequest) String() string { return proto.CompactTextString(m) }
+func (*TxnRequest) ProtoMessage()    {}
+func (*TxnRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{10}
+}
+func (m *TxnRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TxnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TxnRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TxnRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TxnRequest.Merge(m, src)
+}
+func (m *TxnRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *TxnRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_TxnRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TxnRequest proto.InternalMessageInfo
+
+func (m *TxnRequest) GetCompare() []*Compare {
+	if m != nil {
+		return m.Compare
+	}
+	return nil
+}
+
+func (m *TxnRequest) GetSuccess() []*RequestOp {
+	if m != nil {
+		return m.Success
+	}
+	return nil
+}
+
+func (m *TxnRequest) GetFailure() []*RequestOp {
+	if m != nil {
+		return m.Failure
+	}
+	return nil
+}
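+
+// NOTE: editorial illustration only, not generated code. It sketches how the
+// guard / t op / f op structure described above maps onto TxnRequest, assuming a
+// put of key "k" that should only happen while its mod revision is still 5:
+//
+//	txn := &TxnRequest{
+//		Compare: []*Compare{{
+//			Result:      Compare_EQUAL,
+//			Target:      Compare_MOD,
+//			Key:         []byte("k"),
+//			TargetUnion: &Compare_ModRevision{ModRevision: 5},
+//		}},
+//		// applied only if every Compare holds (the guard / t op branch)
+//		Success: []*RequestOp{{Request: &RequestOp_RequestPut{
+//			RequestPut: &PutRequest{Key: []byte("k"), Value: []byte("v")},
+//		}}},
+//		// applied otherwise (the f op branch): read back the current pair
+//		Failure: []*RequestOp{{Request: &RequestOp_RequestRange{
+//			RequestRange: &RangeRequest{Key: []byte("k")},
+//		}}},
+//	}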
+
+type TxnResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// succeeded is set to true if the compare evaluated to true, and false otherwise.
+	Succeeded bool `protobuf:"varint,2,opt,name=succeeded,proto3" json:"succeeded,omitempty"`
+	// responses is a list of responses corresponding to the results from applying
+	// success if succeeded is true or failure if succeeded is false.
+	Responses            []*ResponseOp `protobuf:"bytes,3,rep,name=responses,proto3" json:"responses,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *TxnResponse) Reset()         { *m = TxnResponse{} }
+func (m *TxnResponse) String() string { return proto.CompactTextString(m) }
+func (*TxnResponse) ProtoMessage()    {}
+func (*TxnResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{11}
+}
+func (m *TxnResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *TxnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_TxnResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *TxnResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TxnResponse.Merge(m, src)
+}
+func (m *TxnResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *TxnResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_TxnResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TxnResponse proto.InternalMessageInfo
+
+func (m *TxnResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *TxnResponse) GetSucceeded() bool {
+	if m != nil {
+		return m.Succeeded
+	}
+	return false
+}
+
+func (m *TxnResponse) GetResponses() []*ResponseOp {
+	if m != nil {
+		return m.Responses
+	}
+	return nil
+}
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+type CompactionRequest struct {
+	// revision is the key-value store revision for the compaction operation.
+	Revision int64 `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
+	// physical is set so the RPC will wait until the compaction is physically
+	// applied to the local database such that compacted entries are totally
+	// removed from the backend database.
+	Physical             bool     `protobuf:"varint,2,opt,name=physical,proto3" json:"physical,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *CompactionRequest) Reset()         { *m = CompactionRequest{} }
+func (m *CompactionRequest) String() string { return proto.CompactTextString(m) }
+func (*CompactionRequest) ProtoMessage()    {}
+func (*CompactionRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{12}
+}
+func (m *CompactionRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CompactionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CompactionRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CompactionRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CompactionRequest.Merge(m, src)
+}
+func (m *CompactionRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *CompactionRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_CompactionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompactionRequest proto.InternalMessageInfo
+
+func (m *CompactionRequest) GetRevision() int64 {
+	if m != nil {
+		return m.Revision
+	}
+	return 0
+}
+
+func (m *CompactionRequest) GetPhysical() bool {
+	if m != nil {
+		return m.Physical
+	}
+	return false
+}
+
+type CompactionResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *CompactionResponse) Reset()         { *m = CompactionResponse{} }
+func (m *CompactionResponse) String() string { return proto.CompactTextString(m) }
+func (*CompactionResponse) ProtoMessage()    {}
+func (*CompactionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{13}
+}
+func (m *CompactionResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *CompactionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_CompactionResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *CompactionResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CompactionResponse.Merge(m, src)
+}
+func (m *CompactionResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *CompactionResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_CompactionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CompactionResponse proto.InternalMessageInfo
+
+func (m *CompactionResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type HashRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HashRequest) Reset()         { *m = HashRequest{} }
+func (m *HashRequest) String() string { return proto.CompactTextString(m) }
+func (*HashRequest) ProtoMessage()    {}
+func (*HashRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{14}
+}
+func (m *HashRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HashRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HashRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HashRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HashRequest.Merge(m, src)
+}
+func (m *HashRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *HashRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_HashRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashRequest proto.InternalMessageInfo
+
+type HashKVRequest struct {
+	// revision is the key-value store revision for the hash operation.
+	Revision             int64    `protobuf:"varint,1,opt,name=revision,proto3" json:"revision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HashKVRequest) Reset()         { *m = HashKVRequest{} }
+func (m *HashKVRequest) String() string { return proto.CompactTextString(m) }
+func (*HashKVRequest) ProtoMessage()    {}
+func (*HashKVRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{15}
+}
+func (m *HashKVRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HashKVRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HashKVRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HashKVRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HashKVRequest.Merge(m, src)
+}
+func (m *HashKVRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *HashKVRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_HashKVRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashKVRequest proto.InternalMessageInfo
+
+func (m *HashKVRequest) GetRevision() int64 {
+	if m != nil {
+		return m.Revision
+	}
+	return 0
+}
+
+type HashKVResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+	Hash uint32 `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
+	// compact_revision is the compacted revision of key-value store when hash begins.
+	CompactRevision      int64    `protobuf:"varint,3,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HashKVResponse) Reset()         { *m = HashKVResponse{} }
+func (m *HashKVResponse) String() string { return proto.CompactTextString(m) }
+func (*HashKVResponse) ProtoMessage()    {}
+func (*HashKVResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{16}
+}
+func (m *HashKVResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HashKVResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HashKVResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HashKVResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HashKVResponse.Merge(m, src)
+}
+func (m *HashKVResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *HashKVResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_HashKVResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashKVResponse proto.InternalMessageInfo
+
+func (m *HashKVResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *HashKVResponse) GetHash() uint32 {
+	if m != nil {
+		return m.Hash
+	}
+	return 0
+}
+
+func (m *HashKVResponse) GetCompactRevision() int64 {
+	if m != nil {
+		return m.CompactRevision
+	}
+	return 0
+}
+
+type HashResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// hash is the hash value computed from the responding member's KV's backend.
+	Hash                 uint32   `protobuf:"varint,2,opt,name=hash,proto3" json:"hash,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HashResponse) Reset()         { *m = HashResponse{} }
+func (m *HashResponse) String() string { return proto.CompactTextString(m) }
+func (*HashResponse) ProtoMessage()    {}
+func (*HashResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{17}
+}
+func (m *HashResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HashResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HashResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HashResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HashResponse.Merge(m, src)
+}
+func (m *HashResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *HashResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_HashResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HashResponse proto.InternalMessageInfo
+
+func (m *HashResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *HashResponse) GetHash() uint32 {
+	if m != nil {
+		return m.Hash
+	}
+	return 0
+}
+
+type SnapshotRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *SnapshotRequest) Reset()         { *m = SnapshotRequest{} }
+func (m *SnapshotRequest) String() string { return proto.CompactTextString(m) }
+func (*SnapshotRequest) ProtoMessage()    {}
+func (*SnapshotRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{18}
+}
+func (m *SnapshotRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SnapshotRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SnapshotRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SnapshotRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SnapshotRequest.Merge(m, src)
+}
+func (m *SnapshotRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *SnapshotRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_SnapshotRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotRequest proto.InternalMessageInfo
+
+type SnapshotResponse struct {
+	// header has the current key-value store information. The first header in the snapshot
+	// stream indicates the point in time of the snapshot.
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// remaining_bytes is the number of blob bytes to be sent after this message.
+	RemainingBytes uint64 `protobuf:"varint,2,opt,name=remaining_bytes,json=remainingBytes,proto3" json:"remaining_bytes,omitempty"`
+	// blob contains the next chunk of the snapshot in the snapshot stream.
+	Blob                 []byte   `protobuf:"bytes,3,opt,name=blob,proto3" json:"blob,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *SnapshotResponse) Reset()         { *m = SnapshotResponse{} }
+func (m *SnapshotResponse) String() string { return proto.CompactTextString(m) }
+func (*SnapshotResponse) ProtoMessage()    {}
+func (*SnapshotResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{19}
+}
+func (m *SnapshotResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SnapshotResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SnapshotResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SnapshotResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SnapshotResponse.Merge(m, src)
+}
+func (m *SnapshotResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *SnapshotResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_SnapshotResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotResponse proto.InternalMessageInfo
+
+func (m *SnapshotResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *SnapshotResponse) GetRemainingBytes() uint64 {
+	if m != nil {
+		return m.RemainingBytes
+	}
+	return 0
+}
+
+func (m *SnapshotResponse) GetBlob() []byte {
+	if m != nil {
+		return m.Blob
+	}
+	return nil
+}
+
+type WatchRequest struct {
+	// request_union is a request to either create a new watcher or cancel an existing watcher.
+	//
+	// Types that are valid to be assigned to RequestUnion:
+	//	*WatchRequest_CreateRequest
+	//	*WatchRequest_CancelRequest
+	//	*WatchRequest_ProgressRequest
+	RequestUnion         isWatchRequest_RequestUnion `protobuf_oneof:"request_union"`
+	XXX_NoUnkeyedLiteral struct{}                    `json:"-"`
+	XXX_unrecognized     []byte                      `json:"-"`
+	XXX_sizecache        int32                       `json:"-"`
+}
+
+func (m *WatchRequest) Reset()         { *m = WatchRequest{} }
+func (m *WatchRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchRequest) ProtoMessage()    {}
+func (*WatchRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{20}
+}
+func (m *WatchRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchRequest.Merge(m, src)
+}
+func (m *WatchRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchRequest proto.InternalMessageInfo
+
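+// isWatchRequest_RequestUnion is implemented by every wrapper type that can be
+// assigned to the request_union oneof field of WatchRequest.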
+type isWatchRequest_RequestUnion interface {
+	isWatchRequest_RequestUnion()
+	MarshalTo([]byte) (int, error)
+	Size() int
+}
+
+type WatchRequest_CreateRequest struct {
+	CreateRequest *WatchCreateRequest `protobuf:"bytes,1,opt,name=create_request,json=createRequest,proto3,oneof"`
+}
+type WatchRequest_CancelRequest struct {
+	CancelRequest *WatchCancelRequest `protobuf:"bytes,2,opt,name=cancel_request,json=cancelRequest,proto3,oneof"`
+}
+type WatchRequest_ProgressRequest struct {
+	ProgressRequest *WatchProgressRequest `protobuf:"bytes,3,opt,name=progress_request,json=progressRequest,proto3,oneof"`
+}
+
+func (*WatchRequest_CreateRequest) isWatchRequest_RequestUnion()   {}
+func (*WatchRequest_CancelRequest) isWatchRequest_RequestUnion()   {}
+func (*WatchRequest_ProgressRequest) isWatchRequest_RequestUnion() {}
+
+func (m *WatchRequest) GetRequestUnion() isWatchRequest_RequestUnion {
+	if m != nil {
+		return m.RequestUnion
+	}
+	return nil
+}
+
+func (m *WatchRequest) GetCreateRequest() *WatchCreateRequest {
+	if x, ok := m.GetRequestUnion().(*WatchRequest_CreateRequest); ok {
+		return x.CreateRequest
+	}
+	return nil
+}
+
+func (m *WatchRequest) GetCancelRequest() *WatchCancelRequest {
+	if x, ok := m.GetRequestUnion().(*WatchRequest_CancelRequest); ok {
+		return x.CancelRequest
+	}
+	return nil
+}
+
+func (m *WatchRequest) GetProgressRequest() *WatchProgressRequest {
+	if x, ok := m.GetRequestUnion().(*WatchRequest_ProgressRequest); ok {
+		return x.ProgressRequest
+	}
+	return nil
+}
+
+// XXX_OneofFuncs is for the internal use of the proto package.
+func (*WatchRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) {
+	return _WatchRequest_OneofMarshaler, _WatchRequest_OneofUnmarshaler, _WatchRequest_OneofSizer, []interface{}{
+		(*WatchRequest_CreateRequest)(nil),
+		(*WatchRequest_CancelRequest)(nil),
+		(*WatchRequest_ProgressRequest)(nil),
+	}
+}
+
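+// _WatchRequest_OneofMarshaler encodes whichever request_union variant is set:
+// it writes the field key as a varint (field_number<<3 | proto.WireBytes) followed
+// by the length-delimited embedded message.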
+func _WatchRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error {
+	m := msg.(*WatchRequest)
+	// request_union
+	switch x := m.RequestUnion.(type) {
+	case *WatchRequest_CreateRequest:
+		_ = b.EncodeVarint(1<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.CreateRequest); err != nil {
+			return err
+		}
+	case *WatchRequest_CancelRequest:
+		_ = b.EncodeVarint(2<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.CancelRequest); err != nil {
+			return err
+		}
+	case *WatchRequest_ProgressRequest:
+		_ = b.EncodeVarint(3<<3 | proto.WireBytes)
+		if err := b.EncodeMessage(x.ProgressRequest); err != nil {
+			return err
+		}
+	case nil:
+	default:
+		return fmt.Errorf("WatchRequest.RequestUnion has unexpected type %T", x)
+	}
+	return nil
+}
+
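+// _WatchRequest_OneofUnmarshaler dispatches on the field tag, decodes the embedded
+// message, and stores it in the matching request_union wrapper. It returns false for
+// tags that do not belong to the oneof.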
+func _WatchRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) {
+	m := msg.(*WatchRequest)
+	switch tag {
+	case 1: // request_union.create_request
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(WatchCreateRequest)
+		err := b.DecodeMessage(msg)
+		m.RequestUnion = &WatchRequest_CreateRequest{msg}
+		return true, err
+	case 2: // request_union.cancel_request
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(WatchCancelRequest)
+		err := b.DecodeMessage(msg)
+		m.RequestUnion = &WatchRequest_CancelRequest{msg}
+		return true, err
+	case 3: // request_union.progress_request
+		if wire != proto.WireBytes {
+			return true, proto.ErrInternalBadWireType
+		}
+		msg := new(WatchProgressRequest)
+		err := b.DecodeMessage(msg)
+		m.RequestUnion = &WatchRequest_ProgressRequest{msg}
+		return true, err
+	default:
+		return false, nil
+	}
+}
+
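+// _WatchRequest_OneofSizer returns the encoded size of the set request_union variant:
+// one byte for the tag and wire type, a varint holding the message length, and the
+// message body itself.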
+func _WatchRequest_OneofSizer(msg proto.Message) (n int) {
+	m := msg.(*WatchRequest)
+	// request_union
+	switch x := m.RequestUnion.(type) {
+	case *WatchRequest_CreateRequest:
+		s := proto.Size(x.CreateRequest)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *WatchRequest_CancelRequest:
+		s := proto.Size(x.CancelRequest)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case *WatchRequest_ProgressRequest:
+		s := proto.Size(x.ProgressRequest)
+		n += 1 // tag and wire
+		n += proto.SizeVarint(uint64(s))
+		n += s
+	case nil:
+	default:
+		panic(fmt.Sprintf("proto: unexpected type %T in oneof", x))
+	}
+	return n
+}
+
+type WatchCreateRequest struct {
+	// key is the key to register for watching.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+	// only the key argument is watched. If range_end is equal to '\0', all keys greater than
+	// or equal to the key argument are watched.
+	// If the range_end is one bit larger than the given key,
+	// then all keys with the prefix (the given key) will be watched.
+	RangeEnd []byte `protobuf:"bytes,2,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	// start_revision is an optional revision to watch from (inclusive). No start_revision is "now".
+	StartRevision int64 `protobuf:"varint,3,opt,name=start_revision,json=startRevision,proto3" json:"start_revision,omitempty"`
+	// progress_notify is set so that the etcd server will periodically send a WatchResponse with
+	// no events to the new watcher if there are no recent events. It is useful when clients
+	// wish to recover a disconnected watcher starting from a recent known revision.
+	// The etcd server may decide how often it will send notifications based on current load.
+	ProgressNotify bool `protobuf:"varint,4,opt,name=progress_notify,json=progressNotify,proto3" json:"progress_notify,omitempty"`
+	// filters filter the events on the server side before they are sent back to the watcher.
+	Filters []WatchCreateRequest_FilterType `protobuf:"varint,5,rep,packed,name=filters,proto3,enum=etcdserverpb.WatchCreateRequest_FilterType" json:"filters,omitempty"`
+	// If prev_kv is set, created watcher gets the previous KV before the event happens.
+	// If the previous KV is already compacted, nothing will be returned.
+	PrevKv bool `protobuf:"varint,6,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	// If watch_id is provided and non-zero, it will be assigned to this watcher.
+	// Since creating a watcher in etcd is not a synchronous operation,
+	// this can be used to ensure that ordering is correct when creating multiple
+	// watchers on the same stream. Creating a watcher with an ID already in
+	// use on the stream will cause an error to be returned.
+	WatchId int64 `protobuf:"varint,7,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+	// fragment enables splitting large revisions into multiple watch responses.
+	Fragment             bool     `protobuf:"varint,8,opt,name=fragment,proto3" json:"fragment,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *WatchCreateRequest) Reset()         { *m = WatchCreateRequest{} }
+func (m *WatchCreateRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCreateRequest) ProtoMessage()    {}
+func (*WatchCreateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{21}
+}
+func (m *WatchCreateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchCreateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchCreateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchCreateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchCreateRequest.Merge(m, src)
+}
+func (m *WatchCreateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchCreateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchCreateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchCreateRequest proto.InternalMessageInfo
+
+func (m *WatchCreateRequest) GetKey() []byte {
+	if m != nil {
+		return m.Key
+	}
+	return nil
+}
+
+func (m *WatchCreateRequest) GetRangeEnd() []byte {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return nil
+}
+
+func (m *WatchCreateRequest) GetStartRevision() int64 {
+	if m != nil {
+		return m.StartRevision
+	}
+	return 0
+}
+
+func (m *WatchCreateRequest) GetProgressNotify() bool {
+	if m != nil {
+		return m.ProgressNotify
+	}
+	return false
+}
+
+func (m *WatchCreateRequest) GetFilters() []WatchCreateRequest_FilterType {
+	if m != nil {
+		return m.Filters
+	}
+	return nil
+}
+
+func (m *WatchCreateRequest) GetPrevKv() bool {
+	if m != nil {
+		return m.PrevKv
+	}
+	return false
+}
+
+func (m *WatchCreateRequest) GetWatchId() int64 {
+	if m != nil {
+		return m.WatchId
+	}
+	return 0
+}
+
+func (m *WatchCreateRequest) GetFragment() bool {
+	if m != nil {
+		return m.Fragment
+	}
+	return false
+}
+
+type WatchCancelRequest struct {
+	// watch_id is the watcher id to cancel so that no more events are transmitted.
+	WatchId              int64    `protobuf:"varint,1,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *WatchCancelRequest) Reset()         { *m = WatchCancelRequest{} }
+func (m *WatchCancelRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchCancelRequest) ProtoMessage()    {}
+func (*WatchCancelRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{22}
+}
+func (m *WatchCancelRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchCancelRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchCancelRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchCancelRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchCancelRequest.Merge(m, src)
+}
+func (m *WatchCancelRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchCancelRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchCancelRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchCancelRequest proto.InternalMessageInfo
+
+func (m *WatchCancelRequest) GetWatchId() int64 {
+	if m != nil {
+		return m.WatchId
+	}
+	return 0
+}
+
+// Requests that a watch stream progress status be sent in the watch response stream as soon as
+// possible.
+type WatchProgressRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *WatchProgressRequest) Reset()         { *m = WatchProgressRequest{} }
+func (m *WatchProgressRequest) String() string { return proto.CompactTextString(m) }
+func (*WatchProgressRequest) ProtoMessage()    {}
+func (*WatchProgressRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{23}
+}
+func (m *WatchProgressRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchProgressRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchProgressRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchProgressRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchProgressRequest.Merge(m, src)
+}
+func (m *WatchProgressRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchProgressRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchProgressRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchProgressRequest proto.InternalMessageInfo
+
+type WatchResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// watch_id is the ID of the watcher that corresponds to the response.
+	WatchId int64 `protobuf:"varint,2,opt,name=watch_id,json=watchId,proto3" json:"watch_id,omitempty"`
+	// created is set to true if the response is for a create watch request.
+	// The client should record the watch_id and expect to receive events for
+	// the created watcher from the same stream.
+	// All events sent to the created watcher will attach with the same watch_id.
+	Created bool `protobuf:"varint,3,opt,name=created,proto3" json:"created,omitempty"`
+	// canceled is set to true if the response is for a cancel watch request.
+	// No further events will be sent to the canceled watcher.
+	Canceled bool `protobuf:"varint,4,opt,name=canceled,proto3" json:"canceled,omitempty"`
+	// compact_revision is set to the minimum index if a watcher tries to watch
+	// at a compacted index.
+	//
+	// This happens when creating a watcher at a compacted revision or the watcher cannot
+	// catch up with the progress of the key-value store.
+	//
+	// The client should treat the watcher as canceled and should not try to create any
+	// watcher with the same start_revision again.
+	CompactRevision int64 `protobuf:"varint,5,opt,name=compact_revision,json=compactRevision,proto3" json:"compact_revision,omitempty"`
+	// cancel_reason indicates the reason for canceling the watcher.
+	CancelReason string `protobuf:"bytes,6,opt,name=cancel_reason,json=cancelReason,proto3" json:"cancel_reason,omitempty"`
+	// fragment is true if a large watch response was split over multiple responses.
+	Fragment             bool            `protobuf:"varint,7,opt,name=fragment,proto3" json:"fragment,omitempty"`
+	Events               []*mvccpb.Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *WatchResponse) Reset()         { *m = WatchResponse{} }
+func (m *WatchResponse) String() string { return proto.CompactTextString(m) }
+func (*WatchResponse) ProtoMessage()    {}
+func (*WatchResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{24}
+}
+func (m *WatchResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *WatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_WatchResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *WatchResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_WatchResponse.Merge(m, src)
+}
+func (m *WatchResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *WatchResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_WatchResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_WatchResponse proto.InternalMessageInfo
+
+func (m *WatchResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *WatchResponse) GetWatchId() int64 {
+	if m != nil {
+		return m.WatchId
+	}
+	return 0
+}
+
+func (m *WatchResponse) GetCreated() bool {
+	if m != nil {
+		return m.Created
+	}
+	return false
+}
+
+func (m *WatchResponse) GetCanceled() bool {
+	if m != nil {
+		return m.Canceled
+	}
+	return false
+}
+
+func (m *WatchResponse) GetCompactRevision() int64 {
+	if m != nil {
+		return m.CompactRevision
+	}
+	return 0
+}
+
+func (m *WatchResponse) GetCancelReason() string {
+	if m != nil {
+		return m.CancelReason
+	}
+	return ""
+}
+
+func (m *WatchResponse) GetFragment() bool {
+	if m != nil {
+		return m.Fragment
+	}
+	return false
+}
+
+func (m *WatchResponse) GetEvents() []*mvccpb.Event {
+	if m != nil {
+		return m.Events
+	}
+	return nil
+}
+
+type LeaseGrantRequest struct {
+	// TTL is the advisory time-to-live in seconds. An expired lease will return -1.
+	TTL int64 `protobuf:"varint,1,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	// ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+	ID                   int64    `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseGrantRequest) Reset()         { *m = LeaseGrantRequest{} }
+func (m *LeaseGrantRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseGrantRequest) ProtoMessage()    {}
+func (*LeaseGrantRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{25}
+}
+func (m *LeaseGrantRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseGrantRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseGrantRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseGrantRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseGrantRequest.Merge(m, src)
+}
+func (m *LeaseGrantRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseGrantRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseGrantRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseGrantRequest proto.InternalMessageInfo
+
+func (m *LeaseGrantRequest) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+func (m *LeaseGrantRequest) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type LeaseGrantResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// ID is the lease ID for the granted lease.
+	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+	// TTL is the server chosen lease time-to-live in seconds.
+	TTL                  int64    `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	Error                string   `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseGrantResponse) Reset()         { *m = LeaseGrantResponse{} }
+func (m *LeaseGrantResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseGrantResponse) ProtoMessage()    {}
+func (*LeaseGrantResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{26}
+}
+func (m *LeaseGrantResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseGrantResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseGrantResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseGrantResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseGrantResponse.Merge(m, src)
+}
+func (m *LeaseGrantResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseGrantResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseGrantResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseGrantResponse proto.InternalMessageInfo
+
+func (m *LeaseGrantResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *LeaseGrantResponse) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *LeaseGrantResponse) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+func (m *LeaseGrantResponse) GetError() string {
+	if m != nil {
+		return m.Error
+	}
+	return ""
+}
+
+type LeaseRevokeRequest struct {
+	// ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
+	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseRevokeRequest) Reset()         { *m = LeaseRevokeRequest{} }
+func (m *LeaseRevokeRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseRevokeRequest) ProtoMessage()    {}
+func (*LeaseRevokeRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{27}
+}
+func (m *LeaseRevokeRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseRevokeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseRevokeRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseRevokeRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseRevokeRequest.Merge(m, src)
+}
+func (m *LeaseRevokeRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseRevokeRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseRevokeRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseRevokeRequest proto.InternalMessageInfo
+
+func (m *LeaseRevokeRequest) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type LeaseRevokeResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *LeaseRevokeResponse) Reset()         { *m = LeaseRevokeResponse{} }
+func (m *LeaseRevokeResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseRevokeResponse) ProtoMessage()    {}
+func (*LeaseRevokeResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{28}
+}
+func (m *LeaseRevokeResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseRevokeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseRevokeResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseRevokeResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseRevokeResponse.Merge(m, src)
+}
+func (m *LeaseRevokeResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseRevokeResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseRevokeResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseRevokeResponse proto.InternalMessageInfo
+
+func (m *LeaseRevokeResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type LeaseKeepAliveRequest struct {
+	// ID is the lease ID for the lease to keep alive.
+	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseKeepAliveRequest) Reset()         { *m = LeaseKeepAliveRequest{} }
+func (m *LeaseKeepAliveRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseKeepAliveRequest) ProtoMessage()    {}
+func (*LeaseKeepAliveRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{29}
+}
+func (m *LeaseKeepAliveRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseKeepAliveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseKeepAliveRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseKeepAliveRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseKeepAliveRequest.Merge(m, src)
+}
+func (m *LeaseKeepAliveRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseKeepAliveRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseKeepAliveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseKeepAliveRequest proto.InternalMessageInfo
+
+func (m *LeaseKeepAliveRequest) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type LeaseKeepAliveResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// ID is the lease ID from the keep alive request.
+	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+	// TTL is the new time-to-live for the lease.
+	TTL                  int64    `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseKeepAliveResponse) Reset()         { *m = LeaseKeepAliveResponse{} }
+func (m *LeaseKeepAliveResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseKeepAliveResponse) ProtoMessage()    {}
+func (*LeaseKeepAliveResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{30}
+}
+func (m *LeaseKeepAliveResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseKeepAliveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseKeepAliveResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseKeepAliveResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseKeepAliveResponse.Merge(m, src)
+}
+func (m *LeaseKeepAliveResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseKeepAliveResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseKeepAliveResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseKeepAliveResponse proto.InternalMessageInfo
+
+func (m *LeaseKeepAliveResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *LeaseKeepAliveResponse) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *LeaseKeepAliveResponse) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+type LeaseTimeToLiveRequest struct {
+	// ID is the lease ID for the lease.
+	ID int64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// keys is true to query all the keys attached to this lease.
+	Keys                 bool     `protobuf:"varint,2,opt,name=keys,proto3" json:"keys,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseTimeToLiveRequest) Reset()         { *m = LeaseTimeToLiveRequest{} }
+func (m *LeaseTimeToLiveRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseTimeToLiveRequest) ProtoMessage()    {}
+func (*LeaseTimeToLiveRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{31}
+}
+func (m *LeaseTimeToLiveRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseTimeToLiveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseTimeToLiveRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseTimeToLiveRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseTimeToLiveRequest.Merge(m, src)
+}
+func (m *LeaseTimeToLiveRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseTimeToLiveRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseTimeToLiveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseTimeToLiveRequest proto.InternalMessageInfo
+
+func (m *LeaseTimeToLiveRequest) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *LeaseTimeToLiveRequest) GetKeys() bool {
+	if m != nil {
+		return m.Keys
+	}
+	return false
+}
+
+type LeaseTimeToLiveResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// ID is the lease ID from the keep alive request.
+	ID int64 `protobuf:"varint,2,opt,name=ID,proto3" json:"ID,omitempty"`
+	// TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+	TTL int64 `protobuf:"varint,3,opt,name=TTL,proto3" json:"TTL,omitempty"`
+	// GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+	GrantedTTL int64 `protobuf:"varint,4,opt,name=grantedTTL,proto3" json:"grantedTTL,omitempty"`
+	// Keys is the list of keys attached to this lease.
+	Keys                 [][]byte `protobuf:"bytes,5,rep,name=keys,proto3" json:"keys,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseTimeToLiveResponse) Reset()         { *m = LeaseTimeToLiveResponse{} }
+func (m *LeaseTimeToLiveResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseTimeToLiveResponse) ProtoMessage()    {}
+func (*LeaseTimeToLiveResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{32}
+}
+func (m *LeaseTimeToLiveResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseTimeToLiveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseTimeToLiveResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseTimeToLiveResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseTimeToLiveResponse.Merge(m, src)
+}
+func (m *LeaseTimeToLiveResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseTimeToLiveResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseTimeToLiveResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseTimeToLiveResponse proto.InternalMessageInfo
+
+func (m *LeaseTimeToLiveResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *LeaseTimeToLiveResponse) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetTTL() int64 {
+	if m != nil {
+		return m.TTL
+	}
+	return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetGrantedTTL() int64 {
+	if m != nil {
+		return m.GrantedTTL
+	}
+	return 0
+}
+
+func (m *LeaseTimeToLiveResponse) GetKeys() [][]byte {
+	if m != nil {
+		return m.Keys
+	}
+	return nil
+}
+
+type LeaseLeasesRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseLeasesRequest) Reset()         { *m = LeaseLeasesRequest{} }
+func (m *LeaseLeasesRequest) String() string { return proto.CompactTextString(m) }
+func (*LeaseLeasesRequest) ProtoMessage()    {}
+func (*LeaseLeasesRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{33}
+}
+func (m *LeaseLeasesRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseLeasesRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseLeasesRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseLeasesRequest.Merge(m, src)
+}
+func (m *LeaseLeasesRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseLeasesRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseLeasesRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseLeasesRequest proto.InternalMessageInfo
+
+type LeaseStatus struct {
+	ID                   int64    `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LeaseStatus) Reset()         { *m = LeaseStatus{} }
+func (m *LeaseStatus) String() string { return proto.CompactTextString(m) }
+func (*LeaseStatus) ProtoMessage()    {}
+func (*LeaseStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{34}
+}
+func (m *LeaseStatus) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseStatus.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseStatus.Merge(m, src)
+}
+func (m *LeaseStatus) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseStatus proto.InternalMessageInfo
+
+func (m *LeaseStatus) GetID() int64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type LeaseLeasesResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Leases               []*LeaseStatus  `protobuf:"bytes,2,rep,name=leases,proto3" json:"leases,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *LeaseLeasesResponse) Reset()         { *m = LeaseLeasesResponse{} }
+func (m *LeaseLeasesResponse) String() string { return proto.CompactTextString(m) }
+func (*LeaseLeasesResponse) ProtoMessage()    {}
+func (*LeaseLeasesResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{35}
+}
+func (m *LeaseLeasesResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *LeaseLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_LeaseLeasesResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *LeaseLeasesResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LeaseLeasesResponse.Merge(m, src)
+}
+func (m *LeaseLeasesResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *LeaseLeasesResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_LeaseLeasesResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LeaseLeasesResponse proto.InternalMessageInfo
+
+func (m *LeaseLeasesResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *LeaseLeasesResponse) GetLeases() []*LeaseStatus {
+	if m != nil {
+		return m.Leases
+	}
+	return nil
+}
+
+type Member struct {
+	// ID is the member ID for this member.
+	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// peerURLs is the list of URLs the member exposes to the cluster for communication.
+	PeerURLs []string `protobuf:"bytes,3,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
+	// clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+	ClientURLs           []string `protobuf:"bytes,4,rep,name=clientURLs,proto3" json:"clientURLs,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Member) Reset()         { *m = Member{} }
+func (m *Member) String() string { return proto.CompactTextString(m) }
+func (*Member) ProtoMessage()    {}
+func (*Member) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{36}
+}
+func (m *Member) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Member) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Member.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Member) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Member.Merge(m, src)
+}
+func (m *Member) XXX_Size() int {
+	return m.Size()
+}
+func (m *Member) XXX_DiscardUnknown() {
+	xxx_messageInfo_Member.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Member proto.InternalMessageInfo
+
+func (m *Member) GetID() uint64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *Member) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Member) GetPeerURLs() []string {
+	if m != nil {
+		return m.PeerURLs
+	}
+	return nil
+}
+
+func (m *Member) GetClientURLs() []string {
+	if m != nil {
+		return m.ClientURLs
+	}
+	return nil
+}
+
+type MemberAddRequest struct {
+	// peerURLs is the list of URLs the added member will use to communicate with the cluster.
+	PeerURLs             []string `protobuf:"bytes,1,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MemberAddRequest) Reset()         { *m = MemberAddRequest{} }
+func (m *MemberAddRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberAddRequest) ProtoMessage()    {}
+func (*MemberAddRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{37}
+}
+func (m *MemberAddRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberAddRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberAddRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberAddRequest.Merge(m, src)
+}
+func (m *MemberAddRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberAddRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberAddRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberAddRequest proto.InternalMessageInfo
+
+func (m *MemberAddRequest) GetPeerURLs() []string {
+	if m != nil {
+		return m.PeerURLs
+	}
+	return nil
+}
+
+type MemberAddResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// member is the member information for the added member.
+	Member *Member `protobuf:"bytes,2,opt,name=member,proto3" json:"member,omitempty"`
+	// members is a list of all members after adding the new member.
+	Members              []*Member `protobuf:"bytes,3,rep,name=members,proto3" json:"members,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *MemberAddResponse) Reset()         { *m = MemberAddResponse{} }
+func (m *MemberAddResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberAddResponse) ProtoMessage()    {}
+func (*MemberAddResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{38}
+}
+func (m *MemberAddResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberAddResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberAddResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberAddResponse.Merge(m, src)
+}
+func (m *MemberAddResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberAddResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberAddResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberAddResponse proto.InternalMessageInfo
+
+func (m *MemberAddResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *MemberAddResponse) GetMember() *Member {
+	if m != nil {
+		return m.Member
+	}
+	return nil
+}
+
+func (m *MemberAddResponse) GetMembers() []*Member {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+type MemberRemoveRequest struct {
+	// ID is the member ID of the member to remove.
+	ID                   uint64   `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MemberRemoveRequest) Reset()         { *m = MemberRemoveRequest{} }
+func (m *MemberRemoveRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberRemoveRequest) ProtoMessage()    {}
+func (*MemberRemoveRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{39}
+}
+func (m *MemberRemoveRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberRemoveRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberRemoveRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberRemoveRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberRemoveRequest.Merge(m, src)
+}
+func (m *MemberRemoveRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberRemoveRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberRemoveRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberRemoveRequest proto.InternalMessageInfo
+
+func (m *MemberRemoveRequest) GetID() uint64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+type MemberRemoveResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// members is a list of all members after removing the member.
+	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *MemberRemoveResponse) Reset()         { *m = MemberRemoveResponse{} }
+func (m *MemberRemoveResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberRemoveResponse) ProtoMessage()    {}
+func (*MemberRemoveResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{40}
+}
+func (m *MemberRemoveResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberRemoveResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberRemoveResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberRemoveResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberRemoveResponse.Merge(m, src)
+}
+func (m *MemberRemoveResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberRemoveResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberRemoveResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberRemoveResponse proto.InternalMessageInfo
+
+func (m *MemberRemoveResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *MemberRemoveResponse) GetMembers() []*Member {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+type MemberUpdateRequest struct {
+	// ID is the member ID of the member to update.
+	ID uint64 `protobuf:"varint,1,opt,name=ID,proto3" json:"ID,omitempty"`
+	// peerURLs is the new list of URLs the member will use to communicate with the cluster.
+	PeerURLs             []string `protobuf:"bytes,2,rep,name=peerURLs,proto3" json:"peerURLs,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MemberUpdateRequest) Reset()         { *m = MemberUpdateRequest{} }
+func (m *MemberUpdateRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberUpdateRequest) ProtoMessage()    {}
+func (*MemberUpdateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{41}
+}
+func (m *MemberUpdateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberUpdateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberUpdateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberUpdateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberUpdateRequest.Merge(m, src)
+}
+func (m *MemberUpdateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberUpdateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberUpdateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberUpdateRequest proto.InternalMessageInfo
+
+func (m *MemberUpdateRequest) GetID() uint64 {
+	if m != nil {
+		return m.ID
+	}
+	return 0
+}
+
+func (m *MemberUpdateRequest) GetPeerURLs() []string {
+	if m != nil {
+		return m.PeerURLs
+	}
+	return nil
+}
+
+type MemberUpdateResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// members is a list of all members after updating the member.
+	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *MemberUpdateResponse) Reset()         { *m = MemberUpdateResponse{} }
+func (m *MemberUpdateResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberUpdateResponse) ProtoMessage()    {}
+func (*MemberUpdateResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{42}
+}
+func (m *MemberUpdateResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberUpdateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberUpdateResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberUpdateResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberUpdateResponse.Merge(m, src)
+}
+func (m *MemberUpdateResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberUpdateResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberUpdateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberUpdateResponse proto.InternalMessageInfo
+
+func (m *MemberUpdateResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *MemberUpdateResponse) GetMembers() []*Member {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+type MemberListRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MemberListRequest) Reset()         { *m = MemberListRequest{} }
+func (m *MemberListRequest) String() string { return proto.CompactTextString(m) }
+func (*MemberListRequest) ProtoMessage()    {}
+func (*MemberListRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{43}
+}
+func (m *MemberListRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberListRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberListRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberListRequest.Merge(m, src)
+}
+func (m *MemberListRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberListRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberListRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberListRequest proto.InternalMessageInfo
+
+type MemberListResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// members is a list of all members associated with the cluster.
+	Members              []*Member `protobuf:"bytes,2,rep,name=members,proto3" json:"members,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *MemberListResponse) Reset()         { *m = MemberListResponse{} }
+func (m *MemberListResponse) String() string { return proto.CompactTextString(m) }
+func (*MemberListResponse) ProtoMessage()    {}
+func (*MemberListResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{44}
+}
+func (m *MemberListResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MemberListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MemberListResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MemberListResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MemberListResponse.Merge(m, src)
+}
+func (m *MemberListResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MemberListResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MemberListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MemberListResponse proto.InternalMessageInfo
+
+func (m *MemberListResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *MemberListResponse) GetMembers() []*Member {
+	if m != nil {
+		return m.Members
+	}
+	return nil
+}
+
+type DefragmentRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DefragmentRequest) Reset()         { *m = DefragmentRequest{} }
+func (m *DefragmentRequest) String() string { return proto.CompactTextString(m) }
+func (*DefragmentRequest) ProtoMessage()    {}
+func (*DefragmentRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{45}
+}
+func (m *DefragmentRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DefragmentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DefragmentRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DefragmentRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DefragmentRequest.Merge(m, src)
+}
+func (m *DefragmentRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *DefragmentRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DefragmentRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DefragmentRequest proto.InternalMessageInfo
+
+type DefragmentResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *DefragmentResponse) Reset()         { *m = DefragmentResponse{} }
+func (m *DefragmentResponse) String() string { return proto.CompactTextString(m) }
+func (*DefragmentResponse) ProtoMessage()    {}
+func (*DefragmentResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{46}
+}
+func (m *DefragmentResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *DefragmentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_DefragmentResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *DefragmentResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DefragmentResponse.Merge(m, src)
+}
+func (m *DefragmentResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *DefragmentResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DefragmentResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DefragmentResponse proto.InternalMessageInfo
+
+func (m *DefragmentResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type MoveLeaderRequest struct {
+	// targetID is the node ID for the new leader.
+	TargetID             uint64   `protobuf:"varint,1,opt,name=targetID,proto3" json:"targetID,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MoveLeaderRequest) Reset()         { *m = MoveLeaderRequest{} }
+func (m *MoveLeaderRequest) String() string { return proto.CompactTextString(m) }
+func (*MoveLeaderRequest) ProtoMessage()    {}
+func (*MoveLeaderRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{47}
+}
+func (m *MoveLeaderRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MoveLeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MoveLeaderRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MoveLeaderRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MoveLeaderRequest.Merge(m, src)
+}
+func (m *MoveLeaderRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *MoveLeaderRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_MoveLeaderRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MoveLeaderRequest proto.InternalMessageInfo
+
+func (m *MoveLeaderRequest) GetTargetID() uint64 {
+	if m != nil {
+		return m.TargetID
+	}
+	return 0
+}
+
+type MoveLeaderResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *MoveLeaderResponse) Reset()         { *m = MoveLeaderResponse{} }
+func (m *MoveLeaderResponse) String() string { return proto.CompactTextString(m) }
+func (*MoveLeaderResponse) ProtoMessage()    {}
+func (*MoveLeaderResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{48}
+}
+func (m *MoveLeaderResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *MoveLeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_MoveLeaderResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *MoveLeaderResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MoveLeaderResponse.Merge(m, src)
+}
+func (m *MoveLeaderResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *MoveLeaderResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_MoveLeaderResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MoveLeaderResponse proto.InternalMessageInfo
+
+func (m *MoveLeaderResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AlarmRequest struct {
+	// action is the kind of alarm request to issue. The action
+	// may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+	// raised alarm.
+	Action AlarmRequest_AlarmAction `protobuf:"varint,1,opt,name=action,proto3,enum=etcdserverpb.AlarmRequest_AlarmAction" json:"action,omitempty"`
+	// memberID is the ID of the member associated with the alarm. If memberID is 0, the
+	// alarm request covers all members.
+	MemberID uint64 `protobuf:"varint,2,opt,name=memberID,proto3" json:"memberID,omitempty"`
+	// alarm is the type of alarm to consider for this request.
+	Alarm                AlarmType `protobuf:"varint,3,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *AlarmRequest) Reset()         { *m = AlarmRequest{} }
+func (m *AlarmRequest) String() string { return proto.CompactTextString(m) }
+func (*AlarmRequest) ProtoMessage()    {}
+func (*AlarmRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{49}
+}
+func (m *AlarmRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AlarmRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AlarmRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmRequest.Merge(m, src)
+}
+func (m *AlarmRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AlarmRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmRequest proto.InternalMessageInfo
+
+func (m *AlarmRequest) GetAction() AlarmRequest_AlarmAction {
+	if m != nil {
+		return m.Action
+	}
+	return AlarmRequest_GET
+}
+
+func (m *AlarmRequest) GetMemberID() uint64 {
+	if m != nil {
+		return m.MemberID
+	}
+	return 0
+}
+
+func (m *AlarmRequest) GetAlarm() AlarmType {
+	if m != nil {
+		return m.Alarm
+	}
+	return AlarmType_NONE
+}
+
+type AlarmMember struct {
+	// memberID is the ID of the member associated with the raised alarm.
+	MemberID uint64 `protobuf:"varint,1,opt,name=memberID,proto3" json:"memberID,omitempty"`
+	// alarm is the type of alarm which has been raised.
+	Alarm                AlarmType `protobuf:"varint,2,opt,name=alarm,proto3,enum=etcdserverpb.AlarmType" json:"alarm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *AlarmMember) Reset()         { *m = AlarmMember{} }
+func (m *AlarmMember) String() string { return proto.CompactTextString(m) }
+func (*AlarmMember) ProtoMessage()    {}
+func (*AlarmMember) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{50}
+}
+func (m *AlarmMember) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AlarmMember) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AlarmMember.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AlarmMember) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmMember.Merge(m, src)
+}
+func (m *AlarmMember) XXX_Size() int {
+	return m.Size()
+}
+func (m *AlarmMember) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmMember.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmMember proto.InternalMessageInfo
+
+func (m *AlarmMember) GetMemberID() uint64 {
+	if m != nil {
+		return m.MemberID
+	}
+	return 0
+}
+
+func (m *AlarmMember) GetAlarm() AlarmType {
+	if m != nil {
+		return m.Alarm
+	}
+	return AlarmType_NONE
+}
+
+type AlarmResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// alarms is a list of alarms associated with the alarm request.
+	Alarms               []*AlarmMember `protobuf:"bytes,2,rep,name=alarms,proto3" json:"alarms,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *AlarmResponse) Reset()         { *m = AlarmResponse{} }
+func (m *AlarmResponse) String() string { return proto.CompactTextString(m) }
+func (*AlarmResponse) ProtoMessage()    {}
+func (*AlarmResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{51}
+}
+func (m *AlarmResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AlarmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AlarmResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AlarmResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmResponse.Merge(m, src)
+}
+func (m *AlarmResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AlarmResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmResponse proto.InternalMessageInfo
+
+func (m *AlarmResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AlarmResponse) GetAlarms() []*AlarmMember {
+	if m != nil {
+		return m.Alarms
+	}
+	return nil
+}
+
+type StatusRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StatusRequest) Reset()         { *m = StatusRequest{} }
+func (m *StatusRequest) String() string { return proto.CompactTextString(m) }
+func (*StatusRequest) ProtoMessage()    {}
+func (*StatusRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{52}
+}
+func (m *StatusRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StatusRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StatusRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StatusRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StatusRequest.Merge(m, src)
+}
+func (m *StatusRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *StatusRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_StatusRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatusRequest proto.InternalMessageInfo
+
+type StatusResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// version is the cluster protocol version used by the responding member.
+	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+	// dbSize is the size of the backend database, in bytes, of the responding member.
+	DbSize int64 `protobuf:"varint,3,opt,name=dbSize,proto3" json:"dbSize,omitempty"`
+	// leader is the member ID which the responding member believes is the current leader.
+	Leader uint64 `protobuf:"varint,4,opt,name=leader,proto3" json:"leader,omitempty"`
+	// raftIndex is the current raft index of the responding member.
+	RaftIndex uint64 `protobuf:"varint,5,opt,name=raftIndex,proto3" json:"raftIndex,omitempty"`
+	// raftTerm is the current raft term of the responding member.
+	RaftTerm             uint64   `protobuf:"varint,6,opt,name=raftTerm,proto3" json:"raftTerm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *StatusResponse) Reset()         { *m = StatusResponse{} }
+func (m *StatusResponse) String() string { return proto.CompactTextString(m) }
+func (*StatusResponse) ProtoMessage()    {}
+func (*StatusResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{53}
+}
+func (m *StatusResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *StatusResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_StatusResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *StatusResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_StatusResponse.Merge(m, src)
+}
+func (m *StatusResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *StatusResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_StatusResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_StatusResponse proto.InternalMessageInfo
+
+func (m *StatusResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *StatusResponse) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *StatusResponse) GetDbSize() int64 {
+	if m != nil {
+		return m.DbSize
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetLeader() uint64 {
+	if m != nil {
+		return m.Leader
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetRaftIndex() uint64 {
+	if m != nil {
+		return m.RaftIndex
+	}
+	return 0
+}
+
+func (m *StatusResponse) GetRaftTerm() uint64 {
+	if m != nil {
+		return m.RaftTerm
+	}
+	return 0
+}
+
+type AuthEnableRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthEnableRequest) Reset()         { *m = AuthEnableRequest{} }
+func (m *AuthEnableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableRequest) ProtoMessage()    {}
+func (*AuthEnableRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{54}
+}
+func (m *AuthEnableRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthEnableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthEnableRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthEnableRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthEnableRequest.Merge(m, src)
+}
+func (m *AuthEnableRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthEnableRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthEnableRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthEnableRequest proto.InternalMessageInfo
+
+type AuthDisableRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthDisableRequest) Reset()         { *m = AuthDisableRequest{} }
+func (m *AuthDisableRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableRequest) ProtoMessage()    {}
+func (*AuthDisableRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{55}
+}
+func (m *AuthDisableRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthDisableRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthDisableRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthDisableRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthDisableRequest.Merge(m, src)
+}
+func (m *AuthDisableRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthDisableRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthDisableRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthDisableRequest proto.InternalMessageInfo
+
+type AuthenticateRequest struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password             string   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthenticateRequest) Reset()         { *m = AuthenticateRequest{} }
+func (m *AuthenticateRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateRequest) ProtoMessage()    {}
+func (*AuthenticateRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{56}
+}
+func (m *AuthenticateRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthenticateRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthenticateRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthenticateRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthenticateRequest.Merge(m, src)
+}
+func (m *AuthenticateRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthenticateRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthenticateRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthenticateRequest proto.InternalMessageInfo
+
+func (m *AuthenticateRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthenticateRequest) GetPassword() string {
+	if m != nil {
+		return m.Password
+	}
+	return ""
+}
+
+type AuthUserAddRequest struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Password             string   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserAddRequest) Reset()         { *m = AuthUserAddRequest{} }
+func (m *AuthUserAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddRequest) ProtoMessage()    {}
+func (*AuthUserAddRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{57}
+}
+func (m *AuthUserAddRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserAddRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserAddRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserAddRequest.Merge(m, src)
+}
+func (m *AuthUserAddRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserAddRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserAddRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserAddRequest proto.InternalMessageInfo
+
+func (m *AuthUserAddRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthUserAddRequest) GetPassword() string {
+	if m != nil {
+		return m.Password
+	}
+	return ""
+}
+
+type AuthUserGetRequest struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserGetRequest) Reset()         { *m = AuthUserGetRequest{} }
+func (m *AuthUserGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetRequest) ProtoMessage()    {}
+func (*AuthUserGetRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{58}
+}
+func (m *AuthUserGetRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserGetRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserGetRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserGetRequest.Merge(m, src)
+}
+func (m *AuthUserGetRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserGetRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserGetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGetRequest proto.InternalMessageInfo
+
+func (m *AuthUserGetRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AuthUserDeleteRequest struct {
+	// name is the name of the user to delete.
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserDeleteRequest) Reset()         { *m = AuthUserDeleteRequest{} }
+func (m *AuthUserDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteRequest) ProtoMessage()    {}
+func (*AuthUserDeleteRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{59}
+}
+func (m *AuthUserDeleteRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserDeleteRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserDeleteRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserDeleteRequest.Merge(m, src)
+}
+func (m *AuthUserDeleteRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserDeleteRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserDeleteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserDeleteRequest proto.InternalMessageInfo
+
+func (m *AuthUserDeleteRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AuthUserChangePasswordRequest struct {
+	// name is the name of the user whose password is being changed.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// password is the new password for the user.
+	Password             string   `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserChangePasswordRequest) Reset()         { *m = AuthUserChangePasswordRequest{} }
+func (m *AuthUserChangePasswordRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordRequest) ProtoMessage()    {}
+func (*AuthUserChangePasswordRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{60}
+}
+func (m *AuthUserChangePasswordRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserChangePasswordRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserChangePasswordRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserChangePasswordRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserChangePasswordRequest.Merge(m, src)
+}
+func (m *AuthUserChangePasswordRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserChangePasswordRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserChangePasswordRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserChangePasswordRequest proto.InternalMessageInfo
+
+func (m *AuthUserChangePasswordRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthUserChangePasswordRequest) GetPassword() string {
+	if m != nil {
+		return m.Password
+	}
+	return ""
+}
+
+type AuthUserGrantRoleRequest struct {
+	// user is the name of the user which should be granted a given role.
+	User string `protobuf:"bytes,1,opt,name=user,proto3" json:"user,omitempty"`
+	// role is the name of the role to grant to the user.
+	Role                 string   `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserGrantRoleRequest) Reset()         { *m = AuthUserGrantRoleRequest{} }
+func (m *AuthUserGrantRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleRequest) ProtoMessage()    {}
+func (*AuthUserGrantRoleRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{61}
+}
+func (m *AuthUserGrantRoleRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserGrantRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserGrantRoleRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserGrantRoleRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserGrantRoleRequest.Merge(m, src)
+}
+func (m *AuthUserGrantRoleRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserGrantRoleRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserGrantRoleRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGrantRoleRequest proto.InternalMessageInfo
+
+func (m *AuthUserGrantRoleRequest) GetUser() string {
+	if m != nil {
+		return m.User
+	}
+	return ""
+}
+
+func (m *AuthUserGrantRoleRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthUserRevokeRoleRequest struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Role                 string   `protobuf:"bytes,2,opt,name=role,proto3" json:"role,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserRevokeRoleRequest) Reset()         { *m = AuthUserRevokeRoleRequest{} }
+func (m *AuthUserRevokeRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleRequest) ProtoMessage()    {}
+func (*AuthUserRevokeRoleRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{62}
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserRevokeRoleRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserRevokeRoleRequest.Merge(m, src)
+}
+func (m *AuthUserRevokeRoleRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserRevokeRoleRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserRevokeRoleRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserRevokeRoleRequest proto.InternalMessageInfo
+
+func (m *AuthUserRevokeRoleRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthUserRevokeRoleRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthRoleAddRequest struct {
+	// name is the name of the role to add to the authentication system.
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleAddRequest) Reset()         { *m = AuthRoleAddRequest{} }
+func (m *AuthRoleAddRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddRequest) ProtoMessage()    {}
+func (*AuthRoleAddRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{63}
+}
+func (m *AuthRoleAddRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleAddRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleAddRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleAddRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleAddRequest.Merge(m, src)
+}
+func (m *AuthRoleAddRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleAddRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleAddRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleAddRequest proto.InternalMessageInfo
+
+func (m *AuthRoleAddRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AuthRoleGetRequest struct {
+	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleGetRequest) Reset()         { *m = AuthRoleGetRequest{} }
+func (m *AuthRoleGetRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetRequest) ProtoMessage()    {}
+func (*AuthRoleGetRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{64}
+}
+func (m *AuthRoleGetRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleGetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleGetRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleGetRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleGetRequest.Merge(m, src)
+}
+func (m *AuthRoleGetRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleGetRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleGetRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGetRequest proto.InternalMessageInfo
+
+func (m *AuthRoleGetRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthUserListRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthUserListRequest) Reset()         { *m = AuthUserListRequest{} }
+func (m *AuthUserListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListRequest) ProtoMessage()    {}
+func (*AuthUserListRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{65}
+}
+func (m *AuthUserListRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserListRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserListRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserListRequest.Merge(m, src)
+}
+func (m *AuthUserListRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserListRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserListRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserListRequest proto.InternalMessageInfo
+
+type AuthRoleListRequest struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleListRequest) Reset()         { *m = AuthRoleListRequest{} }
+func (m *AuthRoleListRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListRequest) ProtoMessage()    {}
+func (*AuthRoleListRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{66}
+}
+func (m *AuthRoleListRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleListRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleListRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleListRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleListRequest.Merge(m, src)
+}
+func (m *AuthRoleListRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleListRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleListRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleListRequest proto.InternalMessageInfo
+
+type AuthRoleDeleteRequest struct {
+	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleDeleteRequest) Reset()         { *m = AuthRoleDeleteRequest{} }
+func (m *AuthRoleDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteRequest) ProtoMessage()    {}
+func (*AuthRoleDeleteRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{67}
+}
+func (m *AuthRoleDeleteRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleDeleteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleDeleteRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleDeleteRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleDeleteRequest.Merge(m, src)
+}
+func (m *AuthRoleDeleteRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleDeleteRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleDeleteRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleDeleteRequest proto.InternalMessageInfo
+
+func (m *AuthRoleDeleteRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+type AuthRoleGrantPermissionRequest struct {
+	// name is the name of the role which will be granted the permission.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// perm is the permission to grant to the role.
+	Perm                 *authpb.Permission `protobuf:"bytes,2,opt,name=perm,proto3" json:"perm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *AuthRoleGrantPermissionRequest) Reset()         { *m = AuthRoleGrantPermissionRequest{} }
+func (m *AuthRoleGrantPermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionRequest) ProtoMessage()    {}
+func (*AuthRoleGrantPermissionRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{68}
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleGrantPermissionRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleGrantPermissionRequest.Merge(m, src)
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleGrantPermissionRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleGrantPermissionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGrantPermissionRequest proto.InternalMessageInfo
+
+func (m *AuthRoleGrantPermissionRequest) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AuthRoleGrantPermissionRequest) GetPerm() *authpb.Permission {
+	if m != nil {
+		return m.Perm
+	}
+	return nil
+}
+
+type AuthRoleRevokePermissionRequest struct {
+	Role                 string   `protobuf:"bytes,1,opt,name=role,proto3" json:"role,omitempty"`
+	Key                  string   `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
+	RangeEnd             string   `protobuf:"bytes,3,opt,name=range_end,json=rangeEnd,proto3" json:"range_end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthRoleRevokePermissionRequest) Reset()         { *m = AuthRoleRevokePermissionRequest{} }
+func (m *AuthRoleRevokePermissionRequest) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionRequest) ProtoMessage()    {}
+func (*AuthRoleRevokePermissionRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{69}
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleRevokePermissionRequest.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleRevokePermissionRequest.Merge(m, src)
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleRevokePermissionRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleRevokePermissionRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleRevokePermissionRequest proto.InternalMessageInfo
+
+func (m *AuthRoleRevokePermissionRequest) GetRole() string {
+	if m != nil {
+		return m.Role
+	}
+	return ""
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetKey() string {
+	if m != nil {
+		return m.Key
+	}
+	return ""
+}
+
+func (m *AuthRoleRevokePermissionRequest) GetRangeEnd() string {
+	if m != nil {
+		return m.RangeEnd
+	}
+	return ""
+}
+
+type AuthEnableResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthEnableResponse) Reset()         { *m = AuthEnableResponse{} }
+func (m *AuthEnableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthEnableResponse) ProtoMessage()    {}
+func (*AuthEnableResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{70}
+}
+func (m *AuthEnableResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthEnableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthEnableResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthEnableResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthEnableResponse.Merge(m, src)
+}
+func (m *AuthEnableResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthEnableResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthEnableResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthEnableResponse proto.InternalMessageInfo
+
+func (m *AuthEnableResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthDisableResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthDisableResponse) Reset()         { *m = AuthDisableResponse{} }
+func (m *AuthDisableResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthDisableResponse) ProtoMessage()    {}
+func (*AuthDisableResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{71}
+}
+func (m *AuthDisableResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthDisableResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthDisableResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthDisableResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthDisableResponse.Merge(m, src)
+}
+func (m *AuthDisableResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthDisableResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthDisableResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthDisableResponse proto.InternalMessageInfo
+
+func (m *AuthDisableResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthenticateResponse struct {
+	Header *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// token is an authorized token that can be used in succeeding RPCs
+	Token                string   `protobuf:"bytes,2,opt,name=token,proto3" json:"token,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AuthenticateResponse) Reset()         { *m = AuthenticateResponse{} }
+func (m *AuthenticateResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthenticateResponse) ProtoMessage()    {}
+func (*AuthenticateResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{72}
+}
+func (m *AuthenticateResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthenticateResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthenticateResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthenticateResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthenticateResponse.Merge(m, src)
+}
+func (m *AuthenticateResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthenticateResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthenticateResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthenticateResponse proto.InternalMessageInfo
+
+func (m *AuthenticateResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthenticateResponse) GetToken() string {
+	if m != nil {
+		return m.Token
+	}
+	return ""
+}
+
+type AuthUserAddResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserAddResponse) Reset()         { *m = AuthUserAddResponse{} }
+func (m *AuthUserAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserAddResponse) ProtoMessage()    {}
+func (*AuthUserAddResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{73}
+}
+func (m *AuthUserAddResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserAddResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserAddResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserAddResponse.Merge(m, src)
+}
+func (m *AuthUserAddResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserAddResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserAddResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserAddResponse proto.InternalMessageInfo
+
+func (m *AuthUserAddResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserGetResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Roles                []string        `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserGetResponse) Reset()         { *m = AuthUserGetResponse{} }
+func (m *AuthUserGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGetResponse) ProtoMessage()    {}
+func (*AuthUserGetResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{74}
+}
+func (m *AuthUserGetResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserGetResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserGetResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserGetResponse.Merge(m, src)
+}
+func (m *AuthUserGetResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserGetResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserGetResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGetResponse proto.InternalMessageInfo
+
+func (m *AuthUserGetResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthUserGetResponse) GetRoles() []string {
+	if m != nil {
+		return m.Roles
+	}
+	return nil
+}
+
+type AuthUserDeleteResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserDeleteResponse) Reset()         { *m = AuthUserDeleteResponse{} }
+func (m *AuthUserDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserDeleteResponse) ProtoMessage()    {}
+func (*AuthUserDeleteResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{75}
+}
+func (m *AuthUserDeleteResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserDeleteResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserDeleteResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserDeleteResponse.Merge(m, src)
+}
+func (m *AuthUserDeleteResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserDeleteResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserDeleteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserDeleteResponse proto.InternalMessageInfo
+
+func (m *AuthUserDeleteResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserChangePasswordResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserChangePasswordResponse) Reset()         { *m = AuthUserChangePasswordResponse{} }
+func (m *AuthUserChangePasswordResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserChangePasswordResponse) ProtoMessage()    {}
+func (*AuthUserChangePasswordResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{76}
+}
+func (m *AuthUserChangePasswordResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserChangePasswordResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserChangePasswordResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserChangePasswordResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserChangePasswordResponse.Merge(m, src)
+}
+func (m *AuthUserChangePasswordResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserChangePasswordResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserChangePasswordResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserChangePasswordResponse proto.InternalMessageInfo
+
+func (m *AuthUserChangePasswordResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserGrantRoleResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserGrantRoleResponse) Reset()         { *m = AuthUserGrantRoleResponse{} }
+func (m *AuthUserGrantRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserGrantRoleResponse) ProtoMessage()    {}
+func (*AuthUserGrantRoleResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{77}
+}
+func (m *AuthUserGrantRoleResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserGrantRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserGrantRoleResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserGrantRoleResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserGrantRoleResponse.Merge(m, src)
+}
+func (m *AuthUserGrantRoleResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserGrantRoleResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserGrantRoleResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserGrantRoleResponse proto.InternalMessageInfo
+
+func (m *AuthUserGrantRoleResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthUserRevokeRoleResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserRevokeRoleResponse) Reset()         { *m = AuthUserRevokeRoleResponse{} }
+func (m *AuthUserRevokeRoleResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserRevokeRoleResponse) ProtoMessage()    {}
+func (*AuthUserRevokeRoleResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{78}
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserRevokeRoleResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserRevokeRoleResponse.Merge(m, src)
+}
+func (m *AuthUserRevokeRoleResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserRevokeRoleResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserRevokeRoleResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserRevokeRoleResponse proto.InternalMessageInfo
+
+func (m *AuthUserRevokeRoleResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleAddResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleAddResponse) Reset()         { *m = AuthRoleAddResponse{} }
+func (m *AuthRoleAddResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleAddResponse) ProtoMessage()    {}
+func (*AuthRoleAddResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{79}
+}
+func (m *AuthRoleAddResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleAddResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleAddResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleAddResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleAddResponse.Merge(m, src)
+}
+func (m *AuthRoleAddResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleAddResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleAddResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleAddResponse proto.InternalMessageInfo
+
+func (m *AuthRoleAddResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleGetResponse struct {
+	Header               *ResponseHeader      `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Perm                 []*authpb.Permission `protobuf:"bytes,2,rep,name=perm,proto3" json:"perm,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *AuthRoleGetResponse) Reset()         { *m = AuthRoleGetResponse{} }
+func (m *AuthRoleGetResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGetResponse) ProtoMessage()    {}
+func (*AuthRoleGetResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{80}
+}
+func (m *AuthRoleGetResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleGetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleGetResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleGetResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleGetResponse.Merge(m, src)
+}
+func (m *AuthRoleGetResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleGetResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleGetResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGetResponse proto.InternalMessageInfo
+
+func (m *AuthRoleGetResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthRoleGetResponse) GetPerm() []*authpb.Permission {
+	if m != nil {
+		return m.Perm
+	}
+	return nil
+}
+
+type AuthRoleListResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Roles                []string        `protobuf:"bytes,2,rep,name=roles,proto3" json:"roles,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleListResponse) Reset()         { *m = AuthRoleListResponse{} }
+func (m *AuthRoleListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleListResponse) ProtoMessage()    {}
+func (*AuthRoleListResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{81}
+}
+func (m *AuthRoleListResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleListResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleListResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleListResponse.Merge(m, src)
+}
+func (m *AuthRoleListResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleListResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleListResponse proto.InternalMessageInfo
+
+func (m *AuthRoleListResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthRoleListResponse) GetRoles() []string {
+	if m != nil {
+		return m.Roles
+	}
+	return nil
+}
+
+type AuthUserListResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Users                []string        `protobuf:"bytes,2,rep,name=users,proto3" json:"users,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthUserListResponse) Reset()         { *m = AuthUserListResponse{} }
+func (m *AuthUserListResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthUserListResponse) ProtoMessage()    {}
+func (*AuthUserListResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{82}
+}
+func (m *AuthUserListResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthUserListResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthUserListResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthUserListResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthUserListResponse.Merge(m, src)
+}
+func (m *AuthUserListResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthUserListResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthUserListResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthUserListResponse proto.InternalMessageInfo
+
+func (m *AuthUserListResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *AuthUserListResponse) GetUsers() []string {
+	if m != nil {
+		return m.Users
+	}
+	return nil
+}
+
+type AuthRoleDeleteResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleDeleteResponse) Reset()         { *m = AuthRoleDeleteResponse{} }
+func (m *AuthRoleDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleDeleteResponse) ProtoMessage()    {}
+func (*AuthRoleDeleteResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{83}
+}
+func (m *AuthRoleDeleteResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleDeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleDeleteResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleDeleteResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleDeleteResponse.Merge(m, src)
+}
+func (m *AuthRoleDeleteResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleDeleteResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleDeleteResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleDeleteResponse proto.InternalMessageInfo
+
+func (m *AuthRoleDeleteResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleGrantPermissionResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleGrantPermissionResponse) Reset()         { *m = AuthRoleGrantPermissionResponse{} }
+func (m *AuthRoleGrantPermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleGrantPermissionResponse) ProtoMessage()    {}
+func (*AuthRoleGrantPermissionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{84}
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleGrantPermissionResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleGrantPermissionResponse.Merge(m, src)
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleGrantPermissionResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleGrantPermissionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleGrantPermissionResponse proto.InternalMessageInfo
+
+func (m *AuthRoleGrantPermissionResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type AuthRoleRevokePermissionResponse struct {
+	Header               *ResponseHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *AuthRoleRevokePermissionResponse) Reset()         { *m = AuthRoleRevokePermissionResponse{} }
+func (m *AuthRoleRevokePermissionResponse) String() string { return proto.CompactTextString(m) }
+func (*AuthRoleRevokePermissionResponse) ProtoMessage()    {}
+func (*AuthRoleRevokePermissionResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_77a6da22d6a3feb1, []int{85}
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_AuthRoleRevokePermissionResponse.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AuthRoleRevokePermissionResponse.Merge(m, src)
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_Size() int {
+	return m.Size()
+}
+func (m *AuthRoleRevokePermissionResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_AuthRoleRevokePermissionResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AuthRoleRevokePermissionResponse proto.InternalMessageInfo
+
+func (m *AuthRoleRevokePermissionResponse) GetHeader() *ResponseHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterEnum("etcdserverpb.AlarmType", AlarmType_name, AlarmType_value)
+	proto.RegisterEnum("etcdserverpb.RangeRequest_SortOrder", RangeRequest_SortOrder_name, RangeRequest_SortOrder_value)
+	proto.RegisterEnum("etcdserverpb.RangeRequest_SortTarget", RangeRequest_SortTarget_name, RangeRequest_SortTarget_value)
+	proto.RegisterEnum("etcdserverpb.Compare_CompareResult", Compare_CompareResult_name, Compare_CompareResult_value)
+	proto.RegisterEnum("etcdserverpb.Compare_CompareTarget", Compare_CompareTarget_name, Compare_CompareTarget_value)
+	proto.RegisterEnum("etcdserverpb.WatchCreateRequest_FilterType", WatchCreateRequest_FilterType_name, WatchCreateRequest_FilterType_value)
+	proto.RegisterEnum("etcdserverpb.AlarmRequest_AlarmAction", AlarmRequest_AlarmAction_name, AlarmRequest_AlarmAction_value)
+	proto.RegisterType((*ResponseHeader)(nil), "etcdserverpb.ResponseHeader")
+	proto.RegisterType((*RangeRequest)(nil), "etcdserverpb.RangeRequest")
+	proto.RegisterType((*RangeResponse)(nil), "etcdserverpb.RangeResponse")
+	proto.RegisterType((*PutRequest)(nil), "etcdserverpb.PutRequest")
+	proto.RegisterType((*PutResponse)(nil), "etcdserverpb.PutResponse")
+	proto.RegisterType((*DeleteRangeRequest)(nil), "etcdserverpb.DeleteRangeRequest")
+	proto.RegisterType((*DeleteRangeResponse)(nil), "etcdserverpb.DeleteRangeResponse")
+	proto.RegisterType((*RequestOp)(nil), "etcdserverpb.RequestOp")
+	proto.RegisterType((*ResponseOp)(nil), "etcdserverpb.ResponseOp")
+	proto.RegisterType((*Compare)(nil), "etcdserverpb.Compare")
+	proto.RegisterType((*TxnRequest)(nil), "etcdserverpb.TxnRequest")
+	proto.RegisterType((*TxnResponse)(nil), "etcdserverpb.TxnResponse")
+	proto.RegisterType((*CompactionRequest)(nil), "etcdserverpb.CompactionRequest")
+	proto.RegisterType((*CompactionResponse)(nil), "etcdserverpb.CompactionResponse")
+	proto.RegisterType((*HashRequest)(nil), "etcdserverpb.HashRequest")
+	proto.RegisterType((*HashKVRequest)(nil), "etcdserverpb.HashKVRequest")
+	proto.RegisterType((*HashKVResponse)(nil), "etcdserverpb.HashKVResponse")
+	proto.RegisterType((*HashResponse)(nil), "etcdserverpb.HashResponse")
+	proto.RegisterType((*SnapshotRequest)(nil), "etcdserverpb.SnapshotRequest")
+	proto.RegisterType((*SnapshotResponse)(nil), "etcdserverpb.SnapshotResponse")
+	proto.RegisterType((*WatchRequest)(nil), "etcdserverpb.WatchRequest")
+	proto.RegisterType((*WatchCreateRequest)(nil), "etcdserverpb.WatchCreateRequest")
+	proto.RegisterType((*WatchCancelRequest)(nil), "etcdserverpb.WatchCancelRequest")
+	proto.RegisterType((*WatchProgressRequest)(nil), "etcdserverpb.WatchProgressRequest")
+	proto.RegisterType((*WatchResponse)(nil), "etcdserverpb.WatchResponse")
+	proto.RegisterType((*LeaseGrantRequest)(nil), "etcdserverpb.LeaseGrantRequest")
+	proto.RegisterType((*LeaseGrantResponse)(nil), "etcdserverpb.LeaseGrantResponse")
+	proto.RegisterType((*LeaseRevokeRequest)(nil), "etcdserverpb.LeaseRevokeRequest")
+	proto.RegisterType((*LeaseRevokeResponse)(nil), "etcdserverpb.LeaseRevokeResponse")
+	proto.RegisterType((*LeaseKeepAliveRequest)(nil), "etcdserverpb.LeaseKeepAliveRequest")
+	proto.RegisterType((*LeaseKeepAliveResponse)(nil), "etcdserverpb.LeaseKeepAliveResponse")
+	proto.RegisterType((*LeaseTimeToLiveRequest)(nil), "etcdserverpb.LeaseTimeToLiveRequest")
+	proto.RegisterType((*LeaseTimeToLiveResponse)(nil), "etcdserverpb.LeaseTimeToLiveResponse")
+	proto.RegisterType((*LeaseLeasesRequest)(nil), "etcdserverpb.LeaseLeasesRequest")
+	proto.RegisterType((*LeaseStatus)(nil), "etcdserverpb.LeaseStatus")
+	proto.RegisterType((*LeaseLeasesResponse)(nil), "etcdserverpb.LeaseLeasesResponse")
+	proto.RegisterType((*Member)(nil), "etcdserverpb.Member")
+	proto.RegisterType((*MemberAddRequest)(nil), "etcdserverpb.MemberAddRequest")
+	proto.RegisterType((*MemberAddResponse)(nil), "etcdserverpb.MemberAddResponse")
+	proto.RegisterType((*MemberRemoveRequest)(nil), "etcdserverpb.MemberRemoveRequest")
+	proto.RegisterType((*MemberRemoveResponse)(nil), "etcdserverpb.MemberRemoveResponse")
+	proto.RegisterType((*MemberUpdateRequest)(nil), "etcdserverpb.MemberUpdateRequest")
+	proto.RegisterType((*MemberUpdateResponse)(nil), "etcdserverpb.MemberUpdateResponse")
+	proto.RegisterType((*MemberListRequest)(nil), "etcdserverpb.MemberListRequest")
+	proto.RegisterType((*MemberListResponse)(nil), "etcdserverpb.MemberListResponse")
+	proto.RegisterType((*DefragmentRequest)(nil), "etcdserverpb.DefragmentRequest")
+	proto.RegisterType((*DefragmentResponse)(nil), "etcdserverpb.DefragmentResponse")
+	proto.RegisterType((*MoveLeaderRequest)(nil), "etcdserverpb.MoveLeaderRequest")
+	proto.RegisterType((*MoveLeaderResponse)(nil), "etcdserverpb.MoveLeaderResponse")
+	proto.RegisterType((*AlarmRequest)(nil), "etcdserverpb.AlarmRequest")
+	proto.RegisterType((*AlarmMember)(nil), "etcdserverpb.AlarmMember")
+	proto.RegisterType((*AlarmResponse)(nil), "etcdserverpb.AlarmResponse")
+	proto.RegisterType((*StatusRequest)(nil), "etcdserverpb.StatusRequest")
+	proto.RegisterType((*StatusResponse)(nil), "etcdserverpb.StatusResponse")
+	proto.RegisterType((*AuthEnableRequest)(nil), "etcdserverpb.AuthEnableRequest")
+	proto.RegisterType((*AuthDisableRequest)(nil), "etcdserverpb.AuthDisableRequest")
+	proto.RegisterType((*AuthenticateRequest)(nil), "etcdserverpb.AuthenticateRequest")
+	proto.RegisterType((*AuthUserAddRequest)(nil), "etcdserverpb.AuthUserAddRequest")
+	proto.RegisterType((*AuthUserGetRequest)(nil), "etcdserverpb.AuthUserGetRequest")
+	proto.RegisterType((*AuthUserDeleteRequest)(nil), "etcdserverpb.AuthUserDeleteRequest")
+	proto.RegisterType((*AuthUserChangePasswordRequest)(nil), "etcdserverpb.AuthUserChangePasswordRequest")
+	proto.RegisterType((*AuthUserGrantRoleRequest)(nil), "etcdserverpb.AuthUserGrantRoleRequest")
+	proto.RegisterType((*AuthUserRevokeRoleRequest)(nil), "etcdserverpb.AuthUserRevokeRoleRequest")
+	proto.RegisterType((*AuthRoleAddRequest)(nil), "etcdserverpb.AuthRoleAddRequest")
+	proto.RegisterType((*AuthRoleGetRequest)(nil), "etcdserverpb.AuthRoleGetRequest")
+	proto.RegisterType((*AuthUserListRequest)(nil), "etcdserverpb.AuthUserListRequest")
+	proto.RegisterType((*AuthRoleListRequest)(nil), "etcdserverpb.AuthRoleListRequest")
+	proto.RegisterType((*AuthRoleDeleteRequest)(nil), "etcdserverpb.AuthRoleDeleteRequest")
+	proto.RegisterType((*AuthRoleGrantPermissionRequest)(nil), "etcdserverpb.AuthRoleGrantPermissionRequest")
+	proto.RegisterType((*AuthRoleRevokePermissionRequest)(nil), "etcdserverpb.AuthRoleRevokePermissionRequest")
+	proto.RegisterType((*AuthEnableResponse)(nil), "etcdserverpb.AuthEnableResponse")
+	proto.RegisterType((*AuthDisableResponse)(nil), "etcdserverpb.AuthDisableResponse")
+	proto.RegisterType((*AuthenticateResponse)(nil), "etcdserverpb.AuthenticateResponse")
+	proto.RegisterType((*AuthUserAddResponse)(nil), "etcdserverpb.AuthUserAddResponse")
+	proto.RegisterType((*AuthUserGetResponse)(nil), "etcdserverpb.AuthUserGetResponse")
+	proto.RegisterType((*AuthUserDeleteResponse)(nil), "etcdserverpb.AuthUserDeleteResponse")
+	proto.RegisterType((*AuthUserChangePasswordResponse)(nil), "etcdserverpb.AuthUserChangePasswordResponse")
+	proto.RegisterType((*AuthUserGrantRoleResponse)(nil), "etcdserverpb.AuthUserGrantRoleResponse")
+	proto.RegisterType((*AuthUserRevokeRoleResponse)(nil), "etcdserverpb.AuthUserRevokeRoleResponse")
+	proto.RegisterType((*AuthRoleAddResponse)(nil), "etcdserverpb.AuthRoleAddResponse")
+	proto.RegisterType((*AuthRoleGetResponse)(nil), "etcdserverpb.AuthRoleGetResponse")
+	proto.RegisterType((*AuthRoleListResponse)(nil), "etcdserverpb.AuthRoleListResponse")
+	proto.RegisterType((*AuthUserListResponse)(nil), "etcdserverpb.AuthUserListResponse")
+	proto.RegisterType((*AuthRoleDeleteResponse)(nil), "etcdserverpb.AuthRoleDeleteResponse")
+	proto.RegisterType((*AuthRoleGrantPermissionResponse)(nil), "etcdserverpb.AuthRoleGrantPermissionResponse")
+	proto.RegisterType((*AuthRoleRevokePermissionResponse)(nil), "etcdserverpb.AuthRoleRevokePermissionResponse")
+}
+
+func init() { proto.RegisterFile("rpc.proto", fileDescriptor_77a6da22d6a3feb1) }
+
+var fileDescriptor_77a6da22d6a3feb1 = []byte{
+	// 3711 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x5b, 0x5b, 0x73, 0x1b, 0x47,
+	0x76, 0xe6, 0x00, 0x04, 0x40, 0x1c, 0x5c, 0x08, 0x35, 0x29, 0x09, 0x84, 0x24, 0x8a, 0x6a, 0xdd,
+	0xa8, 0x8b, 0x09, 0x9b, 0x76, 0xf2, 0xa0, 0xa4, 0x5c, 0xa6, 0x48, 0x58, 0xa4, 0x49, 0x91, 0xf4,
+	0x10, 0x94, 0x9d, 0x2a, 0x27, 0xac, 0x21, 0xd0, 0x22, 0x11, 0x02, 0x33, 0xc8, 0xcc, 0x00, 0x22,
+	0x15, 0x57, 0x52, 0xe5, 0x38, 0xae, 0x3c, 0xc7, 0x55, 0xa9, 0x24, 0xaf, 0x5b, 0x5b, 0x2e, 0xff,
+	0x82, 0xfd, 0x0b, 0x5b, 0xfb, 0xb2, 0xbb, 0xb5, 0x7f, 0x60, 0xcb, 0xbb, 0x2f, 0xfb, 0x0b, 0xf6,
+	0xf2, 0xb4, 0xd5, 0xb7, 0x99, 0x9e, 0x1b, 0x48, 0x1b, 0xb6, 0x5f, 0xa8, 0xe9, 0xd3, 0xa7, 0xcf,
+	0x39, 0x7d, 0xba, 0xcf, 0x39, 0xdd, 0x5f, 0x43, 0x90, 0xb7, 0xfb, 0xad, 0xa5, 0xbe, 0x6d, 0xb9,
+	0x16, 0x2a, 0x12, 0xb7, 0xd5, 0x76, 0x88, 0x3d, 0x24, 0x76, 0xff, 0xb0, 0x36, 0x7b, 0x64, 0x1d,
+	0x59, 0xac, 0xa3, 0x4e, 0xbf, 0x38, 0x4f, 0x6d, 0x8e, 0xf2, 0xd4, 0x7b, 0xc3, 0x56, 0x8b, 0xfd,
+	0xe9, 0x1f, 0xd6, 0x4f, 0x86, 0xa2, 0xeb, 0x1a, 0xeb, 0x32, 0x06, 0xee, 0x31, 0xfb, 0xd3, 0x3f,
+	0x64, 0xff, 0x88, 0xce, 0xeb, 0x47, 0x96, 0x75, 0xd4, 0x25, 0x75, 0xa3, 0xdf, 0xa9, 0x1b, 0xa6,
+	0x69, 0xb9, 0x86, 0xdb, 0xb1, 0x4c, 0x87, 0xf7, 0xe2, 0xff, 0xd4, 0xa0, 0xac, 0x13, 0xa7, 0x6f,
+	0x99, 0x0e, 0x59, 0x27, 0x46, 0x9b, 0xd8, 0xe8, 0x06, 0x40, 0xab, 0x3b, 0x70, 0x5c, 0x62, 0x1f,
+	0x74, 0xda, 0x55, 0x6d, 0x41, 0x5b, 0x9c, 0xd4, 0xf3, 0x82, 0xb2, 0xd1, 0x46, 0xd7, 0x20, 0xdf,
+	0x23, 0xbd, 0x43, 0xde, 0x9b, 0x62, 0xbd, 0x53, 0x9c, 0xb0, 0xd1, 0x46, 0x35, 0x98, 0xb2, 0xc9,
+	0xb0, 0xe3, 0x74, 0x2c, 0xb3, 0x9a, 0x5e, 0xd0, 0x16, 0xd3, 0xba, 0xd7, 0xa6, 0x03, 0x6d, 0xe3,
+	0xa5, 0x7b, 0xe0, 0x12, 0xbb, 0x57, 0x9d, 0xe4, 0x03, 0x29, 0xa1, 0x49, 0xec, 0x1e, 0xfe, 0x3c,
+	0x03, 0x45, 0xdd, 0x30, 0x8f, 0x88, 0x4e, 0xfe, 0x65, 0x40, 0x1c, 0x17, 0x55, 0x20, 0x7d, 0x42,
+	0xce, 0x98, 0xfa, 0xa2, 0x4e, 0x3f, 0xf9, 0x78, 0xf3, 0x88, 0x1c, 0x10, 0x93, 0x2b, 0x2e, 0xd2,
+	0xf1, 0xe6, 0x11, 0x69, 0x98, 0x6d, 0x34, 0x0b, 0x99, 0x6e, 0xa7, 0xd7, 0x71, 0x85, 0x56, 0xde,
+	0x08, 0x98, 0x33, 0x19, 0x32, 0x67, 0x15, 0xc0, 0xb1, 0x6c, 0xf7, 0xc0, 0xb2, 0xdb, 0xc4, 0xae,
+	0x66, 0x16, 0xb4, 0xc5, 0xf2, 0xf2, 0x9d, 0x25, 0x75, 0x21, 0x96, 0x54, 0x83, 0x96, 0xf6, 0x2c,
+	0xdb, 0xdd, 0xa1, 0xbc, 0x7a, 0xde, 0x91, 0x9f, 0xe8, 0x7d, 0x28, 0x30, 0x21, 0xae, 0x61, 0x1f,
+	0x11, 0xb7, 0x9a, 0x65, 0x52, 0xee, 0x9e, 0x23, 0xa5, 0xc9, 0x98, 0x75, 0xa6, 0x9e, 0x7f, 0x23,
+	0x0c, 0x45, 0x87, 0xd8, 0x1d, 0xa3, 0xdb, 0x79, 0x6d, 0x1c, 0x76, 0x49, 0x35, 0xb7, 0xa0, 0x2d,
+	0x4e, 0xe9, 0x01, 0x1a, 0x9d, 0xff, 0x09, 0x39, 0x73, 0x0e, 0x2c, 0xb3, 0x7b, 0x56, 0x9d, 0x62,
+	0x0c, 0x53, 0x94, 0xb0, 0x63, 0x76, 0xcf, 0xd8, 0xa2, 0x59, 0x03, 0xd3, 0xe5, 0xbd, 0x79, 0xd6,
+	0x9b, 0x67, 0x14, 0xd6, 0xbd, 0x08, 0x95, 0x5e, 0xc7, 0x3c, 0xe8, 0x59, 0xed, 0x03, 0xcf, 0x21,
+	0xc0, 0x1c, 0x52, 0xee, 0x75, 0xcc, 0xe7, 0x56, 0x5b, 0x97, 0x6e, 0xa1, 0x9c, 0xc6, 0x69, 0x90,
+	0xb3, 0x20, 0x38, 0x8d, 0x53, 0x95, 0x73, 0x09, 0x66, 0xa8, 0xcc, 0x96, 0x4d, 0x0c, 0x97, 0xf8,
+	0xcc, 0x45, 0xc6, 0x7c, 0xa9, 0xd7, 0x31, 0x57, 0x59, 0x4f, 0x80, 0xdf, 0x38, 0x8d, 0xf0, 0x97,
+	0x04, 0xbf, 0x71, 0x1a, 0xe4, 0xc7, 0x4b, 0x90, 0xf7, 0x7c, 0x8e, 0xa6, 0x60, 0x72, 0x7b, 0x67,
+	0xbb, 0x51, 0x99, 0x40, 0x00, 0xd9, 0x95, 0xbd, 0xd5, 0xc6, 0xf6, 0x5a, 0x45, 0x43, 0x05, 0xc8,
+	0xad, 0x35, 0x78, 0x23, 0x85, 0x9f, 0x02, 0xf8, 0xde, 0x45, 0x39, 0x48, 0x6f, 0x36, 0xfe, 0xa1,
+	0x32, 0x41, 0x79, 0x5e, 0x34, 0xf4, 0xbd, 0x8d, 0x9d, 0xed, 0x8a, 0x46, 0x07, 0xaf, 0xea, 0x8d,
+	0x95, 0x66, 0xa3, 0x92, 0xa2, 0x1c, 0xcf, 0x77, 0xd6, 0x2a, 0x69, 0x94, 0x87, 0xcc, 0x8b, 0x95,
+	0xad, 0xfd, 0x46, 0x65, 0x12, 0x7f, 0xa9, 0x41, 0x49, 0xac, 0x17, 0x8f, 0x09, 0xf4, 0x0e, 0x64,
+	0x8f, 0x59, 0x5c, 0xb0, 0xad, 0x58, 0x58, 0xbe, 0x1e, 0x5a, 0xdc, 0x40, 0xec, 0xe8, 0x82, 0x17,
+	0x61, 0x48, 0x9f, 0x0c, 0x9d, 0x6a, 0x6a, 0x21, 0xbd, 0x58, 0x58, 0xae, 0x2c, 0xf1, 0x80, 0x5d,
+	0xda, 0x24, 0x67, 0x2f, 0x8c, 0xee, 0x80, 0xe8, 0xb4, 0x13, 0x21, 0x98, 0xec, 0x59, 0x36, 0x61,
+	0x3b, 0x76, 0x4a, 0x67, 0xdf, 0x74, 0x1b, 0xb3, 0x45, 0x13, 0xbb, 0x95, 0x37, 0xf0, 0xd7, 0x1a,
+	0xc0, 0xee, 0xc0, 0x4d, 0x0e, 0x8d, 0x59, 0xc8, 0x0c, 0xa9, 0x60, 0x11, 0x16, 0xbc, 0xc1, 0x62,
+	0x82, 0x18, 0x0e, 0xf1, 0x62, 0x82, 0x36, 0xd0, 0x55, 0xc8, 0xf5, 0x6d, 0x32, 0x3c, 0x38, 0x19,
+	0x32, 0x25, 0x53, 0x7a, 0x96, 0x36, 0x37, 0x87, 0xe8, 0x16, 0x14, 0x3b, 0x47, 0xa6, 0x65, 0x93,
+	0x03, 0x2e, 0x2b, 0xc3, 0x7a, 0x0b, 0x9c, 0xc6, 0xec, 0x56, 0x58, 0xb8, 0xe0, 0xac, 0xca, 0xb2,
+	0x45, 0x49, 0xd8, 0x84, 0x02, 0x33, 0x75, 0x2c, 0xf7, 0x3d, 0xf0, 0x6d, 0x4c, 0xb1, 0x61, 0x51,
+	0x17, 0x0a, 0xab, 0xf1, 0x27, 0x80, 0xd6, 0x48, 0x97, 0xb8, 0x64, 0x9c, 0xec, 0xa1, 0xf8, 0x24,
+	0xad, 0xfa, 0x04, 0xff, 0xb7, 0x06, 0x33, 0x01, 0xf1, 0x63, 0x4d, 0xab, 0x0a, 0xb9, 0x36, 0x13,
+	0xc6, 0x2d, 0x48, 0xeb, 0xb2, 0x89, 0x1e, 0xc1, 0x94, 0x30, 0xc0, 0xa9, 0xa6, 0x13, 0x36, 0x4d,
+	0x8e, 0xdb, 0xe4, 0xe0, 0xaf, 0x53, 0x90, 0x17, 0x13, 0xdd, 0xe9, 0xa3, 0x15, 0x28, 0xd9, 0xbc,
+	0x71, 0xc0, 0xe6, 0x23, 0x2c, 0xaa, 0x25, 0x27, 0xa1, 0xf5, 0x09, 0xbd, 0x28, 0x86, 0x30, 0x32,
+	0xfa, 0x3b, 0x28, 0x48, 0x11, 0xfd, 0x81, 0x2b, 0x5c, 0x5e, 0x0d, 0x0a, 0xf0, 0xf7, 0xdf, 0xfa,
+	0x84, 0x0e, 0x82, 0x7d, 0x77, 0xe0, 0xa2, 0x26, 0xcc, 0xca, 0xc1, 0x7c, 0x36, 0xc2, 0x8c, 0x34,
+	0x93, 0xb2, 0x10, 0x94, 0x12, 0x5d, 0xaa, 0xf5, 0x09, 0x1d, 0x89, 0xf1, 0x4a, 0xa7, 0x6a, 0x92,
+	0x7b, 0xca, 0x93, 0x77, 0xc4, 0xa4, 0xe6, 0xa9, 0x19, 0x35, 0xa9, 0x79, 0x6a, 0x3e, 0xcd, 0x43,
+	0x4e, 0xb4, 0xf0, 0xcf, 0x52, 0x00, 0x72, 0x35, 0x76, 0xfa, 0x68, 0x0d, 0xca, 0xb6, 0x68, 0x05,
+	0xbc, 0x75, 0x2d, 0xd6, 0x5b, 0x62, 0x11, 0x27, 0xf4, 0x92, 0x1c, 0xc4, 0x8d, 0x7b, 0x17, 0x8a,
+	0x9e, 0x14, 0xdf, 0x61, 0x73, 0x31, 0x0e, 0xf3, 0x24, 0x14, 0xe4, 0x00, 0xea, 0xb2, 0x8f, 0xe0,
+	0xb2, 0x37, 0x3e, 0xc6, 0x67, 0xb7, 0x46, 0xf8, 0xcc, 0x13, 0x38, 0x23, 0x25, 0xa8, 0x5e, 0x53,
+	0x0d, 0xf3, 0xdd, 0x36, 0x17, 0xe3, 0xb6, 0xa8, 0x61, 0xd4, 0x71, 0x40, 0xeb, 0x25, 0x6f, 0xe2,
+	0x3f, 0xa4, 0x21, 0xb7, 0x6a, 0xf5, 0xfa, 0x86, 0x4d, 0x57, 0x23, 0x6b, 0x13, 0x67, 0xd0, 0x75,
+	0x99, 0xbb, 0xca, 0xcb, 0xb7, 0x83, 0x12, 0x05, 0x9b, 0xfc, 0x57, 0x67, 0xac, 0xba, 0x18, 0x42,
+	0x07, 0x8b, 0xf2, 0x98, 0xba, 0xc0, 0x60, 0x51, 0x1c, 0xc5, 0x10, 0x19, 0xc8, 0x69, 0x3f, 0x90,
+	0x6b, 0x90, 0x1b, 0x12, 0xdb, 0x2f, 0xe9, 0xeb, 0x13, 0xba, 0x24, 0xa0, 0x07, 0x30, 0x1d, 0x2e,
+	0x2f, 0x19, 0xc1, 0x53, 0x6e, 0x05, 0xab, 0xd1, 0x6d, 0x28, 0x06, 0x6a, 0x5c, 0x56, 0xf0, 0x15,
+	0x7a, 0x4a, 0x89, 0xbb, 0x22, 0xf3, 0x2a, 0xad, 0xc7, 0xc5, 0xf5, 0x09, 0x99, 0x59, 0xaf, 0xc8,
+	0xcc, 0x3a, 0x25, 0x46, 0x89, 0xdc, 0x1a, 0x48, 0x32, 0xef, 0x05, 0x93, 0x0c, 0x7e, 0x0f, 0x4a,
+	0x01, 0x07, 0xd1, 0xba, 0xd3, 0xf8, 0x70, 0x7f, 0x65, 0x8b, 0x17, 0xa9, 0x67, 0xac, 0x2e, 0xe9,
+	0x15, 0x8d, 0xd6, 0xba, 0xad, 0xc6, 0xde, 0x5e, 0x25, 0x85, 0x4a, 0x90, 0xdf, 0xde, 0x69, 0x1e,
+	0x70, 0xae, 0x34, 0x7e, 0xe6, 0x49, 0x10, 0x45, 0x4e, 0xa9, 0x6d, 0x13, 0x4a, 0x6d, 0xd3, 0x64,
+	0x6d, 0x4b, 0xf9, 0xb5, 0x8d, 0x95, 0xb9, 0xad, 0xc6, 0xca, 0x5e, 0xa3, 0x32, 0xf9, 0xb4, 0x0c,
+	0x45, 0xee, 0xdf, 0x83, 0x81, 0x49, 0x4b, 0xed, 0x4f, 0x34, 0x00, 0x3f, 0x9a, 0x50, 0x1d, 0x72,
+	0x2d, 0xae, 0xa7, 0xaa, 0xb1, 0x64, 0x74, 0x39, 0x76, 0xc9, 0x74, 0xc9, 0x85, 0xde, 0x82, 0x9c,
+	0x33, 0x68, 0xb5, 0x88, 0x23, 0x4b, 0xde, 0xd5, 0x70, 0x3e, 0x14, 0xd9, 0x4a, 0x97, 0x7c, 0x74,
+	0xc8, 0x4b, 0xa3, 0xd3, 0x1d, 0xb0, 0x02, 0x38, 0x7a, 0x88, 0xe0, 0xc3, 0xff, 0xa7, 0x41, 0x41,
+	0xd9, 0xbc, 0xdf, 0x31, 0x09, 0x5f, 0x87, 0x3c, 0xb3, 0x81, 0xb4, 0x45, 0x1a, 0x9e, 0xd2, 0x7d,
+	0x02, 0xfa, 0x5b, 0xc8, 0xcb, 0x08, 0x90, 0x99, 0xb8, 0x1a, 0x2f, 0x76, 0xa7, 0xaf, 0xfb, 0xac,
+	0x78, 0x13, 0x2e, 0x31, 0xaf, 0xb4, 0xe8, 0xe1, 0x5a, 0xfa, 0x51, 0x3d, 0x7e, 0x6a, 0xa1, 0xe3,
+	0x67, 0x0d, 0xa6, 0xfa, 0xc7, 0x67, 0x4e, 0xa7, 0x65, 0x74, 0x85, 0x15, 0x5e, 0x1b, 0x7f, 0x00,
+	0x48, 0x15, 0x36, 0xce, 0x74, 0x71, 0x09, 0x0a, 0xeb, 0x86, 0x73, 0x2c, 0x4c, 0xc2, 0x8f, 0xa0,
+	0x44, 0x9b, 0x9b, 0x2f, 0x2e, 0x60, 0x23, 0xbb, 0x1c, 0x48, 0xee, 0xb1, 0x7c, 0x8e, 0x60, 0xf2,
+	0xd8, 0x70, 0x8e, 0xd9, 0x44, 0x4b, 0x3a, 0xfb, 0x46, 0x0f, 0xa0, 0xd2, 0xe2, 0x93, 0x3c, 0x08,
+	0x5d, 0x19, 0xa6, 0x05, 0xdd, 0x3b, 0x09, 0x7e, 0x0c, 0x45, 0x3e, 0x87, 0xef, 0xdb, 0x08, 0x7c,
+	0x09, 0xa6, 0xf7, 0x4c, 0xa3, 0xef, 0x1c, 0x5b, 0xb2, 0xba, 0xd1, 0x49, 0x57, 0x7c, 0xda, 0x58,
+	0x1a, 0xef, 0xc3, 0xb4, 0x4d, 0x7a, 0x46, 0xc7, 0xec, 0x98, 0x47, 0x07, 0x87, 0x67, 0x2e, 0x71,
+	0xc4, 0x85, 0xa9, 0xec, 0x91, 0x9f, 0x52, 0x2a, 0x35, 0xed, 0xb0, 0x6b, 0x1d, 0x8a, 0x34, 0xc7,
+	0xbe, 0xf1, 0x17, 0x29, 0x28, 0x7e, 0x64, 0xb8, 0x2d, 0xb9, 0x74, 0x68, 0x03, 0xca, 0x5e, 0x72,
+	0x63, 0x14, 0x61, 0x4b, 0xa8, 0xc4, 0xb2, 0x31, 0xf2, 0x28, 0x2d, 0xab, 0x63, 0xa9, 0xa5, 0x12,
+	0x98, 0x28, 0xc3, 0x6c, 0x91, 0xae, 0x27, 0x2a, 0x95, 0x2c, 0x8a, 0x31, 0xaa, 0xa2, 0x54, 0x02,
+	0xda, 0x81, 0x4a, 0xdf, 0xb6, 0x8e, 0x6c, 0xe2, 0x38, 0x9e, 0x30, 0x5e, 0xc6, 0x70, 0x8c, 0xb0,
+	0x5d, 0xc1, 0xea, 0x8b, 0x9b, 0xee, 0x07, 0x49, 0x4f, 0xa7, 0xfd, 0xf3, 0x0c, 0x4f, 0x4e, 0xbf,
+	0x4e, 0x01, 0x8a, 0x4e, 0xea, 0xdb, 0x1e, 0xf1, 0xee, 0x42, 0xd9, 0x71, 0x0d, 0x3b, 0xb2, 0xd9,
+	0x4a, 0x8c, 0xea, 0x65, 0xfc, 0xfb, 0xe0, 0x19, 0x74, 0x60, 0x5a, 0x6e, 0xe7, 0xe5, 0x99, 0x38,
+	0x25, 0x97, 0x25, 0x79, 0x9b, 0x51, 0x51, 0x03, 0x72, 0x2f, 0x3b, 0x5d, 0x97, 0xd8, 0x4e, 0x35,
+	0xb3, 0x90, 0x5e, 0x2c, 0x2f, 0x3f, 0x3a, 0x6f, 0x19, 0x96, 0xde, 0x67, 0xfc, 0xcd, 0xb3, 0x3e,
+	0xd1, 0xe5, 0x58, 0xf5, 0xe4, 0x99, 0x0d, 0x9c, 0xc6, 0xe7, 0x60, 0xea, 0x15, 0x15, 0x41, 0x6f,
+	0xd9, 0x39, 0x7e, 0x58, 0x64, 0x6d, 0x7e, 0xc9, 0x7e, 0x69, 0x1b, 0x47, 0x3d, 0x62, 0xba, 0xf2,
+	0x1e, 0x28, 0xdb, 0xf8, 0x2e, 0x80, 0xaf, 0x86, 0xa6, 0xfc, 0xed, 0x9d, 0xdd, 0xfd, 0x66, 0x65,
+	0x02, 0x15, 0x61, 0x6a, 0x7b, 0x67, 0xad, 0xb1, 0xd5, 0xa0, 0xf5, 0x01, 0xd7, 0xa5, 0x4b, 0x03,
+	0x6b, 0xa9, 0xea, 0xd4, 0x02, 0x3a, 0xf1, 0x15, 0x98, 0x8d, 0x5b, 0x40, 0x7a, 0x16, 0x2d, 0x89,
+	0x5d, 0x3a, 0x56, 0xa8, 0xa8, 0xaa, 0x53, 0xc1, 0xe9, 0x56, 0x21, 0xc7, 0x77, 0x6f, 0x5b, 0x1c,
+	0xce, 0x65, 0x93, 0x3a, 0x82, 0x6f, 0x46, 0xd2, 0x16, 0xab, 0xe4, 0xb5, 0x63, 0xd3, 0x4b, 0x26,
+	0x36, 0xbd, 0xa0, 0xdb, 0x50, 0xf2, 0xa2, 0xc1, 0x70, 0xc4, 0x59, 0x20, 0xaf, 0x17, 0xe5, 0x46,
+	0xa7, 0xb4, 0x80, 0xd3, 0x73, 0x41, 0xa7, 0xa3, 0xbb, 0x90, 0x25, 0x43, 0x62, 0xba, 0x4e, 0xb5,
+	0xc0, 0x2a, 0x46, 0x49, 0x9e, 0xdd, 0x1b, 0x94, 0xaa, 0x8b, 0x4e, 0xfc, 0x37, 0x70, 0x89, 0xdd,
+	0x91, 0x9e, 0xd9, 0x86, 0xa9, 0x5e, 0xe6, 0x9a, 0xcd, 0x2d, 0xe1, 0x6e, 0xfa, 0x89, 0xca, 0x90,
+	0xda, 0x58, 0x13, 0x4e, 0x48, 0x6d, 0xac, 0xe1, 0xcf, 0x34, 0x40, 0xea, 0xb8, 0xb1, 0xfc, 0x1c,
+	0x12, 0x2e, 0xd5, 0xa7, 0x7d, 0xf5, 0xb3, 0x90, 0x21, 0xb6, 0x6d, 0xd9, 0xcc, 0xa3, 0x79, 0x9d,
+	0x37, 0xf0, 0x1d, 0x61, 0x83, 0x4e, 0x86, 0xd6, 0x89, 0x17, 0x83, 0x5c, 0x9a, 0xe6, 0x99, 0xba,
+	0x09, 0x33, 0x01, 0xae, 0xb1, 0x2a, 0xd7, 0x7d, 0xb8, 0xcc, 0x84, 0x6d, 0x12, 0xd2, 0x5f, 0xe9,
+	0x76, 0x86, 0x89, 0x5a, 0xfb, 0x70, 0x25, 0xcc, 0xf8, 0xc3, 0xfa, 0x08, 0xff, 0xbd, 0xd0, 0xd8,
+	0xec, 0xf4, 0x48, 0xd3, 0xda, 0x4a, 0xb6, 0x8d, 0x66, 0xf6, 0x13, 0x72, 0xe6, 0x88, 0x12, 0xcf,
+	0xbe, 0xf1, 0x4f, 0x35, 0xb8, 0x1a, 0x19, 0xfe, 0x03, 0xaf, 0xea, 0x3c, 0xc0, 0x11, 0xdd, 0x3e,
+	0xa4, 0x4d, 0x3b, 0x38, 0xba, 0xa0, 0x50, 0x3c, 0x3b, 0x69, 0x2e, 0x2b, 0x0a, 0x3b, 0x67, 0xc5,
+	0x9a, 0xb3, 0x3f, 0x5e, 0xc4, 0xdf, 0x80, 0x02, 0x23, 0xec, 0xb9, 0x86, 0x3b, 0x70, 0x22, 0x8b,
+	0xf1, 0x6f, 0x62, 0x0b, 0xc8, 0x41, 0x63, 0xcd, 0xeb, 0x2d, 0xc8, 0xb2, 0x83, 0xb5, 0x3c, 0x56,
+	0x86, 0x6e, 0x32, 0x8a, 0x1d, 0xba, 0x60, 0xc4, 0xc7, 0x90, 0x7d, 0xce, 0xd0, 0x48, 0xc5, 0xb2,
+	0x49, 0xb9, 0x14, 0xa6, 0xd1, 0xe3, 0x18, 0x49, 0x5e, 0x67, 0xdf, 0xec, 0x14, 0x46, 0x88, 0xbd,
+	0xaf, 0x6f, 0xf1, 0xd3, 0x5e, 0x5e, 0xf7, 0xda, 0xd4, 0x65, 0xad, 0x6e, 0x87, 0x98, 0x2e, 0xeb,
+	0x9d, 0x64, 0xbd, 0x0a, 0x05, 0x2f, 0x41, 0x85, 0x6b, 0x5a, 0x69, 0xb7, 0x95, 0xd3, 0x94, 0x27,
+	0x4f, 0x0b, 0xca, 0xc3, 0x5f, 0x69, 0x70, 0x49, 0x19, 0x30, 0x96, 0x63, 0x1e, 0x43, 0x96, 0x63,
+	0xae, 0xa2, 0x70, 0xcf, 0x06, 0x47, 0x71, 0x35, 0xba, 0xe0, 0x41, 0x4b, 0x90, 0xe3, 0x5f, 0xf2,
+	0x48, 0x1b, 0xcf, 0x2e, 0x99, 0xf0, 0x5d, 0x98, 0x11, 0x24, 0xd2, 0xb3, 0xe2, 0xf6, 0x36, 0x73,
+	0x28, 0xfe, 0x14, 0x66, 0x83, 0x6c, 0x63, 0x4d, 0x49, 0x31, 0x32, 0x75, 0x11, 0x23, 0x57, 0xa4,
+	0x91, 0xfb, 0xfd, 0xb6, 0x72, 0x2c, 0x08, 0xaf, 0xba, 0xba, 0x22, 0xa9, 0xd0, 0x8a, 0x78, 0x13,
+	0x90, 0x22, 0x7e, 0xd4, 0x09, 0xcc, 0xc8, 0xed, 0xb0, 0xd5, 0x71, 0xbc, 0xd3, 0xe7, 0x6b, 0x40,
+	0x2a, 0xf1, 0xc7, 0x36, 0x68, 0x8d, 0xc8, 0xa2, 0x26, 0x0d, 0xfa, 0x00, 0x90, 0x4a, 0x1c, 0x2b,
+	0xa3, 0xd7, 0xe1, 0xd2, 0x73, 0x6b, 0x48, 0x53, 0x03, 0xa5, 0xfa, 0x21, 0xc3, 0xef, 0xa2, 0xde,
+	0xb2, 0x79, 0x6d, 0xaa, 0x5c, 0x1d, 0x30, 0x96, 0xf2, 0x5f, 0x6a, 0x50, 0x5c, 0xe9, 0x1a, 0x76,
+	0x4f, 0x2a, 0x7e, 0x17, 0xb2, 0xfc, 0x86, 0x25, 0x40, 0x8d, 0x7b, 0x41, 0x31, 0x2a, 0x2f, 0x6f,
+	0xac, 0xf0, 0xfb, 0x98, 0x18, 0x45, 0x0d, 0x17, 0xef, 0x1e, 0x6b, 0xa1, 0x77, 0x90, 0x35, 0xf4,
+	0x06, 0x64, 0x0c, 0x3a, 0x84, 0xa5, 0xe0, 0x72, 0xf8, 0x6e, 0xcb, 0xa4, 0xb1, 0x73, 0x20, 0xe7,
+	0xc2, 0xef, 0x40, 0x41, 0xd1, 0x40, 0x6f, 0xef, 0xcf, 0x1a, 0xe2, 0xd0, 0xb6, 0xb2, 0xda, 0xdc,
+	0x78, 0xc1, 0x2f, 0xf5, 0x65, 0x80, 0xb5, 0x86, 0xd7, 0x4e, 0xe1, 0x8f, 0xc5, 0x28, 0x91, 0xef,
+	0x54, 0x7b, 0xb4, 0x24, 0x7b, 0x52, 0x17, 0xb2, 0xe7, 0x14, 0x4a, 0x62, 0xfa, 0xe3, 0xa6, 0x6f,
+	0x26, 0x2f, 0x21, 0x7d, 0x2b, 0xc6, 0xeb, 0x82, 0x11, 0x4f, 0x43, 0x49, 0x24, 0x74, 0xb1, 0xff,
+	0x7e, 0xa1, 0x41, 0x59, 0x52, 0xc6, 0x05, 0x5f, 0x25, 0x6e, 0xc4, 0x2b, 0x80, 0x87, 0x1a, 0x5d,
+	0x81, 0x6c, 0xfb, 0x70, 0xaf, 0xf3, 0x5a, 0x02, 0xe5, 0xa2, 0x45, 0xe9, 0x5d, 0xae, 0x87, 0xbf,
+	0x56, 0x89, 0x16, 0xba, 0xce, 0x1f, 0xb2, 0x36, 0xcc, 0x36, 0x39, 0x65, 0x67, 0xca, 0x49, 0xdd,
+	0x27, 0xb0, 0x0b, 0xb5, 0x78, 0xd5, 0x62, 0x07, 0x49, 0xf5, 0x95, 0x6b, 0x06, 0x2e, 0xad, 0x0c,
+	0xdc, 0xe3, 0x86, 0x69, 0x1c, 0x76, 0x65, 0xc6, 0xa2, 0x65, 0x96, 0x12, 0xd7, 0x3a, 0x8e, 0x4a,
+	0x6d, 0xc0, 0x0c, 0xa5, 0x12, 0xd3, 0xed, 0xb4, 0x94, 0xf4, 0x26, 0x8b, 0x98, 0x16, 0x2a, 0x62,
+	0x86, 0xe3, 0xbc, 0xb2, 0xec, 0xb6, 0x98, 0x9a, 0xd7, 0xc6, 0x6b, 0x5c, 0xf8, 0xbe, 0x13, 0x28,
+	0x53, 0xdf, 0x56, 0xca, 0xa2, 0x2f, 0xe5, 0x19, 0x71, 0x47, 0x48, 0xc1, 0x8f, 0xe0, 0xb2, 0xe4,
+	0x14, 0xc0, 0xe4, 0x08, 0xe6, 0x1d, 0xb8, 0x21, 0x99, 0x57, 0x8f, 0xe9, 0x45, 0x6d, 0x57, 0x28,
+	0xfc, 0xae, 0x76, 0x3e, 0x85, 0xaa, 0x67, 0x27, 0x3b, 0x2c, 0x5b, 0x5d, 0xd5, 0x80, 0x81, 0x23,
+	0xf6, 0x4c, 0x5e, 0x67, 0xdf, 0x94, 0x66, 0x5b, 0x5d, 0xef, 0x48, 0x40, 0xbf, 0xf1, 0x2a, 0xcc,
+	0x49, 0x19, 0xe2, 0x18, 0x1b, 0x14, 0x12, 0x31, 0x28, 0x4e, 0x88, 0x70, 0x18, 0x1d, 0x3a, 0xda,
+	0xed, 0x2a, 0x67, 0xd0, 0xb5, 0x4c, 0xa6, 0xa6, 0xc8, 0xbc, 0xcc, 0x77, 0x04, 0x35, 0x4c, 0xad,
+	0x18, 0x82, 0x4c, 0x05, 0xa8, 0x64, 0xb1, 0x10, 0x94, 0x1c, 0x59, 0x88, 0x88, 0xe8, 0x4f, 0x60,
+	0xde, 0x33, 0x82, 0xfa, 0x6d, 0x97, 0xd8, 0xbd, 0x8e, 0xe3, 0x28, 0x50, 0x56, 0xdc, 0xc4, 0xef,
+	0xc1, 0x64, 0x9f, 0x88, 0x9c, 0x52, 0x58, 0x46, 0x4b, 0xfc, 0xed, 0x79, 0x49, 0x19, 0xcc, 0xfa,
+	0x71, 0x1b, 0x6e, 0x4a, 0xe9, 0xdc, 0xa3, 0xb1, 0xe2, 0xc3, 0x46, 0xc9, 0x0b, 0x3e, 0x77, 0x6b,
+	0xf4, 0x82, 0x9f, 0xe6, 0x6b, 0xef, 0xc1, 0xab, 0x1f, 0x70, 0x47, 0xca, 0xd8, 0x1a, 0xab, 0x56,
+	0x6c, 0x72, 0x9f, 0x7a, 0x21, 0x39, 0x96, 0xb0, 0x43, 0x98, 0x0d, 0x46, 0xf2, 0x58, 0x69, 0x6c,
+	0x16, 0x32, 0xae, 0x75, 0x42, 0x64, 0x12, 0xe3, 0x0d, 0x69, 0xb0, 0x17, 0xe6, 0x63, 0x19, 0x6c,
+	0xf8, 0xc2, 0xd8, 0x96, 0x1c, 0xd7, 0x5e, 0xba, 0x9a, 0xf2, 0xf0, 0xc5, 0x1b, 0x78, 0x1b, 0xae,
+	0x84, 0xd3, 0xc4, 0x58, 0x26, 0xbf, 0xe0, 0x1b, 0x38, 0x2e, 0x93, 0x8c, 0x25, 0xf7, 0x43, 0x3f,
+	0x19, 0x28, 0x09, 0x65, 0x2c, 0x91, 0x3a, 0xd4, 0xe2, 0xf2, 0xcb, 0xf7, 0xb1, 0x5f, 0xbd, 0x74,
+	0x33, 0x96, 0x30, 0xc7, 0x17, 0x36, 0xfe, 0xf2, 0xfb, 0x39, 0x22, 0x3d, 0x32, 0x47, 0x88, 0x20,
+	0xf1, 0xb3, 0xd8, 0x0f, 0xb0, 0xe9, 0x84, 0x0e, 0x3f, 0x81, 0x8e, 0xab, 0x83, 0xd6, 0x10, 0x4f,
+	0x07, 0x6b, 0xc8, 0x8d, 0xad, 0xa6, 0xdd, 0xb1, 0x16, 0xe3, 0x23, 0x3f, 0x77, 0x46, 0x32, 0xf3,
+	0x58, 0x82, 0x3f, 0x86, 0x85, 0xe4, 0xa4, 0x3c, 0x8e, 0xe4, 0x87, 0x75, 0xc8, 0x7b, 0x07, 0x4a,
+	0xe5, 0x77, 0x1b, 0x05, 0xc8, 0x6d, 0xef, 0xec, 0xed, 0xae, 0xac, 0x36, 0xf8, 0x0f, 0x37, 0x56,
+	0x77, 0x74, 0x7d, 0x7f, 0xb7, 0x59, 0x49, 0x2d, 0xff, 0x29, 0x0d, 0xa9, 0xcd, 0x17, 0xe8, 0x1f,
+	0x21, 0xc3, 0x5f, 0x31, 0x47, 0x3c, 0x5d, 0xd7, 0x46, 0x3d, 0xd4, 0xe2, 0x6b, 0x9f, 0xfd, 0xe6,
+	0xf7, 0x5f, 0xa6, 0x2e, 0xe3, 0x4a, 0x7d, 0xf8, 0xf6, 0x21, 0x71, 0x8d, 0xfa, 0xc9, 0xb0, 0xce,
+	0xea, 0xc3, 0x13, 0xed, 0x21, 0xda, 0x87, 0xf4, 0xee, 0xc0, 0x45, 0x89, 0xcf, 0xda, 0xb5, 0xe4,
+	0xf7, 0x5b, 0x3c, 0xc7, 0x04, 0xcf, 0xe0, 0xb2, 0x22, 0xb8, 0x3f, 0x70, 0xa9, 0xd8, 0x01, 0x14,
+	0xd4, 0x17, 0xd8, 0x73, 0xdf, 0xbb, 0x6b, 0xe7, 0xbf, 0xee, 0xe2, 0x5b, 0x4c, 0xdd, 0x35, 0x7c,
+	0x45, 0x51, 0xc7, 0xdf, 0x89, 0xd5, 0xd9, 0x34, 0x4f, 0x4d, 0x94, 0xf8, 0x22, 0x5e, 0x4b, 0x7e,
+	0xf4, 0x8d, 0x9d, 0x8d, 0x7b, 0x6a, 0x52, 0xb1, 0xa6, 0x78, 0xf3, 0x6d, 0xb9, 0xe8, 0x66, 0xcc,
+	0x9b, 0x9f, 0xfa, 0xba, 0x55, 0x5b, 0x48, 0x66, 0x10, 0x8a, 0x16, 0x98, 0xa2, 0x1a, 0xbe, 0xac,
+	0x28, 0x6a, 0x79, 0x6c, 0x4f, 0xb4, 0x87, 0xcb, 0x47, 0x90, 0x61, 0xe8, 0x31, 0xfa, 0x27, 0xf9,
+	0x51, 0x8b, 0x81, 0xd1, 0x13, 0x16, 0x3f, 0x80, 0x3b, 0xe3, 0x2a, 0x53, 0x86, 0x70, 0x49, 0x2a,
+	0x63, 0xf8, 0xf1, 0x13, 0xed, 0xe1, 0xa2, 0xf6, 0xa6, 0xb6, 0xfc, 0xc7, 0x49, 0xc8, 0x30, 0xb8,
+	0x08, 0x59, 0x00, 0x3e, 0x9a, 0x1a, 0x9e, 0x65, 0x04, 0x9f, 0x0d, 0xcf, 0x32, 0x0a, 0xc4, 0xe2,
+	0x79, 0xa6, 0xb8, 0x8a, 0x67, 0xa4, 0x62, 0x86, 0x44, 0xd5, 0x19, 0xb8, 0x46, 0x7d, 0x3a, 0x14,
+	0x80, 0x19, 0x0f, 0x33, 0x14, 0x27, 0x30, 0x80, 0xaa, 0x86, 0x77, 0x48, 0x0c, 0xa2, 0x8a, 0x31,
+	0xd3, 0x79, 0x1d, 0x5f, 0x55, 0x3c, 0xcb, 0xd5, 0xda, 0x8c, 0x91, 0xea, 0xfd, 0x0f, 0x0d, 0xca,
+	0x41, 0x5c, 0x14, 0xdd, 0x8e, 0x91, 0x1c, 0x86, 0x57, 0x6b, 0x77, 0x46, 0x33, 0x25, 0x59, 0xc0,
+	0xd5, 0x9f, 0x10, 0xd2, 0x37, 0x28, 0xa3, 0x70, 0x3c, 0xfa, 0x42, 0x83, 0xe9, 0x10, 0xd8, 0x89,
+	0xe2, 0x34, 0x44, 0xa0, 0xd4, 0xda, 0xdd, 0x73, 0xb8, 0x84, 0x21, 0xf7, 0x98, 0x21, 0x0b, 0xf8,
+	0x5a, 0xc4, 0x15, 0x6e, 0xa7, 0x47, 0x5c, 0x4b, 0x18, 0xe3, 0x2d, 0x03, 0x07, 0x26, 0x63, 0x97,
+	0x21, 0x00, 0x74, 0xc6, 0x2e, 0x43, 0x10, 0xd5, 0x1c, 0xb1, 0x0c, 0x1c, 0x8d, 0xa4, 0x5b, 0xfc,
+	0xcf, 0x69, 0xc8, 0xad, 0xf2, 0x5f, 0x4f, 0x22, 0x07, 0xf2, 0x1e, 0x02, 0x88, 0xe6, 0xe3, 0xd0,
+	0x18, 0xff, 0xb6, 0x50, 0xbb, 0x99, 0xd8, 0x2f, 0xb4, 0xdf, 0x65, 0xda, 0x6f, 0xe2, 0x9a, 0xd4,
+	0x2e, 0x7e, 0xa4, 0x59, 0xe7, 0xd7, 0xfe, 0xba, 0xd1, 0x6e, 0xd3, 0x89, 0xff, 0x3b, 0x14, 0x55,
+	0x98, 0x0e, 0xdd, 0x8a, 0x45, 0x81, 0x54, 0xa4, 0xaf, 0x86, 0x47, 0xb1, 0x08, 0xed, 0x8b, 0x4c,
+	0x3b, 0xc6, 0x37, 0x12, 0xb4, 0xdb, 0x8c, 0x3d, 0x60, 0x00, 0x87, 0xd9, 0xe2, 0x0d, 0x08, 0xa0,
+	0x78, 0xf1, 0x06, 0x04, 0x51, 0xba, 0x73, 0x0d, 0x18, 0x30, 0x76, 0x6a, 0xc0, 0x2b, 0x00, 0x1f,
+	0x54, 0x43, 0xb1, 0x7e, 0x55, 0xae, 0x4e, 0xe1, 0x90, 0x8f, 0xe2, 0x71, 0xd1, 0x3d, 0x17, 0x52,
+	0xdd, 0xed, 0x38, 0x34, 0xf4, 0x97, 0xbf, 0xca, 0x42, 0xe1, 0xb9, 0xd1, 0x31, 0x5d, 0x62, 0x1a,
+	0x66, 0x8b, 0xa0, 0x97, 0x90, 0x61, 0xa5, 0x31, 0x9c, 0xe5, 0x54, 0xac, 0x29, 0x9c, 0xe5, 0x02,
+	0x40, 0x0c, 0xbe, 0xc3, 0x34, 0xcf, 0xe3, 0x39, 0xa9, 0xb9, 0xe7, 0x8b, 0xaf, 0x33, 0x0c, 0x85,
+	0x4e, 0xf8, 0x9f, 0x21, 0x2b, 0xe0, 0xf9, 0x90, 0xb0, 0x00, 0xb6, 0x52, 0xbb, 0x1e, 0xdf, 0x99,
+	0xb4, 0xbd, 0x54, 0x55, 0x0e, 0xe3, 0xa5, 0xba, 0x5e, 0x03, 0xf8, 0x00, 0x61, 0xd8, 0xb9, 0x11,
+	0x3c, 0xb1, 0xb6, 0x90, 0xcc, 0x20, 0xf4, 0x3e, 0x60, 0x7a, 0x6f, 0xe3, 0xf9, 0x38, 0xbd, 0x6d,
+	0x8f, 0x9f, 0xea, 0x3e, 0x84, 0xc9, 0x75, 0xc3, 0x39, 0x46, 0xa1, 0x62, 0xa7, 0xfc, 0xe0, 0xa1,
+	0x56, 0x8b, 0xeb, 0x12, 0x9a, 0x6e, 0x33, 0x4d, 0x37, 0x70, 0x35, 0x4e, 0xd3, 0xb1, 0xe1, 0xd0,
+	0xea, 0x81, 0x8e, 0x21, 0xcb, 0x7f, 0x03, 0x11, 0xf6, 0x65, 0xe0, 0x77, 0x14, 0x61, 0x5f, 0x06,
+	0x7f, 0x36, 0x71, 0x31, 0x4d, 0x2e, 0x4c, 0xc9, 0x1f, 0x1e, 0xa0, 0x1b, 0xa1, 0xa5, 0x09, 0xfe,
+	0x48, 0xa1, 0x36, 0x9f, 0xd4, 0x2d, 0xf4, 0xdd, 0x67, 0xfa, 0x6e, 0xe1, 0xeb, 0xb1, 0x6b, 0x27,
+	0xb8, 0x9f, 0x68, 0x0f, 0xdf, 0xd4, 0x68, 0x99, 0x00, 0x1f, 0x64, 0x8d, 0x44, 0x47, 0x18, 0xaf,
+	0x8d, 0x44, 0x47, 0x04, 0x9f, 0xc5, 0xcb, 0x4c, 0xf9, 0x63, 0x7c, 0x3f, 0x4e, 0xb9, 0x6b, 0x1b,
+	0xa6, 0xf3, 0x92, 0xd8, 0x6f, 0x70, 0x30, 0xcd, 0x39, 0xee, 0xf4, 0x69, 0xa4, 0xfc, 0x65, 0x1a,
+	0x26, 0xe9, 0x79, 0x94, 0x96, 0x67, 0xff, 0x1a, 0x1f, 0xb6, 0x26, 0x02, 0x9e, 0x85, 0xad, 0x89,
+	0x22, 0x00, 0xd1, 0xf2, 0xcc, 0x7e, 0x27, 0x4f, 0x18, 0x13, 0xf5, 0xba, 0x03, 0x05, 0xe5, 0xae,
+	0x8f, 0x62, 0x04, 0x06, 0x91, 0xb9, 0x70, 0x5d, 0x88, 0x01, 0x0a, 0xf0, 0x4d, 0xa6, 0x73, 0x0e,
+	0xcf, 0x06, 0x74, 0xb6, 0x39, 0x17, 0x55, 0xfa, 0xaf, 0x50, 0x54, 0x31, 0x01, 0x14, 0x23, 0x33,
+	0x84, 0xfc, 0x85, 0x53, 0x62, 0x1c, 0xa4, 0x10, 0xcd, 0x0e, 0xde, 0xff, 0x09, 0x90, 0xac, 0x54,
+	0x79, 0x1f, 0x72, 0x02, 0x28, 0x88, 0x9b, 0x6d, 0x10, 0x2a, 0x8c, 0x9b, 0x6d, 0x08, 0x65, 0x88,
+	0x1e, 0xf3, 0x98, 0x56, 0x7a, 0x1f, 0x92, 0x25, 0x48, 0x68, 0x7c, 0x46, 0xdc, 0x24, 0x8d, 0x3e,
+	0xf6, 0x95, 0xa4, 0x51, 0xb9, 0x8b, 0x8e, 0xd2, 0x78, 0x44, 0x5c, 0x11, 0x4b, 0xf2, 0x9e, 0x87,
+	0x12, 0x04, 0xaa, 0x29, 0x1f, 0x8f, 0x62, 0x49, 0x3a, 0x95, 0xfb, 0x4a, 0x45, 0xbe, 0x47, 0x9f,
+	0x02, 0xf8, 0x90, 0x46, 0xf8, 0xb4, 0x15, 0x8b, 0x8b, 0x86, 0x4f, 0x5b, 0xf1, 0xa8, 0x48, 0x34,
+	0x7f, 0xf8, 0xba, 0xf9, 0xc5, 0x80, 0x6a, 0xff, 0x1f, 0x0d, 0x50, 0x14, 0x01, 0x41, 0x8f, 0xe2,
+	0x35, 0xc4, 0x22, 0xae, 0xb5, 0xc7, 0x17, 0x63, 0x4e, 0x2a, 0x11, 0xbe, 0x59, 0x2d, 0x36, 0xa2,
+	0xff, 0x8a, 0x1a, 0xf6, 0xb9, 0x06, 0xa5, 0x00, 0x84, 0x82, 0xee, 0x25, 0xac, 0x71, 0x08, 0xb4,
+	0xad, 0xdd, 0x3f, 0x97, 0x2f, 0xe9, 0x24, 0xa6, 0xec, 0x08, 0x79, 0x10, 0xff, 0x2f, 0x0d, 0xca,
+	0x41, 0xd8, 0x05, 0x25, 0xc8, 0x8f, 0x00, 0xbf, 0xb5, 0xc5, 0xf3, 0x19, 0xcf, 0x5f, 0x2a, 0xff,
+	0x6c, 0xde, 0x87, 0x9c, 0x00, 0x6b, 0xe2, 0x02, 0x22, 0x08, 0x1b, 0xc7, 0x05, 0x44, 0x08, 0xe9,
+	0x49, 0x08, 0x08, 0xdb, 0xea, 0x12, 0x25, 0x04, 0x05, 0xa2, 0x93, 0xa4, 0x71, 0x74, 0x08, 0x86,
+	0xe0, 0xa0, 0x51, 0x1a, 0xfd, 0x10, 0x94, 0x70, 0x0e, 0x4a, 0x10, 0x78, 0x4e, 0x08, 0x86, 0xd1,
+	0xa0, 0x84, 0x10, 0x64, 0x4a, 0x95, 0x10, 0xf4, 0xc1, 0x97, 0xb8, 0x10, 0x8c, 0x20, 0xe2, 0x71,
+	0x21, 0x18, 0xc5, 0x6f, 0x12, 0xd6, 0x95, 0xe9, 0x0e, 0x84, 0xe0, 0x4c, 0x0c, 0x56, 0x83, 0x1e,
+	0x27, 0x38, 0x34, 0x16, 0x6c, 0xaf, 0xbd, 0x71, 0x41, 0xee, 0x91, 0x7b, 0x9f, 0x2f, 0x85, 0xdc,
+	0xfb, 0xff, 0xaf, 0xc1, 0x6c, 0x1c, 0xd6, 0x83, 0x12, 0x74, 0x25, 0x00, 0xf5, 0xb5, 0xa5, 0x8b,
+	0xb2, 0x9f, 0xef, 0x35, 0x2f, 0x1a, 0x9e, 0x56, 0x7e, 0xfe, 0xcd, 0xbc, 0xf6, 0xab, 0x6f, 0xe6,
+	0xb5, 0xdf, 0x7e, 0x33, 0xaf, 0xfd, 0xef, 0xef, 0xe6, 0x27, 0x0e, 0xb3, 0xec, 0x7f, 0xa7, 0xbd,
+	0xfd, 0xd7, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x77, 0x6b, 0x63, 0x24, 0x37, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// KVClient is the client API for KV service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type KVClient interface {
+	// Range gets the keys in the range from the key-value store.
+	Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error)
+	// Put puts the given key into the key-value store.
+	// A put request increments the revision of the key-value store
+	// and generates one event in the event history.
+	Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error)
+	// DeleteRange deletes the given range from the key-value store.
+	// A delete request increments the revision of the key-value store
+	// and generates a delete event in the event history for every deleted key.
+	DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error)
+	// Txn processes multiple requests in a single transaction.
+	// A txn request increments the revision of the key-value store
+	// and generates events with the same revision for every completed request.
+	// It is not allowed to modify the same key several times within one txn.
+	Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error)
+	// Compact compacts the event history in the etcd key-value store. The key-value
+	// store should be periodically compacted or the event history will continue to grow
+	// indefinitely.
+	Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error)
+}
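+
+// The generated client above is normally driven over a plain *grpc.ClientConn.
+// A minimal, illustrative sketch of putting and reading back a key (the
+// endpoint address and the key/value literals are assumptions, not part of
+// this generated file):
+//
+//	conn, err := grpc.Dial("localhost:2379", grpc.WithInsecure())
+//	if err != nil {
+//		// handle the dial error
+//	}
+//	defer conn.Close()
+//	kv := NewKVClient(conn)
+//	ctx := context.Background()
+//	if _, err := kv.Put(ctx, &PutRequest{Key: []byte("foo"), Value: []byte("bar")}); err != nil {
+//		// handle the Put error
+//	}
+//	resp, err := kv.Range(ctx, &RangeRequest{Key: []byte("foo")})
+//	if err == nil && len(resp.Kvs) > 0 {
+//		_ = resp.Kvs[0].Value // "bar"
+//	}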
+
+type kVClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewKVClient(cc *grpc.ClientConn) KVClient {
+	return &kVClient{cc}
+}
+
+func (c *kVClient) Range(ctx context.Context, in *RangeRequest, opts ...grpc.CallOption) (*RangeResponse, error) {
+	out := new(RangeResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Range", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) {
+	out := new(PutResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Put", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) DeleteRange(ctx context.Context, in *DeleteRangeRequest, opts ...grpc.CallOption) (*DeleteRangeResponse, error) {
+	out := new(DeleteRangeResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/DeleteRange", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) Txn(ctx context.Context, in *TxnRequest, opts ...grpc.CallOption) (*TxnResponse, error) {
+	out := new(TxnResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Txn", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *kVClient) Compact(ctx context.Context, in *CompactionRequest, opts ...grpc.CallOption) (*CompactionResponse, error) {
+	out := new(CompactionResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.KV/Compact", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// KVServer is the server API for KV service.
+type KVServer interface {
+	// Range gets the keys in the range from the key-value store.
+	Range(context.Context, *RangeRequest) (*RangeResponse, error)
+	// Put puts the given key into the key-value store.
+	// A put request increments the revision of the key-value store
+	// and generates one event in the event history.
+	Put(context.Context, *PutRequest) (*PutResponse, error)
+	// DeleteRange deletes the given range from the key-value store.
+	// A delete request increments the revision of the key-value store
+	// and generates a delete event in the event history for every deleted key.
+	DeleteRange(context.Context, *DeleteRangeRequest) (*DeleteRangeResponse, error)
+	// Txn processes multiple requests in a single transaction.
+	// A txn request increments the revision of the key-value store
+	// and generates events with the same revision for every completed request.
+	// It is not allowed to modify the same key several times within one txn.
+	Txn(context.Context, *TxnRequest) (*TxnResponse, error)
+	// Compact compacts the event history in the etcd key-value store. The key-value
+	// store should be periodically compacted or the event history will continue to grow
+	// indefinitely.
+	Compact(context.Context, *CompactionRequest) (*CompactionResponse, error)
+}
+
+// UnimplementedKVServer can be embedded to have forward compatible implementations.
+type UnimplementedKVServer struct {
+}
+
+func (*UnimplementedKVServer) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Range not implemented")
+}
+func (*UnimplementedKVServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Put not implemented")
+}
+func (*UnimplementedKVServer) DeleteRange(ctx context.Context, req *DeleteRangeRequest) (*DeleteRangeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteRange not implemented")
+}
+func (*UnimplementedKVServer) Txn(ctx context.Context, req *TxnRequest) (*TxnResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Txn not implemented")
+}
+func (*UnimplementedKVServer) Compact(ctx context.Context, req *CompactionRequest) (*CompactionResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented")
+}
+
+func RegisterKVServer(s *grpc.Server, srv KVServer) {
+	s.RegisterService(&_KV_serviceDesc, srv)
+}
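+
+// Embedding UnimplementedKVServer keeps a partial implementation compiling if
+// new RPCs are later added to the KV service. A hedged sketch of a server that
+// only overrides Range and registers itself (the type name, listener address
+// and response contents are illustrative):
+//
+//	type rangeOnlyKV struct {
+//		UnimplementedKVServer
+//	}
+//
+//	func (s *rangeOnlyKV) Range(ctx context.Context, req *RangeRequest) (*RangeResponse, error) {
+//		return &RangeResponse{Header: &ResponseHeader{}}, nil
+//	}
+//
+//	lis, _ := net.Listen("tcp", ":2379")
+//	srv := grpc.NewServer()
+//	RegisterKVServer(srv, &rangeOnlyKV{})
+//	_ = srv.Serve(lis)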
+
+func _KV_Range_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(RangeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Range(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/Range",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Range(ctx, req.(*RangeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
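+
+// The interceptor argument above is non-nil only when the *grpc.Server was
+// built with a unary interceptor option; the generated handler then wraps the
+// direct method call so the interceptor sees the full method name. A hedged
+// illustration (the pass-through interceptor itself is an assumption, not part
+// of this file):
+//
+//	logging := func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo,
+//		handler grpc.UnaryHandler) (interface{}, error) {
+//		// info.FullMethod is e.g. "/etcdserverpb.KV/Range"
+//		return handler(ctx, req)
+//	}
+//	srv := grpc.NewServer(grpc.UnaryInterceptor(logging))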
+
+func _KV_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PutRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Put(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/Put",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Put(ctx, req.(*PutRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_DeleteRange_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeleteRangeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).DeleteRange(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/DeleteRange",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).DeleteRange(ctx, req.(*DeleteRangeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Txn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(TxnRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Txn(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/Txn",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Txn(ctx, req.(*TxnRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _KV_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(CompactionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(KVServer).Compact(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.KV/Compact",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(KVServer).Compact(ctx, req.(*CompactionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _KV_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.KV",
+	HandlerType: (*KVServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Range",
+			Handler:    _KV_Range_Handler,
+		},
+		{
+			MethodName: "Put",
+			Handler:    _KV_Put_Handler,
+		},
+		{
+			MethodName: "DeleteRange",
+			Handler:    _KV_DeleteRange_Handler,
+		},
+		{
+			MethodName: "Txn",
+			Handler:    _KV_Txn_Handler,
+		},
+		{
+			MethodName: "Compact",
+			Handler:    _KV_Compact_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "rpc.proto",
+}
+
+// WatchClient is the client API for Watch service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type WatchClient interface {
+	// Watch watches for events happening or that have happened. Both input and output
+	// are streams; the input stream is for creating and canceling watchers and the output
+	// stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+	// for several watches at once. The entire event history can be watched starting from the
+	// last compaction revision.
+	Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error)
+}
+
+type watchClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewWatchClient(cc *grpc.ClientConn) WatchClient {
+	return &watchClient{cc}
+}
+
+func (c *watchClient) Watch(ctx context.Context, opts ...grpc.CallOption) (Watch_WatchClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Watch_serviceDesc.Streams[0], "/etcdserverpb.Watch/Watch", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &watchWatchClient{stream}
+	return x, nil
+}
+
+type Watch_WatchClient interface {
+	Send(*WatchRequest) error
+	Recv() (*WatchResponse, error)
+	grpc.ClientStream
+}
+
+type watchWatchClient struct {
+	grpc.ClientStream
+}
+
+func (x *watchWatchClient) Send(m *WatchRequest) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *watchWatchClient) Recv() (*WatchResponse, error) {
+	m := new(WatchResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
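+
+// A Watch stream is opened once and then driven with Send/Recv. A minimal
+// sketch, assuming conn is the *grpc.ClientConn from the KV sketch above and
+// using the WatchRequest_CreateRequest union wrapper declared earlier in this
+// file (the key literal is illustrative):
+//
+//	wc := NewWatchClient(conn)
+//	stream, err := wc.Watch(context.Background())
+//	if err != nil {
+//		// handle the stream-creation error
+//	}
+//	create := &WatchRequest{RequestUnion: &WatchRequest_CreateRequest{
+//		CreateRequest: &WatchCreateRequest{Key: []byte("foo")},
+//	}}
+//	if err := stream.Send(create); err != nil {
+//		// handle the send error
+//	}
+//	for {
+//		resp, err := stream.Recv()
+//		if err != nil {
+//			break
+//		}
+//		_ = resp.Events // one batch of key events per response
+//	}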
+
+// WatchServer is the server API for Watch service.
+type WatchServer interface {
+	// Watch watches for events happening or that have happened. Both input and output
+	// are streams; the input stream is for creating and canceling watchers and the output
+	// stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+	// for several watches at once. The entire event history can be watched starting from the
+	// last compaction revision.
+	Watch(Watch_WatchServer) error
+}
+
+// UnimplementedWatchServer can be embedded to have forward compatible implementations.
+type UnimplementedWatchServer struct {
+}
+
+func (*UnimplementedWatchServer) Watch(srv Watch_WatchServer) error {
+	return status.Errorf(codes.Unimplemented, "method Watch not implemented")
+}
+
+func RegisterWatchServer(s *grpc.Server, srv WatchServer) {
+	s.RegisterService(&_Watch_serviceDesc, srv)
+}
+
+func _Watch_Watch_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(WatchServer).Watch(&watchWatchServer{stream})
+}
+
+type Watch_WatchServer interface {
+	Send(*WatchResponse) error
+	Recv() (*WatchRequest, error)
+	grpc.ServerStream
+}
+
+type watchWatchServer struct {
+	grpc.ServerStream
+}
+
+func (x *watchWatchServer) Send(m *WatchResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *watchWatchServer) Recv() (*WatchRequest, error) {
+	m := new(WatchRequest)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+var _Watch_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Watch",
+	HandlerType: (*WatchServer)(nil),
+	Methods:     []grpc.MethodDesc{},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Watch",
+			Handler:       _Watch_Watch_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "rpc.proto",
+}
+
+// LeaseClient is the client API for Lease service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type LeaseClient interface {
+	// LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+	// within a given time to live period. All keys attached to the lease will be expired and
+	// deleted if the lease expires. Each expired key generates a delete event in the event history.
+	LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error)
+	// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+	LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error)
+	// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+	// to the server and streaming keep alive responses from the server to the client.
+	LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error)
+	// LeaseTimeToLive retrieves lease information.
+	LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error)
+	// LeaseLeases lists all existing leases.
+	LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error)
+}
+
+type leaseClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewLeaseClient(cc *grpc.ClientConn) LeaseClient {
+	return &leaseClient{cc}
+}
+
+func (c *leaseClient) LeaseGrant(ctx context.Context, in *LeaseGrantRequest, opts ...grpc.CallOption) (*LeaseGrantResponse, error) {
+	out := new(LeaseGrantResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseGrant", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *leaseClient) LeaseRevoke(ctx context.Context, in *LeaseRevokeRequest, opts ...grpc.CallOption) (*LeaseRevokeResponse, error) {
+	out := new(LeaseRevokeResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseRevoke", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *leaseClient) LeaseKeepAlive(ctx context.Context, opts ...grpc.CallOption) (Lease_LeaseKeepAliveClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Lease_serviceDesc.Streams[0], "/etcdserverpb.Lease/LeaseKeepAlive", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &leaseLeaseKeepAliveClient{stream}
+	return x, nil
+}
+
+type Lease_LeaseKeepAliveClient interface {
+	Send(*LeaseKeepAliveRequest) error
+	Recv() (*LeaseKeepAliveResponse, error)
+	grpc.ClientStream
+}
+
+type leaseLeaseKeepAliveClient struct {
+	grpc.ClientStream
+}
+
+func (x *leaseLeaseKeepAliveClient) Send(m *LeaseKeepAliveRequest) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveClient) Recv() (*LeaseKeepAliveResponse, error) {
+	m := new(LeaseKeepAliveResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *leaseClient) LeaseTimeToLive(ctx context.Context, in *LeaseTimeToLiveRequest, opts ...grpc.CallOption) (*LeaseTimeToLiveResponse, error) {
+	out := new(LeaseTimeToLiveResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseTimeToLive", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *leaseClient) LeaseLeases(ctx context.Context, in *LeaseLeasesRequest, opts ...grpc.CallOption) (*LeaseLeasesResponse, error) {
+	out := new(LeaseLeasesResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Lease/LeaseLeases", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// LeaseServer is the server API for Lease service.
+type LeaseServer interface {
+	// LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+	// within a given time to live period. All keys attached to the lease will be expired and
+	// deleted if the lease expires. Each expired key generates a delete event in the event history.
+	LeaseGrant(context.Context, *LeaseGrantRequest) (*LeaseGrantResponse, error)
+	// LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+	LeaseRevoke(context.Context, *LeaseRevokeRequest) (*LeaseRevokeResponse, error)
+	// LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+	// to the server and streaming keep alive responses from the server to the client.
+	LeaseKeepAlive(Lease_LeaseKeepAliveServer) error
+	// LeaseTimeToLive retrieves lease information.
+	LeaseTimeToLive(context.Context, *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error)
+	// LeaseLeases lists all existing leases.
+	LeaseLeases(context.Context, *LeaseLeasesRequest) (*LeaseLeasesResponse, error)
+}
+
+// UnimplementedLeaseServer can be embedded to have forward compatible implementations.
+type UnimplementedLeaseServer struct {
+}
+
+func (*UnimplementedLeaseServer) LeaseGrant(ctx context.Context, req *LeaseGrantRequest) (*LeaseGrantResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method LeaseGrant not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseRevoke(ctx context.Context, req *LeaseRevokeRequest) (*LeaseRevokeResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method LeaseRevoke not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseKeepAlive(srv Lease_LeaseKeepAliveServer) error {
+	return status.Errorf(codes.Unimplemented, "method LeaseKeepAlive not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseTimeToLive(ctx context.Context, req *LeaseTimeToLiveRequest) (*LeaseTimeToLiveResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method LeaseTimeToLive not implemented")
+}
+func (*UnimplementedLeaseServer) LeaseLeases(ctx context.Context, req *LeaseLeasesRequest) (*LeaseLeasesResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method LeaseLeases not implemented")
+}
+
+func RegisterLeaseServer(s *grpc.Server, srv LeaseServer) {
+	s.RegisterService(&_Lease_serviceDesc, srv)
+}
+
+func _Lease_LeaseGrant_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseGrantRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseGrant(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseGrant",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseGrant(ctx, req.(*LeaseGrantRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseRevoke_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseRevokeRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseRevoke(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseRevoke",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseRevoke(ctx, req.(*LeaseRevokeRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseKeepAlive_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(LeaseServer).LeaseKeepAlive(&leaseLeaseKeepAliveServer{stream})
+}
+
+type Lease_LeaseKeepAliveServer interface {
+	Send(*LeaseKeepAliveResponse) error
+	Recv() (*LeaseKeepAliveRequest, error)
+	grpc.ServerStream
+}
+
+type leaseLeaseKeepAliveServer struct {
+	grpc.ServerStream
+}
+
+func (x *leaseLeaseKeepAliveServer) Send(m *LeaseKeepAliveResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *leaseLeaseKeepAliveServer) Recv() (*LeaseKeepAliveRequest, error) {
+	m := new(LeaseKeepAliveRequest)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func _Lease_LeaseTimeToLive_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseTimeToLiveRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseTimeToLive(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseTimeToLive",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseTimeToLive(ctx, req.(*LeaseTimeToLiveRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Lease_LeaseLeases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LeaseLeasesRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(LeaseServer).LeaseLeases(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Lease/LeaseLeases",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(LeaseServer).LeaseLeases(ctx, req.(*LeaseLeasesRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Lease_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Lease",
+	HandlerType: (*LeaseServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "LeaseGrant",
+			Handler:    _Lease_LeaseGrant_Handler,
+		},
+		{
+			MethodName: "LeaseRevoke",
+			Handler:    _Lease_LeaseRevoke_Handler,
+		},
+		{
+			MethodName: "LeaseTimeToLive",
+			Handler:    _Lease_LeaseTimeToLive_Handler,
+		},
+		{
+			MethodName: "LeaseLeases",
+			Handler:    _Lease_LeaseLeases_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "LeaseKeepAlive",
+			Handler:       _Lease_LeaseKeepAlive_Handler,
+			ServerStreams: true,
+			ClientStreams: true,
+		},
+	},
+	Metadata: "rpc.proto",
+}
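+
+// Usage note (editorial sketch, not part of the generated code): LeaseGrant is
+// a plain unary call, while LeaseKeepAlive is a bidirectional stream on which
+// the client periodically sends the lease ID and reads back the refreshed TTL.
+// A minimal sketch, assuming a dialed *grpc.ClientConn conn and a context ctx
+// (hypothetical names):
+//
+//	lc := NewLeaseClient(conn)
+//	grant, err := lc.LeaseGrant(ctx, &LeaseGrantRequest{TTL: 10})
+//	if err != nil { /* handle error */ }
+//	ka, err := lc.LeaseKeepAlive(ctx)
+//	if err != nil { /* handle error */ }
+//	_ = ka.Send(&LeaseKeepAliveRequest{ID: grant.ID})
+//	resp, _ := ka.Recv() // resp.TTL holds the refreshed time to live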
+
+// ClusterClient is the client API for Cluster service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ClusterClient interface {
+	// MemberAdd adds a member into the cluster.
+	MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error)
+	// MemberRemove removes an existing member from the cluster.
+	MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error)
+	// MemberUpdate updates the member configuration.
+	MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error)
+	// MemberList lists all the members in the cluster.
+	MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error)
+}
+
+type clusterClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewClusterClient(cc *grpc.ClientConn) ClusterClient {
+	return &clusterClient{cc}
+}
+
+func (c *clusterClient) MemberAdd(ctx context.Context, in *MemberAddRequest, opts ...grpc.CallOption) (*MemberAddResponse, error) {
+	out := new(MemberAddResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberAdd", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *clusterClient) MemberRemove(ctx context.Context, in *MemberRemoveRequest, opts ...grpc.CallOption) (*MemberRemoveResponse, error) {
+	out := new(MemberRemoveResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberRemove", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *clusterClient) MemberUpdate(ctx context.Context, in *MemberUpdateRequest, opts ...grpc.CallOption) (*MemberUpdateResponse, error) {
+	out := new(MemberUpdateResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *clusterClient) MemberList(ctx context.Context, in *MemberListRequest, opts ...grpc.CallOption) (*MemberListResponse, error) {
+	out := new(MemberListResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Cluster/MemberList", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// ClusterServer is the server API for Cluster service.
+type ClusterServer interface {
+	// MemberAdd adds a member into the cluster.
+	MemberAdd(context.Context, *MemberAddRequest) (*MemberAddResponse, error)
+	// MemberRemove removes an existing member from the cluster.
+	MemberRemove(context.Context, *MemberRemoveRequest) (*MemberRemoveResponse, error)
+	// MemberUpdate updates the member configuration.
+	MemberUpdate(context.Context, *MemberUpdateRequest) (*MemberUpdateResponse, error)
+	// MemberList lists all the members in the cluster.
+	MemberList(context.Context, *MemberListRequest) (*MemberListResponse, error)
+}
+
+// UnimplementedClusterServer can be embedded to have forward compatible implementations.
+type UnimplementedClusterServer struct {
+}
+
+func (*UnimplementedClusterServer) MemberAdd(ctx context.Context, req *MemberAddRequest) (*MemberAddResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MemberAdd not implemented")
+}
+func (*UnimplementedClusterServer) MemberRemove(ctx context.Context, req *MemberRemoveRequest) (*MemberRemoveResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MemberRemove not implemented")
+}
+func (*UnimplementedClusterServer) MemberUpdate(ctx context.Context, req *MemberUpdateRequest) (*MemberUpdateResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MemberUpdate not implemented")
+}
+func (*UnimplementedClusterServer) MemberList(ctx context.Context, req *MemberListRequest) (*MemberListResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MemberList not implemented")
+}
+
+func RegisterClusterServer(s *grpc.Server, srv ClusterServer) {
+	s.RegisterService(&_Cluster_serviceDesc, srv)
+}
+
+func _Cluster_MemberAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberAddRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberAdd(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberAdd",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberAdd(ctx, req.(*MemberAddRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberRemove_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberRemoveRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberRemove(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberRemove",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberRemove(ctx, req.(*MemberRemoveRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberUpdateRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberUpdate(ctx, req.(*MemberUpdateRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Cluster_MemberList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MemberListRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ClusterServer).MemberList(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Cluster/MemberList",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ClusterServer).MemberList(ctx, req.(*MemberListRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Cluster_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Cluster",
+	HandlerType: (*ClusterServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "MemberAdd",
+			Handler:    _Cluster_MemberAdd_Handler,
+		},
+		{
+			MethodName: "MemberRemove",
+			Handler:    _Cluster_MemberRemove_Handler,
+		},
+		{
+			MethodName: "MemberUpdate",
+			Handler:    _Cluster_MemberUpdate_Handler,
+		},
+		{
+			MethodName: "MemberList",
+			Handler:    _Cluster_MemberList_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "rpc.proto",
+}
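+
+// Usage note (editorial sketch, not part of the generated code): all Cluster
+// RPCs are unary, so listing members is a single Invoke round trip. Assuming a
+// dialed *grpc.ClientConn conn and a context ctx (hypothetical names):
+//
+//	cc := NewClusterClient(conn)
+//	resp, err := cc.MemberList(ctx, &MemberListRequest{})
+//	if err != nil { /* handle error */ }
+//	for _, m := range resp.Members {
+//		_ = m.Name // inspect each cluster member
+//	}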
+
+// MaintenanceClient is the client API for Maintenance service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type MaintenanceClient interface {
+	// Alarm activates, deactivates, and queries alarms regarding cluster health.
+	Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error)
+	// Status gets the status of the member.
+	Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error)
+	// Defragment defragments a member's backend database to recover storage space.
+	Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error)
+	// Hash computes the hash of the KV's backend.
+	// This is designed for testing; do not use this in production when there
+	// are ongoing transactions.
+	Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error)
+	// HashKV computes the hash of all MVCC keys up to a given revision.
+	HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error)
+	// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+	Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error)
+	// MoveLeader requests the current leader node to transfer its leadership to the transferee.
+	MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error)
+}
+
+type maintenanceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewMaintenanceClient(cc *grpc.ClientConn) MaintenanceClient {
+	return &maintenanceClient{cc}
+}
+
+func (c *maintenanceClient) Alarm(ctx context.Context, in *AlarmRequest, opts ...grpc.CallOption) (*AlarmResponse, error) {
+	out := new(AlarmResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Alarm", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Status(ctx context.Context, in *StatusRequest, opts ...grpc.CallOption) (*StatusResponse, error) {
+	out := new(StatusResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Status", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Defragment(ctx context.Context, in *DefragmentRequest, opts ...grpc.CallOption) (*DefragmentResponse, error) {
+	out := new(DefragmentResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Defragment", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Hash(ctx context.Context, in *HashRequest, opts ...grpc.CallOption) (*HashResponse, error) {
+	out := new(HashResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/Hash", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) HashKV(ctx context.Context, in *HashKVRequest, opts ...grpc.CallOption) (*HashKVResponse, error) {
+	out := new(HashKVResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/HashKV", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *maintenanceClient) Snapshot(ctx context.Context, in *SnapshotRequest, opts ...grpc.CallOption) (Maintenance_SnapshotClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_Maintenance_serviceDesc.Streams[0], "/etcdserverpb.Maintenance/Snapshot", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &maintenanceSnapshotClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type Maintenance_SnapshotClient interface {
+	Recv() (*SnapshotResponse, error)
+	grpc.ClientStream
+}
+
+type maintenanceSnapshotClient struct {
+	grpc.ClientStream
+}
+
+func (x *maintenanceSnapshotClient) Recv() (*SnapshotResponse, error) {
+	m := new(SnapshotResponse)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *maintenanceClient) MoveLeader(ctx context.Context, in *MoveLeaderRequest, opts ...grpc.CallOption) (*MoveLeaderResponse, error) {
+	out := new(MoveLeaderResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Maintenance/MoveLeader", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// MaintenanceServer is the server API for Maintenance service.
+type MaintenanceServer interface {
+	// Alarm activates, deactivates, and queries alarms regarding cluster health.
+	Alarm(context.Context, *AlarmRequest) (*AlarmResponse, error)
+	// Status gets the status of the member.
+	Status(context.Context, *StatusRequest) (*StatusResponse, error)
+	// Defragment defragments a member's backend database to recover storage space.
+	Defragment(context.Context, *DefragmentRequest) (*DefragmentResponse, error)
+	// Hash computes the hash of the KV's backend.
+	// This is designed for testing; do not use this in production when there
+	// are ongoing transactions.
+	Hash(context.Context, *HashRequest) (*HashResponse, error)
+	// HashKV computes the hash of all MVCC keys up to a given revision.
+	HashKV(context.Context, *HashKVRequest) (*HashKVResponse, error)
+	// Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+	Snapshot(*SnapshotRequest, Maintenance_SnapshotServer) error
+	// MoveLeader requests the current leader node to transfer its leadership to the transferee.
+	MoveLeader(context.Context, *MoveLeaderRequest) (*MoveLeaderResponse, error)
+}
+
+// UnimplementedMaintenanceServer can be embedded to have forward compatible implementations.
+type UnimplementedMaintenanceServer struct {
+}
+
+func (*UnimplementedMaintenanceServer) Alarm(ctx context.Context, req *AlarmRequest) (*AlarmResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Alarm not implemented")
+}
+func (*UnimplementedMaintenanceServer) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Status not implemented")
+}
+func (*UnimplementedMaintenanceServer) Defragment(ctx context.Context, req *DefragmentRequest) (*DefragmentResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Defragment not implemented")
+}
+func (*UnimplementedMaintenanceServer) Hash(ctx context.Context, req *HashRequest) (*HashResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Hash not implemented")
+}
+func (*UnimplementedMaintenanceServer) HashKV(ctx context.Context, req *HashKVRequest) (*HashKVResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method HashKV not implemented")
+}
+func (*UnimplementedMaintenanceServer) Snapshot(req *SnapshotRequest, srv Maintenance_SnapshotServer) error {
+	return status.Errorf(codes.Unimplemented, "method Snapshot not implemented")
+}
+func (*UnimplementedMaintenanceServer) MoveLeader(ctx context.Context, req *MoveLeaderRequest) (*MoveLeaderResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method MoveLeader not implemented")
+}
+
+func RegisterMaintenanceServer(s *grpc.Server, srv MaintenanceServer) {
+	s.RegisterService(&_Maintenance_serviceDesc, srv)
+}
+
+func _Maintenance_Alarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AlarmRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).Alarm(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/Alarm",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).Alarm(ctx, req.(*AlarmRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(StatusRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).Status(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/Status",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).Status(ctx, req.(*StatusRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Defragment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DefragmentRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).Defragment(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/Defragment",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).Defragment(ctx, req.(*DefragmentRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Hash_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(HashRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).Hash(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/Hash",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).Hash(ctx, req.(*HashRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_HashKV_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(HashKVRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).HashKV(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/HashKV",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).HashKV(ctx, req.(*HashKVRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Maintenance_Snapshot_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(SnapshotRequest)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(MaintenanceServer).Snapshot(m, &maintenanceSnapshotServer{stream})
+}
+
+type Maintenance_SnapshotServer interface {
+	Send(*SnapshotResponse) error
+	grpc.ServerStream
+}
+
+type maintenanceSnapshotServer struct {
+	grpc.ServerStream
+}
+
+func (x *maintenanceSnapshotServer) Send(m *SnapshotResponse) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _Maintenance_MoveLeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(MoveLeaderRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(MaintenanceServer).MoveLeader(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Maintenance/MoveLeader",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(MaintenanceServer).MoveLeader(ctx, req.(*MoveLeaderRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Maintenance_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Maintenance",
+	HandlerType: (*MaintenanceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "Alarm",
+			Handler:    _Maintenance_Alarm_Handler,
+		},
+		{
+			MethodName: "Status",
+			Handler:    _Maintenance_Status_Handler,
+		},
+		{
+			MethodName: "Defragment",
+			Handler:    _Maintenance_Defragment_Handler,
+		},
+		{
+			MethodName: "Hash",
+			Handler:    _Maintenance_Hash_Handler,
+		},
+		{
+			MethodName: "HashKV",
+			Handler:    _Maintenance_HashKV_Handler,
+		},
+		{
+			MethodName: "MoveLeader",
+			Handler:    _Maintenance_MoveLeader_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "Snapshot",
+			Handler:       _Maintenance_Snapshot_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "rpc.proto",
+}
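+
+// Usage note (editorial sketch, not part of the generated code): Snapshot is
+// server-streaming, so the client sends a single SnapshotRequest and then
+// drains SnapshotResponse chunks until Recv fails (normally with io.EOF).
+// Assuming a dialed *grpc.ClientConn conn and a context ctx (hypothetical
+// names):
+//
+//	mc := NewMaintenanceClient(conn)
+//	snap, err := mc.Snapshot(ctx, &SnapshotRequest{})
+//	if err != nil { /* handle error */ }
+//	for {
+//		chunk, err := snap.Recv()
+//		if err != nil { break } // io.EOF ends the stream
+//		_ = chunk.Blob          // append chunk.Blob to the snapshot file
+//	}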
+
+// AuthClient is the client API for Auth service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type AuthClient interface {
+	// AuthEnable enables authentication.
+	AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error)
+	// AuthDisable disables authentication.
+	AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error)
+	// Authenticate processes an authenticate request.
+	Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error)
+	// UserAdd adds a new user.
+	UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error)
+	// UserGet gets detailed user information.
+	UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error)
+	// UserList gets a list of all users.
+	UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error)
+	// UserDelete deletes a specified user.
+	UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error)
+	// UserChangePassword changes the password of a specified user.
+	UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error)
+	// UserGrantRole grants a role to a specified user.
+	UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error)
+	// UserRevokeRole revokes a role of a specified user.
+	UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error)
+	// RoleAdd adds a new role.
+	RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error)
+	// RoleGet gets detailed role information.
+	RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error)
+	// RoleList gets a list of all roles.
+	RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error)
+	// RoleDelete deletes a specified role.
+	RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error)
+	// RoleGrantPermission grants a permission of a specified key or range to a specified role.
+	RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error)
+	// RoleRevokePermission revokes a key or range permission of a specified role.
+	RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error)
+}
+
+type authClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewAuthClient(cc *grpc.ClientConn) AuthClient {
+	return &authClient{cc}
+}
+
+func (c *authClient) AuthEnable(ctx context.Context, in *AuthEnableRequest, opts ...grpc.CallOption) (*AuthEnableResponse, error) {
+	out := new(AuthEnableResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthEnable", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) AuthDisable(ctx context.Context, in *AuthDisableRequest, opts ...grpc.CallOption) (*AuthDisableResponse, error) {
+	out := new(AuthDisableResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/AuthDisable", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) Authenticate(ctx context.Context, in *AuthenticateRequest, opts ...grpc.CallOption) (*AuthenticateResponse, error) {
+	out := new(AuthenticateResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/Authenticate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserAdd(ctx context.Context, in *AuthUserAddRequest, opts ...grpc.CallOption) (*AuthUserAddResponse, error) {
+	out := new(AuthUserAddResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserAdd", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserGet(ctx context.Context, in *AuthUserGetRequest, opts ...grpc.CallOption) (*AuthUserGetResponse, error) {
+	out := new(AuthUserGetResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGet", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserList(ctx context.Context, in *AuthUserListRequest, opts ...grpc.CallOption) (*AuthUserListResponse, error) {
+	out := new(AuthUserListResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserList", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserDelete(ctx context.Context, in *AuthUserDeleteRequest, opts ...grpc.CallOption) (*AuthUserDeleteResponse, error) {
+	out := new(AuthUserDeleteResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserDelete", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserChangePassword(ctx context.Context, in *AuthUserChangePasswordRequest, opts ...grpc.CallOption) (*AuthUserChangePasswordResponse, error) {
+	out := new(AuthUserChangePasswordResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserChangePassword", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserGrantRole(ctx context.Context, in *AuthUserGrantRoleRequest, opts ...grpc.CallOption) (*AuthUserGrantRoleResponse, error) {
+	out := new(AuthUserGrantRoleResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserGrantRole", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) UserRevokeRole(ctx context.Context, in *AuthUserRevokeRoleRequest, opts ...grpc.CallOption) (*AuthUserRevokeRoleResponse, error) {
+	out := new(AuthUserRevokeRoleResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/UserRevokeRole", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleAdd(ctx context.Context, in *AuthRoleAddRequest, opts ...grpc.CallOption) (*AuthRoleAddResponse, error) {
+	out := new(AuthRoleAddResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleAdd", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleGet(ctx context.Context, in *AuthRoleGetRequest, opts ...grpc.CallOption) (*AuthRoleGetResponse, error) {
+	out := new(AuthRoleGetResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGet", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleList(ctx context.Context, in *AuthRoleListRequest, opts ...grpc.CallOption) (*AuthRoleListResponse, error) {
+	out := new(AuthRoleListResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleList", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleDelete(ctx context.Context, in *AuthRoleDeleteRequest, opts ...grpc.CallOption) (*AuthRoleDeleteResponse, error) {
+	out := new(AuthRoleDeleteResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleDelete", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleGrantPermission(ctx context.Context, in *AuthRoleGrantPermissionRequest, opts ...grpc.CallOption) (*AuthRoleGrantPermissionResponse, error) {
+	out := new(AuthRoleGrantPermissionResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleGrantPermission", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *authClient) RoleRevokePermission(ctx context.Context, in *AuthRoleRevokePermissionRequest, opts ...grpc.CallOption) (*AuthRoleRevokePermissionResponse, error) {
+	out := new(AuthRoleRevokePermissionResponse)
+	err := c.cc.Invoke(ctx, "/etcdserverpb.Auth/RoleRevokePermission", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// AuthServer is the server API for Auth service.
+type AuthServer interface {
+	// AuthEnable enables authentication.
+	AuthEnable(context.Context, *AuthEnableRequest) (*AuthEnableResponse, error)
+	// AuthDisable disables authentication.
+	AuthDisable(context.Context, *AuthDisableRequest) (*AuthDisableResponse, error)
+	// Authenticate processes an authenticate request.
+	Authenticate(context.Context, *AuthenticateRequest) (*AuthenticateResponse, error)
+	// UserAdd adds a new user.
+	UserAdd(context.Context, *AuthUserAddRequest) (*AuthUserAddResponse, error)
+	// UserGet gets detailed user information.
+	UserGet(context.Context, *AuthUserGetRequest) (*AuthUserGetResponse, error)
+	// UserList gets a list of all users.
+	UserList(context.Context, *AuthUserListRequest) (*AuthUserListResponse, error)
+	// UserDelete deletes a specified user.
+	UserDelete(context.Context, *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error)
+	// UserChangePassword changes the password of a specified user.
+	UserChangePassword(context.Context, *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error)
+	// UserGrantRole grants a role to a specified user.
+	UserGrantRole(context.Context, *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error)
+	// UserRevokeRole revokes a role of a specified user.
+	UserRevokeRole(context.Context, *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error)
+	// RoleAdd adds a new role.
+	RoleAdd(context.Context, *AuthRoleAddRequest) (*AuthRoleAddResponse, error)
+	// RoleGet gets detailed role information.
+	RoleGet(context.Context, *AuthRoleGetRequest) (*AuthRoleGetResponse, error)
+	// RoleList gets a list of all roles.
+	RoleList(context.Context, *AuthRoleListRequest) (*AuthRoleListResponse, error)
+	// RoleDelete deletes a specified role.
+	RoleDelete(context.Context, *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error)
+	// RoleGrantPermission grants a permission of a specified key or range to a specified role.
+	RoleGrantPermission(context.Context, *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error)
+	// RoleRevokePermission revokes a key or range permission of a specified role.
+	RoleRevokePermission(context.Context, *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error)
+}
+
+// UnimplementedAuthServer can be embedded to have forward compatible implementations.
+type UnimplementedAuthServer struct {
+}
+
+func (*UnimplementedAuthServer) AuthEnable(ctx context.Context, req *AuthEnableRequest) (*AuthEnableResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AuthEnable not implemented")
+}
+func (*UnimplementedAuthServer) AuthDisable(ctx context.Context, req *AuthDisableRequest) (*AuthDisableResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AuthDisable not implemented")
+}
+func (*UnimplementedAuthServer) Authenticate(ctx context.Context, req *AuthenticateRequest) (*AuthenticateResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method Authenticate not implemented")
+}
+func (*UnimplementedAuthServer) UserAdd(ctx context.Context, req *AuthUserAddRequest) (*AuthUserAddResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserAdd not implemented")
+}
+func (*UnimplementedAuthServer) UserGet(ctx context.Context, req *AuthUserGetRequest) (*AuthUserGetResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserGet not implemented")
+}
+func (*UnimplementedAuthServer) UserList(ctx context.Context, req *AuthUserListRequest) (*AuthUserListResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserList not implemented")
+}
+func (*UnimplementedAuthServer) UserDelete(ctx context.Context, req *AuthUserDeleteRequest) (*AuthUserDeleteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserDelete not implemented")
+}
+func (*UnimplementedAuthServer) UserChangePassword(ctx context.Context, req *AuthUserChangePasswordRequest) (*AuthUserChangePasswordResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserChangePassword not implemented")
+}
+func (*UnimplementedAuthServer) UserGrantRole(ctx context.Context, req *AuthUserGrantRoleRequest) (*AuthUserGrantRoleResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserGrantRole not implemented")
+}
+func (*UnimplementedAuthServer) UserRevokeRole(ctx context.Context, req *AuthUserRevokeRoleRequest) (*AuthUserRevokeRoleResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UserRevokeRole not implemented")
+}
+func (*UnimplementedAuthServer) RoleAdd(ctx context.Context, req *AuthRoleAddRequest) (*AuthRoleAddResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleAdd not implemented")
+}
+func (*UnimplementedAuthServer) RoleGet(ctx context.Context, req *AuthRoleGetRequest) (*AuthRoleGetResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleGet not implemented")
+}
+func (*UnimplementedAuthServer) RoleList(ctx context.Context, req *AuthRoleListRequest) (*AuthRoleListResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleList not implemented")
+}
+func (*UnimplementedAuthServer) RoleDelete(ctx context.Context, req *AuthRoleDeleteRequest) (*AuthRoleDeleteResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleDelete not implemented")
+}
+func (*UnimplementedAuthServer) RoleGrantPermission(ctx context.Context, req *AuthRoleGrantPermissionRequest) (*AuthRoleGrantPermissionResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleGrantPermission not implemented")
+}
+func (*UnimplementedAuthServer) RoleRevokePermission(ctx context.Context, req *AuthRoleRevokePermissionRequest) (*AuthRoleRevokePermissionResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RoleRevokePermission not implemented")
+}
+
+func RegisterAuthServer(s *grpc.Server, srv AuthServer) {
+	s.RegisterService(&_Auth_serviceDesc, srv)
+}
+
+func _Auth_AuthEnable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthEnableRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).AuthEnable(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/AuthEnable",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).AuthEnable(ctx, req.(*AuthEnableRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_AuthDisable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthDisableRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).AuthDisable(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/AuthDisable",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).AuthDisable(ctx, req.(*AuthDisableRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_Authenticate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthenticateRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).Authenticate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/Authenticate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).Authenticate(ctx, req.(*AuthenticateRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserAddRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserAdd(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserAdd",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserAdd(ctx, req.(*AuthUserAddRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserGetRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserGet(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserGet",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserGet(ctx, req.(*AuthUserGetRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserListRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserList(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserList",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserList(ctx, req.(*AuthUserListRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserDeleteRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserDelete(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserDelete",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserDelete(ctx, req.(*AuthUserDeleteRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserChangePassword_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserChangePasswordRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserChangePassword(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserChangePassword",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserChangePassword(ctx, req.(*AuthUserChangePasswordRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserGrantRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserGrantRoleRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserGrantRole(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserGrantRole",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserGrantRole(ctx, req.(*AuthUserGrantRoleRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_UserRevokeRole_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthUserRevokeRoleRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).UserRevokeRole(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/UserRevokeRole",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).UserRevokeRole(ctx, req.(*AuthUserRevokeRoleRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleAdd_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleAddRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleAdd(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleAdd",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleAdd(ctx, req.(*AuthRoleAddRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleGet_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleGetRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleGet(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleGet",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleGet(ctx, req.(*AuthRoleGetRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleList_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleListRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleList(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleList",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleList(ctx, req.(*AuthRoleListRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleDelete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleDeleteRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleDelete(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleDelete",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleDelete(ctx, req.(*AuthRoleDeleteRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleGrantPermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleGrantPermissionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleGrantPermission(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleGrantPermission",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleGrantPermission(ctx, req.(*AuthRoleGrantPermissionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Auth_RoleRevokePermission_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(AuthRoleRevokePermissionRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(AuthServer).RoleRevokePermission(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/etcdserverpb.Auth/RoleRevokePermission",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(AuthServer).RoleRevokePermission(ctx, req.(*AuthRoleRevokePermissionRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Auth_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "etcdserverpb.Auth",
+	HandlerType: (*AuthServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "AuthEnable",
+			Handler:    _Auth_AuthEnable_Handler,
+		},
+		{
+			MethodName: "AuthDisable",
+			Handler:    _Auth_AuthDisable_Handler,
+		},
+		{
+			MethodName: "Authenticate",
+			Handler:    _Auth_Authenticate_Handler,
+		},
+		{
+			MethodName: "UserAdd",
+			Handler:    _Auth_UserAdd_Handler,
+		},
+		{
+			MethodName: "UserGet",
+			Handler:    _Auth_UserGet_Handler,
+		},
+		{
+			MethodName: "UserList",
+			Handler:    _Auth_UserList_Handler,
+		},
+		{
+			MethodName: "UserDelete",
+			Handler:    _Auth_UserDelete_Handler,
+		},
+		{
+			MethodName: "UserChangePassword",
+			Handler:    _Auth_UserChangePassword_Handler,
+		},
+		{
+			MethodName: "UserGrantRole",
+			Handler:    _Auth_UserGrantRole_Handler,
+		},
+		{
+			MethodName: "UserRevokeRole",
+			Handler:    _Auth_UserRevokeRole_Handler,
+		},
+		{
+			MethodName: "RoleAdd",
+			Handler:    _Auth_RoleAdd_Handler,
+		},
+		{
+			MethodName: "RoleGet",
+			Handler:    _Auth_RoleGet_Handler,
+		},
+		{
+			MethodName: "RoleList",
+			Handler:    _Auth_RoleList_Handler,
+		},
+		{
+			MethodName: "RoleDelete",
+			Handler:    _Auth_RoleDelete_Handler,
+		},
+		{
+			MethodName: "RoleGrantPermission",
+			Handler:    _Auth_RoleGrantPermission_Handler,
+		},
+		{
+			MethodName: "RoleRevokePermission",
+			Handler:    _Auth_RoleRevokePermission_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "rpc.proto",
+}
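+
+// Usage note (editorial sketch, not part of the generated code): the Auth RPCs
+// are all unary; a typical bootstrap adds a user, grants it a role, and then
+// enables authentication. Assuming a dialed *grpc.ClientConn conn and a
+// context ctx (hypothetical names):
+//
+//	ac := NewAuthClient(conn)
+//	_, _ = ac.UserAdd(ctx, &AuthUserAddRequest{Name: "root", Password: "secret"})
+//	_, _ = ac.UserGrantRole(ctx, &AuthUserGrantRoleRequest{User: "root", Role: "root"})
+//	if _, err := ac.AuthEnable(ctx, &AuthEnableRequest{}); err != nil {
+//		/* handle error */
+//	}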
+
+func (m *ResponseHeader) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResponseHeader) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseHeader) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.RaftTerm != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.Revision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.MemberId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MemberId))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.ClusterId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ClusterId))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
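+
+// Encoding note (editorial): MarshalToSizedBuffer fills the buffer from the
+// end toward index 0, writing fields in descending field-number order; after a
+// successful marshal the encoded data occupies dAtA[i:] and the returned count
+// len(dAtA)-i equals the precomputed Size(). The hard-coded bytes are protobuf
+// field tags, tag = (field_number << 3) | wire_type. For ResponseHeader the
+// varint fields (wire type 0) cluster_id=1, member_id=2, revision=3 and
+// raft_term=4 therefore yield 0x08, 0x10, 0x18 and 0x20, while a
+// length-delimited field (wire type 2) such as RangeRequest.key (field 1)
+// yields 0x0a.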
+
+func (m *RangeRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RangeRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.MaxCreateRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MaxCreateRevision))
+		i--
+		dAtA[i] = 0x68
+	}
+	if m.MinCreateRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MinCreateRevision))
+		i--
+		dAtA[i] = 0x60
+	}
+	if m.MaxModRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MaxModRevision))
+		i--
+		dAtA[i] = 0x58
+	}
+	if m.MinModRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MinModRevision))
+		i--
+		dAtA[i] = 0x50
+	}
+	if m.CountOnly {
+		i--
+		if m.CountOnly {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x48
+	}
+	if m.KeysOnly {
+		i--
+		if m.KeysOnly {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x40
+	}
+	if m.Serializable {
+		i--
+		if m.Serializable {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x38
+	}
+	if m.SortTarget != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.SortTarget))
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.SortOrder != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.SortOrder))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.Revision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.Limit != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Limit))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RangeResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RangeResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Count != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Count))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.More {
+		i--
+		if m.More {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Kvs) > 0 {
+		for iNdEx := len(m.Kvs) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Kvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PutRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PutRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PutRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.IgnoreLease {
+		i--
+		if m.IgnoreLease {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.IgnoreValue {
+		i--
+		if m.IgnoreValue {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.PrevKv {
+		i--
+		if m.PrevKv {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.Lease != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Value) > 0 {
+		i -= len(m.Value)
+		copy(dAtA[i:], m.Value)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *PutResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *PutResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *PutResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.PrevKv != nil {
+		{
+			size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeleteRangeRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteRangeRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteRangeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.PrevKv {
+		i--
+		if m.PrevKv {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DeleteRangeResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DeleteRangeResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DeleteRangeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.PrevKvs) > 0 {
+		for iNdEx := len(m.PrevKvs) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.PrevKvs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Deleted != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Deleted))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RequestOp) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *RequestOp) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *RequestOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Request != nil {
+		{
+			size := m.Request.Size()
+			i -= size
+			if _, err := m.Request.MarshalTo(dAtA[i:]); err != nil {
+				return 0, err
+			}
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *RequestOp_RequestRange) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *RequestOp_RequestRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.RequestRange != nil {
+		{
+			size, err := m.RequestRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+func (m *RequestOp_RequestPut) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *RequestOp_RequestPut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.RequestPut != nil {
+		{
+			size, err := m.RequestPut.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	return len(dAtA) - i, nil
+}
+func (m *RequestOp_RequestDeleteRange) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *RequestOp_RequestDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.RequestDeleteRange != nil {
+		{
+			size, err := m.RequestDeleteRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *RequestOp_RequestTxn) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *RequestOp_RequestTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.RequestTxn != nil {
+		{
+			size, err := m.RequestTxn.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	return len(dAtA) - i, nil
+}
+func (m *ResponseOp) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ResponseOp) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ResponseOp) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Response != nil {
+		{
+			size := m.Response.Size()
+			i -= size
+			if _, err := m.Response.MarshalTo(dAtA[i:]); err != nil {
+				return 0, err
+			}
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ResponseOp_ResponseRange) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *ResponseOp_ResponseRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ResponseRange != nil {
+		{
+			size, err := m.ResponseRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+func (m *ResponseOp_ResponsePut) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *ResponseOp_ResponsePut) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ResponsePut != nil {
+		{
+			size, err := m.ResponsePut.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	return len(dAtA) - i, nil
+}
+func (m *ResponseOp_ResponseDeleteRange) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *ResponseOp_ResponseDeleteRange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ResponseDeleteRange != nil {
+		{
+			size, err := m.ResponseDeleteRange.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *ResponseOp_ResponseTxn) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *ResponseOp_ResponseTxn) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ResponseTxn != nil {
+		{
+			size, err := m.ResponseTxn.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x22
+	}
+	return len(dAtA) - i, nil
+}
+func (m *Compare) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Compare) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Compare) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x4
+		i--
+		dAtA[i] = 0x82
+	}
+	if m.TargetUnion != nil {
+		{
+			size := m.TargetUnion.Size()
+			i -= size
+			if _, err := m.TargetUnion.MarshalTo(dAtA[i:]); err != nil {
+				return 0, err
+			}
+		}
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Target != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Target))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Result != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Result))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Compare_Version) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Compare_Version) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i = encodeVarintRpc(dAtA, i, uint64(m.Version))
+	i--
+	dAtA[i] = 0x20
+	return len(dAtA) - i, nil
+}
+func (m *Compare_CreateRevision) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Compare_CreateRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i = encodeVarintRpc(dAtA, i, uint64(m.CreateRevision))
+	i--
+	dAtA[i] = 0x28
+	return len(dAtA) - i, nil
+}
+func (m *Compare_ModRevision) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Compare_ModRevision) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i = encodeVarintRpc(dAtA, i, uint64(m.ModRevision))
+	i--
+	dAtA[i] = 0x30
+	return len(dAtA) - i, nil
+}
+func (m *Compare_Value) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Compare_Value) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.Value != nil {
+		i -= len(m.Value)
+		copy(dAtA[i:], m.Value)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Value)))
+		i--
+		dAtA[i] = 0x3a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *Compare_Lease) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *Compare_Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	i = encodeVarintRpc(dAtA, i, uint64(m.Lease))
+	i--
+	dAtA[i] = 0x40
+	return len(dAtA) - i, nil
+}
+func (m *TxnRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TxnRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TxnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Failure) > 0 {
+		for iNdEx := len(m.Failure) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Failure[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Success) > 0 {
+		for iNdEx := len(m.Success) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Success[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if len(m.Compare) > 0 {
+		for iNdEx := len(m.Compare) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Compare[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *TxnResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *TxnResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *TxnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Responses) > 0 {
+		for iNdEx := len(m.Responses) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Responses[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Succeeded {
+		i--
+		if m.Succeeded {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *CompactionRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CompactionRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CompactionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Physical {
+		i--
+		if m.Physical {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Revision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *CompactionResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *CompactionResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *CompactionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *HashRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HashRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *HashKVRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HashKVRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashKVRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Revision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Revision))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *HashKVResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HashKVResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashKVResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.CompactRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.Hash != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *HashResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HashResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HashResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Hash != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Hash))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SnapshotRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SnapshotRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *SnapshotResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SnapshotResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Blob) > 0 {
+		i -= len(m.Blob)
+		copy(dAtA[i:], m.Blob)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Blob)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.RemainingBytes != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.RemainingBytes))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.RequestUnion != nil {
+		{
+			size := m.RequestUnion.Size()
+			i -= size
+			if _, err := m.RequestUnion.MarshalTo(dAtA[i:]); err != nil {
+				return 0, err
+			}
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchRequest_CreateRequest) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *WatchRequest_CreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.CreateRequest != nil {
+		{
+			size, err := m.CreateRequest.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+func (m *WatchRequest_CancelRequest) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *WatchRequest_CancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.CancelRequest != nil {
+		{
+			size, err := m.CancelRequest.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	return len(dAtA) - i, nil
+}
+func (m *WatchRequest_ProgressRequest) MarshalTo(dAtA []byte) (int, error) {
+	return m.MarshalToSizedBuffer(dAtA[:m.Size()])
+}
+
+func (m *WatchRequest_ProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	if m.ProgressRequest != nil {
+		{
+			size, err := m.ProgressRequest.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	return len(dAtA) - i, nil
+}
+func (m *WatchCreateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchCreateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchCreateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Fragment {
+		i--
+		if m.Fragment {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x40
+	}
+	if m.WatchId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+		i--
+		dAtA[i] = 0x38
+	}
+	if m.PrevKv {
+		i--
+		if m.PrevKv {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x30
+	}
+	if len(m.Filters) > 0 {
+		dAtA22 := make([]byte, len(m.Filters)*10)
+		var j21 int
+		for _, num := range m.Filters {
+			for num >= 1<<7 {
+				dAtA22[j21] = uint8(uint64(num)&0x7f | 0x80)
+				num >>= 7
+				j21++
+			}
+			dAtA22[j21] = uint8(num)
+			j21++
+		}
+		i -= j21
+		copy(dAtA[i:], dAtA22[:j21])
+		i = encodeVarintRpc(dAtA, i, uint64(j21))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.ProgressNotify {
+		i--
+		if m.ProgressNotify {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.StartRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.StartRevision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchCancelRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchCancelRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchCancelRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.WatchId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchProgressRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchProgressRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchProgressRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *WatchResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *WatchResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *WatchResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Events) > 0 {
+		for iNdEx := len(m.Events) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Events[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x5a
+		}
+	}
+	if m.Fragment {
+		i--
+		if m.Fragment {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x38
+	}
+	if len(m.CancelReason) > 0 {
+		i -= len(m.CancelReason)
+		copy(dAtA[i:], m.CancelReason)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.CancelReason)))
+		i--
+		dAtA[i] = 0x32
+	}
+	if m.CompactRevision != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.CompactRevision))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.Canceled {
+		i--
+		if m.Canceled {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.Created {
+		i--
+		if m.Created {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.WatchId != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.WatchId))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseGrantRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseGrantRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseGrantRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.TTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseGrantResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseGrantResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseGrantResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Error) > 0 {
+		i -= len(m.Error)
+		copy(dAtA[i:], m.Error)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Error)))
+		i--
+		dAtA[i] = 0x22
+	}
+	if m.TTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseRevokeRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseRevokeRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseRevokeRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseRevokeResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseRevokeResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseRevokeResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseKeepAliveRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseKeepAliveRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseKeepAliveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseKeepAliveResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseKeepAliveResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseKeepAliveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.TTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseTimeToLiveRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseTimeToLiveRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseTimeToLiveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Keys {
+		i--
+		if m.Keys {
+			dAtA[i] = 1
+		} else {
+			dAtA[i] = 0
+		}
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseTimeToLiveResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseTimeToLiveResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseTimeToLiveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Keys) > 0 {
+		for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Keys[iNdEx])
+			copy(dAtA[i:], m.Keys[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.Keys[iNdEx])))
+			i--
+			dAtA[i] = 0x2a
+		}
+	}
+	if m.GrantedTTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.GrantedTTL))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.TTL != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TTL))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseLeasesRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseLeasesRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseStatus) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseStatus) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *LeaseLeasesResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *LeaseLeasesResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *LeaseLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Leases) > 0 {
+		for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Member) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Member) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Member) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.ClientURLs) > 0 {
+		for iNdEx := len(m.ClientURLs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.ClientURLs[iNdEx])
+			copy(dAtA[i:], m.ClientURLs[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.ClientURLs[iNdEx])))
+			i--
+			dAtA[i] = 0x22
+		}
+	}
+	if len(m.PeerURLs) > 0 {
+		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.PeerURLs[iNdEx])
+			copy(dAtA[i:], m.PeerURLs[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberAddRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberAddRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.PeerURLs) > 0 {
+		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.PeerURLs[iNdEx])
+			copy(dAtA[i:], m.PeerURLs[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
+			i--
+			dAtA[i] = 0xa
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberAddResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberAddResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Members) > 0 {
+		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x1a
+		}
+	}
+	if m.Member != nil {
+		{
+			size, err := m.Member.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberRemoveRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberRemoveRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberRemoveRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberRemoveResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberRemoveResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberRemoveResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Members) > 0 {
+		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberUpdateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberUpdateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberUpdateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.PeerURLs) > 0 {
+		for iNdEx := len(m.PeerURLs) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.PeerURLs[iNdEx])
+			copy(dAtA[i:], m.PeerURLs[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.PeerURLs[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.ID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.ID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberUpdateResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberUpdateResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberUpdateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Members) > 0 {
+		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberListRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberListRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MemberListResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MemberListResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MemberListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Members) > 0 {
+		for iNdEx := len(m.Members) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Members[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DefragmentRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DefragmentRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DefragmentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *DefragmentResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *DefragmentResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *DefragmentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MoveLeaderRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MoveLeaderRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MoveLeaderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.TargetID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.TargetID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *MoveLeaderResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *MoveLeaderResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *MoveLeaderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AlarmRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AlarmRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AlarmRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Alarm != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.MemberID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.Action != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Action))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AlarmMember) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AlarmMember) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AlarmMember) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Alarm != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Alarm))
+		i--
+		dAtA[i] = 0x10
+	}
+	if m.MemberID != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.MemberID))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AlarmResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AlarmResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AlarmResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Alarms) > 0 {
+		for iNdEx := len(m.Alarms) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Alarms[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *StatusRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StatusRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatusRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *StatusResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *StatusResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.RaftTerm != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.RaftTerm))
+		i--
+		dAtA[i] = 0x30
+	}
+	if m.RaftIndex != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.RaftIndex))
+		i--
+		dAtA[i] = 0x28
+	}
+	if m.Leader != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.Leader))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.DbSize != 0 {
+		i = encodeVarintRpc(dAtA, i, uint64(m.DbSize))
+		i--
+		dAtA[i] = 0x18
+	}
+	if len(m.Version) > 0 {
+		i -= len(m.Version)
+		copy(dAtA[i:], m.Version)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Version)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthEnableRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthEnableRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthEnableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthDisableRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthDisableRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthDisableRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthenticateRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthenticateRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthenticateRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserAddRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserAddRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGetRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserGetRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserDeleteRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserChangePasswordRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserChangePasswordRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserChangePasswordRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Password) > 0 {
+		i -= len(m.Password)
+		copy(dAtA[i:], m.Password)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Password)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGrantRoleRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserGrantRoleRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGrantRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.User) > 0 {
+		i -= len(m.User)
+		copy(dAtA[i:], m.User)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.User)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserRevokeRoleRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserRevokeRoleRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserRevokeRoleRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleAddRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleAddRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleAddRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGetRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleGetRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGetRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserListRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserListRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleListRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleListRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleListRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleDeleteRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleDeleteRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleDeleteRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGrantPermissionRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleGrantPermissionRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGrantPermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Perm != nil {
+		{
+			size, err := m.Perm.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Name) > 0 {
+		i -= len(m.Name)
+		copy(dAtA[i:], m.Name)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Name)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleRevokePermissionRequest) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleRevokePermissionRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.RangeEnd) > 0 {
+		i -= len(m.RangeEnd)
+		copy(dAtA[i:], m.RangeEnd)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.RangeEnd)))
+		i--
+		dAtA[i] = 0x1a
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if len(m.Role) > 0 {
+		i -= len(m.Role)
+		copy(dAtA[i:], m.Role)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Role)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthEnableResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthEnableResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthEnableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthDisableResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthDisableResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthDisableResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthenticateResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthenticateResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthenticateResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Token) > 0 {
+		i -= len(m.Token)
+		copy(dAtA[i:], m.Token)
+		i = encodeVarintRpc(dAtA, i, uint64(len(m.Token)))
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserAddResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserAddResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGetResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserGetResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Roles) > 0 {
+		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Roles[iNdEx])
+			copy(dAtA[i:], m.Roles[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserDeleteResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserChangePasswordResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserChangePasswordResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserChangePasswordResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserGrantRoleResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserGrantRoleResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserGrantRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserRevokeRoleResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserRevokeRoleResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserRevokeRoleResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleAddResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleAddResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleAddResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGetResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleGetResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGetResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Perm) > 0 {
+		for iNdEx := len(m.Perm) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Perm[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRpc(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleListResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleListResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Roles) > 0 {
+		for iNdEx := len(m.Roles) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Roles[iNdEx])
+			copy(dAtA[i:], m.Roles[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.Roles[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthUserListResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthUserListResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthUserListResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Users) > 0 {
+		for iNdEx := len(m.Users) - 1; iNdEx >= 0; iNdEx-- {
+			i -= len(m.Users[iNdEx])
+			copy(dAtA[i:], m.Users[iNdEx])
+			i = encodeVarintRpc(dAtA, i, uint64(len(m.Users[iNdEx])))
+			i--
+			dAtA[i] = 0x12
+		}
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleDeleteResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleDeleteResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleDeleteResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleGrantPermissionResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleGrantPermissionResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleGrantPermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *AuthRoleRevokePermissionResponse) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *AuthRoleRevokePermissionResponse) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *AuthRoleRevokePermissionResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Header != nil {
+		{
+			size, err := m.Header.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintRpc(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintRpc(dAtA []byte, offset int, v uint64) int {
+	offset -= sovRpc(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *ResponseHeader) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ClusterId != 0 {
+		n += 1 + sovRpc(uint64(m.ClusterId))
+	}
+	if m.MemberId != 0 {
+		n += 1 + sovRpc(uint64(m.MemberId))
+	}
+	if m.Revision != 0 {
+		n += 1 + sovRpc(uint64(m.Revision))
+	}
+	if m.RaftTerm != 0 {
+		n += 1 + sovRpc(uint64(m.RaftTerm))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *RangeRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Limit != 0 {
+		n += 1 + sovRpc(uint64(m.Limit))
+	}
+	if m.Revision != 0 {
+		n += 1 + sovRpc(uint64(m.Revision))
+	}
+	if m.SortOrder != 0 {
+		n += 1 + sovRpc(uint64(m.SortOrder))
+	}
+	if m.SortTarget != 0 {
+		n += 1 + sovRpc(uint64(m.SortTarget))
+	}
+	if m.Serializable {
+		n += 2
+	}
+	if m.KeysOnly {
+		n += 2
+	}
+	if m.CountOnly {
+		n += 2
+	}
+	if m.MinModRevision != 0 {
+		n += 1 + sovRpc(uint64(m.MinModRevision))
+	}
+	if m.MaxModRevision != 0 {
+		n += 1 + sovRpc(uint64(m.MaxModRevision))
+	}
+	if m.MinCreateRevision != 0 {
+		n += 1 + sovRpc(uint64(m.MinCreateRevision))
+	}
+	if m.MaxCreateRevision != 0 {
+		n += 1 + sovRpc(uint64(m.MaxCreateRevision))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *RangeResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Kvs) > 0 {
+		for _, e := range m.Kvs {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.More {
+		n += 2
+	}
+	if m.Count != 0 {
+		n += 1 + sovRpc(uint64(m.Count))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *PutRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Value)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Lease != 0 {
+		n += 1 + sovRpc(uint64(m.Lease))
+	}
+	if m.PrevKv {
+		n += 2
+	}
+	if m.IgnoreValue {
+		n += 2
+	}
+	if m.IgnoreLease {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *PutResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.PrevKv != nil {
+		l = m.PrevKv.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DeleteRangeRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.PrevKv {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DeleteRangeResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Deleted != 0 {
+		n += 1 + sovRpc(uint64(m.Deleted))
+	}
+	if len(m.PrevKvs) > 0 {
+		for _, e := range m.PrevKvs {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *RequestOp) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Request != nil {
+		n += m.Request.Size()
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *RequestOp_RequestRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestRange != nil {
+		l = m.RequestRange.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *RequestOp_RequestPut) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestPut != nil {
+		l = m.RequestPut.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *RequestOp_RequestDeleteRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestDeleteRange != nil {
+		l = m.RequestDeleteRange.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *RequestOp_RequestTxn) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestTxn != nil {
+		l = m.RequestTxn.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *ResponseOp) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Response != nil {
+		n += m.Response.Size()
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *ResponseOp_ResponseRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ResponseRange != nil {
+		l = m.ResponseRange.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *ResponseOp_ResponsePut) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ResponsePut != nil {
+		l = m.ResponsePut.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *ResponseOp_ResponseDeleteRange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ResponseDeleteRange != nil {
+		l = m.ResponseDeleteRange.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *ResponseOp_ResponseTxn) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ResponseTxn != nil {
+		l = m.ResponseTxn.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *Compare) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Result != 0 {
+		n += 1 + sovRpc(uint64(m.Result))
+	}
+	if m.Target != 0 {
+		n += 1 + sovRpc(uint64(m.Target))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.TargetUnion != nil {
+		n += m.TargetUnion.Size()
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 2 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Compare_Version) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRpc(uint64(m.Version))
+	return n
+}
+func (m *Compare_CreateRevision) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRpc(uint64(m.CreateRevision))
+	return n
+}
+func (m *Compare_ModRevision) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRpc(uint64(m.ModRevision))
+	return n
+}
+func (m *Compare_Value) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Value != nil {
+		l = len(m.Value)
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *Compare_Lease) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRpc(uint64(m.Lease))
+	return n
+}
+func (m *TxnRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Compare) > 0 {
+		for _, e := range m.Compare {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if len(m.Success) > 0 {
+		for _, e := range m.Success {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if len(m.Failure) > 0 {
+		for _, e := range m.Failure {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *TxnResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Succeeded {
+		n += 2
+	}
+	if len(m.Responses) > 0 {
+		for _, e := range m.Responses {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *CompactionRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Revision != 0 {
+		n += 1 + sovRpc(uint64(m.Revision))
+	}
+	if m.Physical {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *CompactionResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HashRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HashKVRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Revision != 0 {
+		n += 1 + sovRpc(uint64(m.Revision))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HashKVResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Hash != 0 {
+		n += 1 + sovRpc(uint64(m.Hash))
+	}
+	if m.CompactRevision != 0 {
+		n += 1 + sovRpc(uint64(m.CompactRevision))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HashResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Hash != 0 {
+		n += 1 + sovRpc(uint64(m.Hash))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *SnapshotRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *SnapshotResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.RemainingBytes != 0 {
+		n += 1 + sovRpc(uint64(m.RemainingBytes))
+	}
+	l = len(m.Blob)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.RequestUnion != nil {
+		n += m.RequestUnion.Size()
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchRequest_CreateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.CreateRequest != nil {
+		l = m.CreateRequest.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *WatchRequest_CancelRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.CancelRequest != nil {
+		l = m.CancelRequest.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *WatchRequest_ProgressRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ProgressRequest != nil {
+		l = m.ProgressRequest.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	return n
+}
+func (m *WatchCreateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.StartRevision != 0 {
+		n += 1 + sovRpc(uint64(m.StartRevision))
+	}
+	if m.ProgressNotify {
+		n += 2
+	}
+	if len(m.Filters) > 0 {
+		l = 0
+		for _, e := range m.Filters {
+			l += sovRpc(uint64(e))
+		}
+		n += 1 + sovRpc(uint64(l)) + l
+	}
+	if m.PrevKv {
+		n += 2
+	}
+	if m.WatchId != 0 {
+		n += 1 + sovRpc(uint64(m.WatchId))
+	}
+	if m.Fragment {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchCancelRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.WatchId != 0 {
+		n += 1 + sovRpc(uint64(m.WatchId))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchProgressRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *WatchResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.WatchId != 0 {
+		n += 1 + sovRpc(uint64(m.WatchId))
+	}
+	if m.Created {
+		n += 2
+	}
+	if m.Canceled {
+		n += 2
+	}
+	if m.CompactRevision != 0 {
+		n += 1 + sovRpc(uint64(m.CompactRevision))
+	}
+	l = len(m.CancelReason)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Fragment {
+		n += 2
+	}
+	if len(m.Events) > 0 {
+		for _, e := range m.Events {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseGrantRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.TTL != 0 {
+		n += 1 + sovRpc(uint64(m.TTL))
+	}
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseGrantResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.TTL != 0 {
+		n += 1 + sovRpc(uint64(m.TTL))
+	}
+	l = len(m.Error)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseRevokeRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseRevokeResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseKeepAliveRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseKeepAliveResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.TTL != 0 {
+		n += 1 + sovRpc(uint64(m.TTL))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseTimeToLiveRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.Keys {
+		n += 2
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseTimeToLiveResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.TTL != 0 {
+		n += 1 + sovRpc(uint64(m.TTL))
+	}
+	if m.GrantedTTL != 0 {
+		n += 1 + sovRpc(uint64(m.GrantedTTL))
+	}
+	if len(m.Keys) > 0 {
+		for _, b := range m.Keys {
+			l = len(b)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseLeasesRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseStatus) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *LeaseLeasesResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Leases) > 0 {
+		for _, e := range m.Leases {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Member) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.PeerURLs) > 0 {
+		for _, s := range m.PeerURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if len(m.ClientURLs) > 0 {
+		for _, s := range m.ClientURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberAddRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.PeerURLs) > 0 {
+		for _, s := range m.PeerURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberAddResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Member != nil {
+		l = m.Member.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Members) > 0 {
+		for _, e := range m.Members {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberRemoveRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberRemoveResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Members) > 0 {
+		for _, e := range m.Members {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberUpdateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.ID != 0 {
+		n += 1 + sovRpc(uint64(m.ID))
+	}
+	if len(m.PeerURLs) > 0 {
+		for _, s := range m.PeerURLs {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberUpdateResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Members) > 0 {
+		for _, e := range m.Members {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberListRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MemberListResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Members) > 0 {
+		for _, e := range m.Members {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DefragmentRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *DefragmentResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MoveLeaderRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.TargetID != 0 {
+		n += 1 + sovRpc(uint64(m.TargetID))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *MoveLeaderResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AlarmRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Action != 0 {
+		n += 1 + sovRpc(uint64(m.Action))
+	}
+	if m.MemberID != 0 {
+		n += 1 + sovRpc(uint64(m.MemberID))
+	}
+	if m.Alarm != 0 {
+		n += 1 + sovRpc(uint64(m.Alarm))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AlarmMember) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.MemberID != 0 {
+		n += 1 + sovRpc(uint64(m.MemberID))
+	}
+	if m.Alarm != 0 {
+		n += 1 + sovRpc(uint64(m.Alarm))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AlarmResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Alarms) > 0 {
+		for _, e := range m.Alarms {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *StatusRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *StatusResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Version)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.DbSize != 0 {
+		n += 1 + sovRpc(uint64(m.DbSize))
+	}
+	if m.Leader != 0 {
+		n += 1 + sovRpc(uint64(m.Leader))
+	}
+	if m.RaftIndex != 0 {
+		n += 1 + sovRpc(uint64(m.RaftIndex))
+	}
+	if m.RaftTerm != 0 {
+		n += 1 + sovRpc(uint64(m.RaftTerm))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthEnableRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthDisableRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthenticateRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserAddRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserGetRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserDeleteRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserChangePasswordRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Password)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserGrantRoleRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.User)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserRevokeRoleRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleAddRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleGetRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserListRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleListRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleDeleteRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleGrantPermissionRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Name)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.Perm != nil {
+		l = m.Perm.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleRevokePermissionRequest) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Role)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.RangeEnd)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthEnableResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthDisableResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthenticateResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	l = len(m.Token)
+	if l > 0 {
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserAddResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserGetResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Roles) > 0 {
+		for _, s := range m.Roles {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserDeleteResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserChangePasswordResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserGrantRoleResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserRevokeRoleResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleAddResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleGetResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Perm) > 0 {
+		for _, e := range m.Perm {
+			l = e.Size()
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleListResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Roles) > 0 {
+		for _, s := range m.Roles {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthUserListResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if len(m.Users) > 0 {
+		for _, s := range m.Users {
+			l = len(s)
+			n += 1 + l + sovRpc(uint64(l))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleDeleteResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleGrantPermissionResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *AuthRoleRevokePermissionResponse) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Header != nil {
+		l = m.Header.Size()
+		n += 1 + l + sovRpc(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovRpc(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozRpc(x uint64) (n int) {
+	return sovRpc(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *ResponseHeader) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResponseHeader: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResponseHeader: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClusterId", wireType)
+			}
+			m.ClusterId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ClusterId |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemberId", wireType)
+			}
+			m.MemberId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MemberId |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			m.Revision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Revision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
+			}
+			m.RaftTerm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RaftTerm |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RangeRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RangeRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Limit", wireType)
+			}
+			m.Limit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Limit |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			m.Revision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Revision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SortOrder", wireType)
+			}
+			m.SortOrder = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.SortOrder |= RangeRequest_SortOrder(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field SortTarget", wireType)
+			}
+			m.SortTarget = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.SortTarget |= RangeRequest_SortTarget(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Serializable", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Serializable = bool(v != 0)
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field KeysOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.KeysOnly = bool(v != 0)
+		case 9:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CountOnly", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.CountOnly = bool(v != 0)
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MinModRevision", wireType)
+			}
+			m.MinModRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MinModRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MaxModRevision", wireType)
+			}
+			m.MaxModRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MaxModRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 12:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MinCreateRevision", wireType)
+			}
+			m.MinCreateRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MinCreateRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 13:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MaxCreateRevision", wireType)
+			}
+			m.MaxCreateRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MaxCreateRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RangeResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RangeResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kvs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Kvs = append(m.Kvs, &mvccpb.KeyValue{})
+			if err := m.Kvs[len(m.Kvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field More", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.More = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType)
+			}
+			m.Count = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Count |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PutRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PutRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PutRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+			if m.Value == nil {
+				m.Value = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+			}
+			m.Lease = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Lease |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.PrevKv = bool(v != 0)
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IgnoreValue", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.IgnoreValue = bool(v != 0)
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field IgnoreLease", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.IgnoreLease = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *PutResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: PutResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: PutResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PrevKv == nil {
+				m.PrevKv = &mvccpb.KeyValue{}
+			}
+			if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteRangeRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteRangeRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteRangeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.PrevKv = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DeleteRangeResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DeleteRangeResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DeleteRangeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Deleted", wireType)
+			}
+			m.Deleted = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Deleted |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKvs", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PrevKvs = append(m.PrevKvs, &mvccpb.KeyValue{})
+			if err := m.PrevKvs[len(m.PrevKvs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *RequestOp) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: RequestOp: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: RequestOp: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &RangeRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Request = &RequestOp_RequestRange{v}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestPut", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &PutRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Request = &RequestOp_RequestPut{v}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestDeleteRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &DeleteRangeRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Request = &RequestOp_RequestDeleteRange{v}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RequestTxn", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &TxnRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Request = &RequestOp_RequestTxn{v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ResponseOp) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ResponseOp: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ResponseOp: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponseRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &RangeResponse{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Response = &ResponseOp_ResponseRange{v}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponsePut", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &PutResponse{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Response = &ResponseOp_ResponsePut{v}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponseDeleteRange", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &DeleteRangeResponse{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Response = &ResponseOp_ResponseDeleteRange{v}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ResponseTxn", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &TxnResponse{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.Response = &ResponseOp_ResponseTxn{v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Compare) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Compare: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Compare: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType)
+			}
+			m.Result = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Result |= Compare_CompareResult(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Target", wireType)
+			}
+			m.Target = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Target |= Compare_CompareTarget(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TargetUnion = &Compare_Version{v}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TargetUnion = &Compare_CreateRevision{v}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TargetUnion = &Compare_ModRevision{v}
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := make([]byte, postIndex-iNdEx)
+			copy(v, dAtA[iNdEx:postIndex])
+			m.TargetUnion = &Compare_Value{v}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+			}
+			var v int64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.TargetUnion = &Compare_Lease{v}
+		case 64:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TxnRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TxnRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TxnRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Compare", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Compare = append(m.Compare, &Compare{})
+			if err := m.Compare[len(m.Compare)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Success", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Success = append(m.Success, &RequestOp{})
+			if err := m.Success[len(m.Success)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Failure", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Failure = append(m.Failure, &RequestOp{})
+			if err := m.Failure[len(m.Failure)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *TxnResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: TxnResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: TxnResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Succeeded", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Succeeded = bool(v != 0)
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Responses", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Responses = append(m.Responses, &ResponseOp{})
+			if err := m.Responses[len(m.Responses)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CompactionRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CompactionRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CompactionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			m.Revision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Revision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Physical", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Physical = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *CompactionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: CompactionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: CompactionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HashRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HashRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HashRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HashKVRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HashKVRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HashKVRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Revision", wireType)
+			}
+			m.Revision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Revision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HashKVResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HashKVResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HashKVResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+			}
+			m.Hash = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Hash |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
+			}
+			m.CompactRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.CompactRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HashResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HashResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HashResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType)
+			}
+			m.Hash = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Hash |= uint32(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SnapshotRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SnapshotRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SnapshotRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SnapshotResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SnapshotResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SnapshotResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RemainingBytes", wireType)
+			}
+			m.RemainingBytes = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RemainingBytes |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Blob", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Blob = append(m.Blob[:0], dAtA[iNdEx:postIndex]...)
+			if m.Blob == nil {
+				m.Blob = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreateRequest", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &WatchCreateRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.RequestUnion = &WatchRequest_CreateRequest{v}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CancelRequest", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &WatchCancelRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.RequestUnion = &WatchRequest_CancelRequest{v}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProgressRequest", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			v := &WatchProgressRequest{}
+			if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			m.RequestUnion = &WatchRequest_ProgressRequest{v}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchCreateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchCreateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchCreateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = append(m.RangeEnd[:0], dAtA[iNdEx:postIndex]...)
+			if m.RangeEnd == nil {
+				m.RangeEnd = []byte{}
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field StartRevision", wireType)
+			}
+			m.StartRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.StartRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ProgressNotify", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.ProgressNotify = bool(v != 0)
+		case 5:
+			if wireType == 0 {
+				var v WatchCreateRequest_FilterType
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRpc
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= WatchCreateRequest_FilterType(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.Filters = append(m.Filters, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRpc
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthRpc
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthRpc
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				// NOTE: elementCount is never assigned for this varint-packed enum
+				// field in the generated code, so the preallocation below never
+				// triggers; values are appended one at a time in the loop that follows.
+				if elementCount != 0 && len(m.Filters) == 0 {
+					m.Filters = make([]WatchCreateRequest_FilterType, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v WatchCreateRequest_FilterType
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowRpc
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= WatchCreateRequest_FilterType(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Filters = append(m.Filters, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.PrevKv = bool(v != 0)
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+			}
+			m.WatchId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.WatchId |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Fragment = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchCancelRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchCancelRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchCancelRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+			}
+			m.WatchId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.WatchId |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchProgressRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchProgressRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchProgressRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *WatchResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: WatchResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: WatchResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field WatchId", wireType)
+			}
+			m.WatchId = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.WatchId |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Created", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Created = bool(v != 0)
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Canceled", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Canceled = bool(v != 0)
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CompactRevision", wireType)
+			}
+			m.CompactRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.CompactRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CancelReason", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.CancelReason = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 7:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Fragment", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Fragment = bool(v != 0)
+		case 11:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Events", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Events = append(m.Events, &mvccpb.Event{})
+			if err := m.Events[len(m.Events)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseGrantRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseGrantRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseGrantRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+			}
+			m.TTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseGrantResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseGrantResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseGrantResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+			}
+			m.TTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Error", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Error = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseRevokeRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseRevokeRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseRevokeRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseRevokeResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseRevokeResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseRevokeResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseKeepAliveRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseKeepAliveRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseKeepAliveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseKeepAliveResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseKeepAliveResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseKeepAliveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+			}
+			m.TTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseTimeToLiveRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseTimeToLiveRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseTimeToLiveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Keys = bool(v != 0)
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseTimeToLiveResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseTimeToLiveResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseTimeToLiveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TTL", wireType)
+			}
+			m.TTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field GrantedTTL", wireType)
+			}
+			m.GrantedTTL = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.GrantedTTL |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Keys = append(m.Keys, make([]byte, postIndex-iNdEx))
+			copy(m.Keys[len(m.Keys)-1], dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseLeasesRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseLeasesRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseStatus) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseStatus: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseStatus: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *LeaseLeasesResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: LeaseLeasesResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: LeaseLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Leases = append(m.Leases, &LeaseStatus{})
+			if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Member) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Member: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Member: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ClientURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.ClientURLs = append(m.ClientURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberAddRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberAddRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberAddResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberAddResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Member", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Member == nil {
+				m.Member = &Member{}
+			}
+			if err := m.Member.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Members = append(m.Members, &Member{})
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberRemoveRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberRemoveRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberRemoveRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberRemoveResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberRemoveResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberRemoveResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Members = append(m.Members, &Member{})
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberUpdateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberUpdateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberUpdateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PeerURLs", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.PeerURLs = append(m.PeerURLs, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberUpdateResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberUpdateResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberUpdateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Members = append(m.Members, &Member{})
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberListRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberListRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MemberListResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MemberListResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MemberListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Members", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Members = append(m.Members, &Member{})
+			if err := m.Members[len(m.Members)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DefragmentRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DefragmentRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DefragmentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *DefragmentResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: DefragmentResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: DefragmentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MoveLeaderRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MoveLeaderRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MoveLeaderRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field TargetID", wireType)
+			}
+			m.TargetID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.TargetID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *MoveLeaderResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: MoveLeaderResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: MoveLeaderResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AlarmRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AlarmRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AlarmRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Action", wireType)
+			}
+			m.Action = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Action |= AlarmRequest_AlarmAction(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
+			}
+			m.MemberID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MemberID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+			}
+			m.Alarm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Alarm |= AlarmType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AlarmMember) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AlarmMember: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AlarmMember: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field MemberID", wireType)
+			}
+			m.MemberID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.MemberID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Alarm", wireType)
+			}
+			m.Alarm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Alarm |= AlarmType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AlarmResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AlarmResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AlarmResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Alarms", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Alarms = append(m.Alarms, &AlarmMember{})
+			if err := m.Alarms[len(m.Alarms)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StatusRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StatusRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StatusRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *StatusResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: StatusResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: StatusResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Version = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field DbSize", wireType)
+			}
+			m.DbSize = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.DbSize |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Leader", wireType)
+			}
+			m.Leader = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Leader |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RaftIndex", wireType)
+			}
+			m.RaftIndex = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RaftIndex |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RaftTerm", wireType)
+			}
+			m.RaftTerm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RaftTerm |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthEnableRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthEnableRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthEnableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthDisableRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthDisableRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthDisableRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthenticateRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthenticateRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthenticateRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserAddRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserAddRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserGetRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserGetRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserDeleteRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserDeleteRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserChangePasswordRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserChangePasswordRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserChangePasswordRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Password = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserGrantRoleRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserGrantRoleRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserGrantRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field User", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.User = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserRevokeRoleRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserRevokeRoleRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserRevokeRoleRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleAddRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleAddRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleAddRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleGetRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleGetRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleGetRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserListRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserListRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleListRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleListRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleListRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleDeleteRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleDeleteRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleDeleteRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleGrantPermissionRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleGrantPermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Name = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Perm == nil {
+				m.Perm = &authpb.Permission{}
+			}
+			if err := m.Perm.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleRevokePermissionRequest) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleRevokePermissionRequest: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Role", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Role = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RangeEnd", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.RangeEnd = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthEnableResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthEnableResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthEnableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthDisableResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthDisableResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthDisableResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthenticateResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthenticateResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthenticateResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Token", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Token = string(dAtA[iNdEx:postIndex])
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserAddResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserAddResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserGetResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserGetResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserDeleteResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserDeleteResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserChangePasswordResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserChangePasswordResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserChangePasswordResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserGrantRoleResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserGrantRoleResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserGrantRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserRevokeRoleResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserRevokeRoleResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserRevokeRoleResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleAddResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleAddResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleAddResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleGetResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleGetResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleGetResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Perm", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Perm = append(m.Perm, &authpb.Permission{})
+			if err := m.Perm[len(m.Perm)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleListResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleListResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Roles", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Roles = append(m.Roles, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthUserListResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthUserListResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthUserListResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Users", wireType)
+			}
+			var stringLen uint64
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				stringLen |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			intStringLen := int(stringLen)
+			if intStringLen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + intStringLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Users = append(m.Users, string(dAtA[iNdEx:postIndex]))
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleDeleteResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleDeleteResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleDeleteResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleGrantPermissionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleGrantPermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *AuthRoleRevokePermissionResponse) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: AuthRoleRevokePermissionResponse: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Header", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRpc
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Header == nil {
+				m.Header = &ResponseHeader{}
+			}
+			if err := m.Header.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRpc(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRpc
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipRpc(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRpc
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRpc
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthRpc
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthRpc
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowRpc
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipRpc(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthRpc
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthRpc = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRpc   = fmt.Errorf("proto: integer overflow")
+)
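
The generated Unmarshal methods and skipRpc above all decode protobuf base-128 varints with the same loop: each byte contributes its low seven bits, and a byte below 0x80 terminates the value. A minimal standalone sketch of that loop (decodeVarint is a hypothetical helper, not part of the generated file):

```
package main

import (
	"errors"
	"fmt"
)

// decodeVarint mirrors the loop used by the generated Unmarshal and skipRpc
// functions: accumulate 7 bits per byte until a byte without the continuation bit.
func decodeVarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); shift < 64; shift += 7 {
		if n >= len(b) {
			return 0, 0, errors.New("unexpected EOF")
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 {
			return v, n, nil
		}
	}
	return 0, 0, errors.New("integer overflow")
}

func main() {
	// 300 encodes as 0xAC 0x02 on the protobuf wire.
	v, n, err := decodeVarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
```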
diff --git a/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
new file mode 100644
index 0000000..d9da43c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/etcdserver/etcdserverpb/rpc.proto
@@ -0,0 +1,1075 @@
+syntax = "proto3";
+package etcdserverpb;
+
+import "gogoproto/gogo.proto";
+import "etcd/mvcc/mvccpb/kv.proto";
+import "etcd/auth/authpb/auth.proto";
+
+// for grpc-gateway
+import "google/api/annotations.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+
+service KV {
+  // Range gets the keys in the range from the key-value store.
+  rpc Range(RangeRequest) returns (RangeResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/range"
+        body: "*"
+    };
+  }
+
+  // Put puts the given key into the key-value store.
+  // A put request increments the revision of the key-value store
+  // and generates one event in the event history.
+  rpc Put(PutRequest) returns (PutResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/put"
+        body: "*"
+    };
+  }
+
+  // DeleteRange deletes the given range from the key-value store.
+  // A delete request increments the revision of the key-value store
+  // and generates a delete event in the event history for every deleted key.
+  rpc DeleteRange(DeleteRangeRequest) returns (DeleteRangeResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/deleterange"
+        body: "*"
+    };
+  }
+
+  // Txn processes multiple requests in a single transaction.
+  // A txn request increments the revision of the key-value store
+  // and generates events with the same revision for every completed request.
+  // It is not allowed to modify the same key several times within one txn.
+  rpc Txn(TxnRequest) returns (TxnResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/txn"
+        body: "*"
+    };
+  }
+
+  // Compact compacts the event history in the etcd key-value store. The key-value
+  // store should be periodically compacted or the event history will continue to grow
+  // indefinitely.
+  rpc Compact(CompactionRequest) returns (CompactionResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/compaction"
+        body: "*"
+    };
+  }
+}
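
The KV service above is the RPC surface behind etcd's Go client key-value helpers. A sketch of driving it through the clientv3 package (github.com/coreos/etcd/clientv3, which is not part of this vendored snippet; endpoint and keys are placeholders):

```
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// Put issues a PutRequest; Get issues a RangeRequest over a single key.
	if _, err := cli.Put(ctx, "/vgc/demo", "value"); err != nil {
		log.Fatal(err)
	}
	resp, err := cli.Get(ctx, "/vgc/demo")
	if err != nil {
		log.Fatal(err)
	}
	for _, kv := range resp.Kvs {
		fmt.Printf("%s = %s (rev %d)\n", kv.Key, kv.Value, kv.ModRevision)
	}
}
```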
+
+service Watch {
+  // Watch watches for events happening or that have happened. Both input and output
+  // are streams; the input stream is for creating and canceling watchers and the output
+  // stream sends events. One watch RPC can watch on multiple key ranges, streaming events
+  // for several watches at once. The entire event history can be watched starting from the
+  // last compaction revision.
+  rpc Watch(stream WatchRequest) returns (stream WatchResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/watch"
+        body: "*"
+    };
+  }
+}
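
A sketch of consuming the Watch stream via clientv3: the watch channel delivers WatchResponse batches whose Events carry mvccpb key-value pairs (endpoint and prefix are placeholders):

```
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	// One watcher over a whole prefix; each WatchResponse may batch several events.
	rch := cli.Watch(context.Background(), "/vgc/", clientv3.WithPrefix(), clientv3.WithPrevKV())
	for wresp := range rch {
		for _, ev := range wresp.Events {
			fmt.Printf("%s %q -> %q (rev %d)\n", ev.Type, ev.Kv.Key, ev.Kv.Value, ev.Kv.ModRevision)
		}
	}
}
```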
+
+service Lease {
+  // LeaseGrant creates a lease which expires if the server does not receive a keepAlive
+  // within a given time to live period. All keys attached to the lease will be expired and
+  // deleted if the lease expires. Each expired key generates a delete event in the event history.
+  rpc LeaseGrant(LeaseGrantRequest) returns (LeaseGrantResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/lease/grant"
+        body: "*"
+    };
+  }
+
+  // LeaseRevoke revokes a lease. All keys attached to the lease will expire and be deleted.
+  rpc LeaseRevoke(LeaseRevokeRequest) returns (LeaseRevokeResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/lease/revoke"
+        body: "*"
+    };
+  }
+
+  // LeaseKeepAlive keeps the lease alive by streaming keep alive requests from the client
+  // to the server and streaming keep alive responses from the server to the client.
+  rpc LeaseKeepAlive(stream LeaseKeepAliveRequest) returns (stream LeaseKeepAliveResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/lease/keepalive"
+        body: "*"
+    };
+  }
+
+  // LeaseTimeToLive retrieves lease information.
+  rpc LeaseTimeToLive(LeaseTimeToLiveRequest) returns (LeaseTimeToLiveResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/lease/timetolive"
+        body: "*"
+    };
+  }
+
+  // LeaseLeases lists all existing leases.
+  rpc LeaseLeases(LeaseLeasesRequest) returns (LeaseLeasesResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/kv/lease/leases"
+        body: "*"
+    };
+  }
+}
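
A sketch of the lease lifecycle described above using clientv3: grant a TTL, attach a key to the lease, and stream keep-alives so the key is not expired (key and TTL are placeholders):

```
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// LeaseGrant: ask for a 10-second TTL; the server may adjust it.
	lease, err := cli.Grant(ctx, 10)
	if err != nil {
		log.Fatal(err)
	}

	// Attach a key to the lease; it is deleted when the lease expires or is revoked.
	if _, err := cli.Put(ctx, "/vgc/lock", "owner-1", clientv3.WithLease(lease.ID)); err != nil {
		log.Fatal(err)
	}

	// LeaseKeepAlive: stream keep-alives so the lease does not expire.
	ka, err := cli.KeepAlive(ctx, lease.ID)
	if err != nil {
		log.Fatal(err)
	}
	for resp := range ka {
		fmt.Printf("lease %x refreshed, TTL %d\n", resp.ID, resp.TTL)
	}
}
```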
+
+service Cluster {
+  // MemberAdd adds a member into the cluster.
+  rpc MemberAdd(MemberAddRequest) returns (MemberAddResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/cluster/member/add"
+        body: "*"
+    };
+  }
+
+  // MemberRemove removes an existing member from the cluster.
+  rpc MemberRemove(MemberRemoveRequest) returns (MemberRemoveResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/cluster/member/remove"
+        body: "*"
+    };
+  }
+
+  // MemberUpdate updates the member configuration.
+  rpc MemberUpdate(MemberUpdateRequest) returns (MemberUpdateResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/cluster/member/update"
+        body: "*"
+    };
+  }
+
+  // MemberList lists all the members in the cluster.
+  rpc MemberList(MemberListRequest) returns (MemberListResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/cluster/member/list"
+        body: "*"
+    };
+  }
+}
+
+service Maintenance {
+  // Alarm activates, deactivates, and queries alarms regarding cluster health.
+  rpc Alarm(AlarmRequest) returns (AlarmResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/alarm"
+        body: "*"
+    };
+  }
+
+  // Status gets the status of the member.
+  rpc Status(StatusRequest) returns (StatusResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/status"
+        body: "*"
+    };
+  }
+
+  // Defragment defragments a member's backend database to recover storage space.
+  rpc Defragment(DefragmentRequest) returns (DefragmentResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/defragment"
+        body: "*"
+    };
+  }
+
+  // Hash computes the hash of the KV's backend.
+  // This is designed for testing; do not use this in production when there
+  // are ongoing transactions.
+  rpc Hash(HashRequest) returns (HashResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/hash"
+        body: "*"
+    };
+  }
+
+  // HashKV computes the hash of all MVCC keys up to a given revision.
+  rpc HashKV(HashKVRequest) returns (HashKVResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/hash"
+        body: "*"
+    };
+  }
+
+  // Snapshot sends a snapshot of the entire backend from a member over a stream to a client.
+  rpc Snapshot(SnapshotRequest) returns (stream SnapshotResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/snapshot"
+        body: "*"
+    };
+  }
+
+  // MoveLeader requests the current leader node to transfer its leadership to the given transferee.
+  rpc MoveLeader(MoveLeaderRequest) returns (MoveLeaderResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/maintenance/transfer-leadership"
+        body: "*"
+    };
+  }
+}
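
A short sketch of the Cluster and Maintenance surfaces via clientv3: list the members and query one member's status (endpoint is a placeholder):

```
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	// MemberList: every member currently in the cluster.
	members, err := cli.MemberList(ctx)
	if err != nil {
		log.Fatal(err)
	}
	for _, m := range members.Members {
		fmt.Printf("member %x %s %v\n", m.ID, m.Name, m.ClientURLs)
	}

	// Status: per-member view (db size, leader, raft index/term).
	st, err := cli.Status(ctx, "localhost:2379")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("leader=%x dbSize=%d raftTerm=%d\n", st.Leader, st.DbSize, st.RaftTerm)
}
```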
+
+service Auth {
+  // AuthEnable enables authentication.
+  rpc AuthEnable(AuthEnableRequest) returns (AuthEnableResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/enable"
+        body: "*"
+    };
+  }
+
+  // AuthDisable disables authentication.
+  rpc AuthDisable(AuthDisableRequest) returns (AuthDisableResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/disable"
+        body: "*"
+    };
+  }
+
+  // Authenticate processes an authenticate request.
+  rpc Authenticate(AuthenticateRequest) returns (AuthenticateResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/authenticate"
+        body: "*"
+    };
+  }
+
+  // UserAdd adds a new user.
+  rpc UserAdd(AuthUserAddRequest) returns (AuthUserAddResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/add"
+        body: "*"
+    };
+  }
+
+  // UserGet gets detailed user information.
+  rpc UserGet(AuthUserGetRequest) returns (AuthUserGetResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/get"
+        body: "*"
+    };
+  }
+
+  // UserList gets a list of all users.
+  rpc UserList(AuthUserListRequest) returns (AuthUserListResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/list"
+        body: "*"
+    };
+  }
+
+  // UserDelete deletes a specified user.
+  rpc UserDelete(AuthUserDeleteRequest) returns (AuthUserDeleteResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/delete"
+        body: "*"
+    };
+  }
+
+  // UserChangePassword changes the password of a specified user.
+  rpc UserChangePassword(AuthUserChangePasswordRequest) returns (AuthUserChangePasswordResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/changepw"
+        body: "*"
+    };
+  }
+
+  // UserGrant grants a role to a specified user.
+  rpc UserGrantRole(AuthUserGrantRoleRequest) returns (AuthUserGrantRoleResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/grant"
+        body: "*"
+    };
+  }
+
+  // UserRevokeRole revokes a role of a specified user.
+  rpc UserRevokeRole(AuthUserRevokeRoleRequest) returns (AuthUserRevokeRoleResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/user/revoke"
+        body: "*"
+    };
+  }
+
+  // RoleAdd adds a new role.
+  rpc RoleAdd(AuthRoleAddRequest) returns (AuthRoleAddResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/add"
+        body: "*"
+    };
+  }
+
+  // RoleGet gets detailed role information.
+  rpc RoleGet(AuthRoleGetRequest) returns (AuthRoleGetResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/get"
+        body: "*"
+    };
+  }
+
+  // RoleList gets a list of all roles.
+  rpc RoleList(AuthRoleListRequest) returns (AuthRoleListResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/list"
+        body: "*"
+    };
+  }
+
+  // RoleDelete deletes a specified role.
+  rpc RoleDelete(AuthRoleDeleteRequest) returns (AuthRoleDeleteResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/delete"
+        body: "*"
+    };
+  }
+
+  // RoleGrantPermission grants a permission of a specified key or range to a specified role.
+  rpc RoleGrantPermission(AuthRoleGrantPermissionRequest) returns (AuthRoleGrantPermissionResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/grant"
+        body: "*"
+    };
+  }
+
+  // RoleRevokePermission revokes a key or range permission of a specified role.
+  rpc RoleRevokePermission(AuthRoleRevokePermissionRequest) returns (AuthRoleRevokePermissionResponse) {
+      option (google.api.http) = {
+        post: "/v3beta/auth/role/revoke"
+        body: "*"
+    };
+  }
+}
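
A sketch of bootstrapping the Auth service above with clientv3: create a role and a user, grant the role a key-range permission, bind them, then enable authentication (names and the key range are placeholders):

```
package main

import (
	"context"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ctx := context.Background()

	// RoleAdd + RoleGrantPermission: read/write over the range ["/vgc/", "/vgc0").
	if _, err := cli.RoleAdd(ctx, "vgc-role"); err != nil {
		log.Fatal(err)
	}
	if _, err := cli.RoleGrantPermission(ctx, "vgc-role", "/vgc/", "/vgc0",
		clientv3.PermissionType(clientv3.PermReadWrite)); err != nil {
		log.Fatal(err)
	}

	// UserAdd + UserGrantRole, then turn authentication on.
	if _, err := cli.UserAdd(ctx, "vgc-user", "secret"); err != nil {
		log.Fatal(err)
	}
	if _, err := cli.UserGrantRole(ctx, "vgc-user", "vgc-role"); err != nil {
		log.Fatal(err)
	}
	if _, err := cli.AuthEnable(ctx); err != nil {
		log.Fatal(err)
	}
}
```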
+
+message ResponseHeader {
+  // cluster_id is the ID of the cluster which sent the response.
+  uint64 cluster_id = 1;
+  // member_id is the ID of the member which sent the response.
+  uint64 member_id = 2;
+  // revision is the key-value store revision when the request was applied.
+  // For watch progress responses, the header.revision indicates progress. All future events
+  // received in this stream are guaranteed to have a higher revision number than the
+  // header.revision number.
+  int64 revision = 3;
+  // raft_term is the raft term when the request was applied.
+  uint64 raft_term = 4;
+}
+
+message RangeRequest {
+  enum SortOrder {
+	NONE = 0; // default, no sorting
+	ASCEND = 1; // lowest target value first
+	DESCEND = 2; // highest target value first
+  }
+  enum SortTarget {
+	KEY = 0;
+	VERSION = 1;
+	CREATE = 2;
+	MOD = 3;
+	VALUE = 4;
+  }
+
+  // key is the first key for the range. If range_end is not given, the request only looks up key.
+  bytes key = 1;
+  // range_end is the upper bound on the requested range [key, range_end).
+  // If range_end is '\0', the range is all keys >= key.
+  // If range_end is key plus one (e.g., "aa"+1 == "ab", "a\xff"+1 == "b"),
+  // then the range request gets all keys prefixed with key.
+  // If both key and range_end are '\0', then the range request returns all keys.
+  bytes range_end = 2;
+  // limit is a limit on the number of keys returned for the request. When limit is set to 0,
+  // it is treated as no limit.
+  int64 limit = 3;
+  // revision is the point-in-time of the key-value store to use for the range.
+  // If revision is less or equal to zero, the range is over the newest key-value store.
+  // If the revision has been compacted, ErrCompacted is returned as a response.
+  int64 revision = 4;
+
+  // sort_order is the order for returned sorted results.
+  SortOrder sort_order = 5;
+
+  // sort_target is the key-value field to use for sorting.
+  SortTarget sort_target = 6;
+
+  // serializable sets the range request to use serializable member-local reads.
+  // Range requests are linearizable by default; linearizable requests have higher
+  // latency and lower throughput than serializable requests but reflect the current
+  // consensus of the cluster. For better performance, in exchange for possible stale reads,
+  // a serializable range request is served locally without needing to reach consensus
+  // with other nodes in the cluster.
+  bool serializable = 7;
+
+  // keys_only when set returns only the keys and not the values.
+  bool keys_only = 8;
+
+  // count_only when set returns only the count of the keys in the range.
+  bool count_only = 9;
+
+  // min_mod_revision is the lower bound for returned key mod revisions; all keys with
+  // lesser mod revisions will be filtered away.
+  int64 min_mod_revision = 10;
+
+  // max_mod_revision is the upper bound for returned key mod revisions; all keys with
+  // greater mod revisions will be filtered away.
+  int64 max_mod_revision = 11;
+
+  // min_create_revision is the lower bound for returned key create revisions; all keys with
+  // lesser create revisions will be filtered away.
+  int64 min_create_revision = 12;
+
+  // max_create_revision is the upper bound for returned key create revisions; all keys with
+  // greater create revisions will be filtered away.
+  int64 max_create_revision = 13;
+}
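
The range_end rules documented above ("key plus one" for a prefix, '\0' for "from key to the end") can be made concrete with a small helper; prefixRangeEnd is a hypothetical sketch, not part of this package:

```
package main

import "fmt"

// prefixRangeEnd computes the exclusive upper bound for "all keys with this
// prefix": increment the last byte that is not 0xff and drop everything after it.
// A prefix of all 0xff bytes has no upper bound, so '\0' ("to the end") is returned.
func prefixRangeEnd(prefix []byte) []byte {
	end := make([]byte, len(prefix))
	copy(end, prefix)
	for i := len(end) - 1; i >= 0; i-- {
		if end[i] < 0xff {
			end[i]++
			return end[:i+1]
		}
	}
	return []byte{0}
}

func main() {
	fmt.Printf("%q\n", prefixRangeEnd([]byte("aa")))    // "ab"
	fmt.Printf("%q\n", prefixRangeEnd([]byte("a\xff"))) // "b"
}
```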
+
+message RangeResponse {
+  ResponseHeader header = 1;
+  // kvs is the list of key-value pairs matched by the range request.
+  // kvs is empty when count is requested.
+  repeated mvccpb.KeyValue kvs = 2;
+  // more indicates if there are more keys to return in the requested range.
+  bool more = 3;
+  // count is set to the number of keys within the range when requested.
+  int64 count = 4;
+}
+
+message PutRequest {
+  // key is the key, in bytes, to put into the key-value store.
+  bytes key = 1;
+  // value is the value, in bytes, to associate with the key in the key-value store.
+  bytes value = 2;
+  // lease is the lease ID to associate with the key in the key-value store. A lease
+  // value of 0 indicates no lease.
+  int64 lease = 3;
+
+  // If prev_kv is set, etcd gets the previous key-value pair before changing it.
+  // The previous key-value pair will be returned in the put response.
+  bool prev_kv = 4;
+
+  // If ignore_value is set, etcd updates the key using its current value.
+  // Returns an error if the key does not exist.
+  bool ignore_value = 5;
+
+  // If ignore_lease is set, etcd updates the key using its current lease.
+  // Returns an error if the key does not exist.
+  bool ignore_lease = 6;
+}
+
+message PutResponse {
+  ResponseHeader header = 1;
+  // if prev_kv is set in the request, the previous key-value pair will be returned.
+  mvccpb.KeyValue prev_kv = 2;
+}
+
+message DeleteRangeRequest {
+  // key is the first key to delete in the range.
+  bytes key = 1;
+  // range_end is the key following the last key to delete for the range [key, range_end).
+  // If range_end is not given, the range is defined to contain only the key argument.
+  // If range_end is one bit larger than the given key, then the range is all the keys
+  // with the prefix (the given key).
+  // If range_end is '\0', the range is all keys greater than or equal to the key argument.
+  bytes range_end = 2;
+
+  // If prev_kv is set, etcd gets the previous key-value pairs before deleting them.
+  // The previous key-value pairs will be returned in the delete response.
+  bool prev_kv = 3;
+}
+
+message DeleteRangeResponse {
+  ResponseHeader header = 1;
+  // deleted is the number of keys deleted by the delete range request.
+  int64 deleted = 2;
+  // if prev_kv is set in the request, the previous key-value pairs will be returned.
+  repeated mvccpb.KeyValue prev_kvs = 3;
+}
+
+message RequestOp {
+  // request is a union of request types accepted by a transaction.
+  oneof request {
+    RangeRequest request_range = 1;
+    PutRequest request_put = 2;
+    DeleteRangeRequest request_delete_range = 3;
+    TxnRequest request_txn = 4;
+  }
+}
+
+message ResponseOp {
+  // response is a union of response types returned by a transaction.
+  oneof response {
+    RangeResponse response_range = 1;
+    PutResponse response_put = 2;
+    DeleteRangeResponse response_delete_range = 3;
+    TxnResponse response_txn = 4;
+  }
+}
+
+message Compare {
+  enum CompareResult {
+    EQUAL = 0;
+    GREATER = 1;
+    LESS = 2;
+    NOT_EQUAL = 3;
+  }
+  enum CompareTarget {
+    VERSION = 0;
+    CREATE = 1;
+    MOD = 2;
+    VALUE = 3;
+    LEASE = 4;
+  }
+  // result is logical comparison operation for this comparison.
+  CompareResult result = 1;
+  // target is the key-value field to inspect for the comparison.
+  CompareTarget target = 2;
+  // key is the subject key for the comparison operation.
+  bytes key = 3;
+  oneof target_union {
+    // version is the version of the given key
+    int64 version = 4;
+    // create_revision is the creation revision of the given key
+    int64 create_revision = 5;
+    // mod_revision is the last modified revision of the given key.
+    int64 mod_revision = 6;
+    // value is the value of the given key, in bytes.
+    bytes value = 7;
+    // lease is the lease id of the given key.
+    int64 lease = 8;
+    // leave room for more target_union field tags, jump to 64
+  }
+
+  // range_end compares the given target to all keys in the range [key, range_end).
+  // See RangeRequest for more details on key ranges.
+  bytes range_end = 64;
+  // TODO: fill out with most of the rest of RangeRequest fields when needed.
+}
+
+// From google paxosdb paper:
+// Our implementation hinges around a powerful primitive which we call MultiOp. All other database
+// operations except for iteration are implemented as a single call to MultiOp. A MultiOp is applied atomically
+// and consists of three components:
+// 1. A list of tests called guard. Each test in guard checks a single entry in the database. It may check
+// for the absence or presence of a value, or compare with a given value. Two different tests in the guard
+// may apply to the same or different entries in the database. All tests in the guard are applied and
+// MultiOp returns the results. If all tests are true, MultiOp executes t op (see item 2 below), otherwise
+// it executes f op (see item 3 below).
+// 2. A list of database operations called t op. Each operation in the list is either an insert, delete, or
+// lookup operation, and applies to a single database entry. Two different operations in the list may apply
+// to the same or different entries in the database. These operations are executed
+// if guard evaluates to
+// true.
+// 3. A list of database operations called f op. Like t op, but executed if guard evaluates to false.
+message TxnRequest {
+  // compare is a list of predicates representing a conjunction of terms.
+  // If the comparisons succeed, then the success requests will be processed in order,
+  // and the response will contain their respective responses in order.
+  // If the comparisons fail, then the failure requests will be processed in order,
+  // and the response will contain their respective responses in order.
+  repeated Compare compare = 1;
+  // success is a list of requests which will be applied when compare evaluates to true.
+  repeated RequestOp success = 2;
+  // failure is a list of requests which will be applied when compare evaluates to false.
+  repeated RequestOp failure = 3;
+}
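
The guard / t_op / f_op structure above maps directly onto the If/Then/Else transaction builder in etcd's clientv3 package; a sketch of a compare-and-swap built on it (client package, key, and values are placeholders):

```
package main

import (
	"context"
	"fmt"
	"log"
	"time"

	"github.com/coreos/etcd/clientv3"
)

// casUpdate mirrors the TxnRequest layout: If is the compare list (guard),
// Then is the success list (t_op), Else is the failure list (f_op).
func casUpdate(ctx context.Context, cli *clientv3.Client, key, oldVal, newVal string) (bool, error) {
	resp, err := cli.Txn(ctx).
		If(clientv3.Compare(clientv3.Value(key), "=", oldVal)).
		Then(clientv3.OpPut(key, newVal)).
		Else(clientv3.OpGet(key)).
		Commit()
	if err != nil {
		return false, err
	}
	return resp.Succeeded, nil
}

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"localhost:2379"},
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	ok, err := casUpdate(context.Background(), cli, "/vgc/demo", "old", "new")
	fmt.Println(ok, err)
}
```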
+
+message TxnResponse {
+  ResponseHeader header = 1;
+  // succeeded is set to true if the compare evaluated to true, and false otherwise.
+  bool succeeded = 2;
+  // responses is a list of responses corresponding to the results from applying
+  // success if succeeded is true or failure if succeeded is false.
+  repeated ResponseOp responses = 3;
+}
+
+// CompactionRequest compacts the key-value store up to a given revision. All superseded keys
+// with a revision less than the compaction revision will be removed.
+message CompactionRequest {
+  // revision is the key-value store revision for the compaction operation.
+  int64 revision = 1;
+  // physical is set so the RPC will wait until the compaction is physically
+  // applied to the local database such that compacted entries are totally
+  // removed from the backend database.
+  bool physical = 2;
+}
+
+message CompactionResponse {
+  ResponseHeader header = 1;
+}
+
+message HashRequest {
+}
+
+message HashKVRequest {
+  // revision is the key-value store revision for the hash operation.
+  int64 revision = 1;
+}
+
+message HashKVResponse {
+  ResponseHeader header = 1;
+  // hash is the hash value computed from the responding member's MVCC keys up to a given revision.
+  uint32 hash = 2;
+  // compact_revision is the compacted revision of key-value store when hash begins.
+  int64 compact_revision = 3;
+}
+
+message HashResponse {
+  ResponseHeader header = 1;
+  // hash is the hash value computed from the responding member's KV's backend.
+  uint32 hash = 2;
+}
+
+message SnapshotRequest {
+}
+
+message SnapshotResponse {
+  // header has the current key-value store information. The first header in the snapshot
+  // stream indicates the point in time of the snapshot.
+  ResponseHeader header = 1;
+
+  // remaining_bytes is the number of blob bytes to be sent after this message
+  uint64 remaining_bytes = 2;
+
+  // blob contains the next chunk of the snapshot in the snapshot stream.
+  bytes blob = 3;
+}
+
+message WatchRequest {
+  // request_union is a request to either create a new watcher or cancel an existing watcher.
+  oneof request_union {
+    WatchCreateRequest create_request = 1;
+    WatchCancelRequest cancel_request = 2;
+    WatchProgressRequest progress_request = 3;
+  }
+}
+
+message WatchCreateRequest {
+  // key is the key to register for watching.
+  bytes key = 1;
+  // range_end is the end of the range [key, range_end) to watch. If range_end is not given,
+  // only the key argument is watched. If range_end is equal to '\0', all keys greater than
+  // or equal to the key argument are watched.
+  // If the range_end is one bit larger than the given key,
+  // then all keys with the prefix (the given key) will be watched.
+  bytes range_end = 2;
+  // start_revision is an optional revision to watch from (inclusive). If no start_revision is given, "now" is used.
+  int64 start_revision = 3;
+  // progress_notify is set so that the etcd server will periodically send a WatchResponse with
+  // no events to the new watcher if there are no recent events. It is useful when clients
+  // wish to recover a disconnected watcher starting from a recent known revision.
+  // The etcd server may decide how often it will send notifications based on current load.
+  bool progress_notify = 4;
+
+  enum FilterType {
+  // filter out put event.
+  NOPUT = 0;
+  // filter out delete event.
+  NODELETE = 1;
+  }
+  // filters filter the events on the server side before they are sent back to the watcher.
+  repeated FilterType filters = 5;
+
+  // If prev_kv is set, created watcher gets the previous KV before the event happens.
+  // If the previous KV is already compacted, nothing will be returned.
+  bool prev_kv = 6;
+
+  // If watch_id is provided and non-zero, it will be assigned to this watcher.
+  // Since creating a watcher in etcd is not a synchronous operation,
+  // this can be used to ensure that ordering is correct when creating multiple
+  // watchers on the same stream. Creating a watcher with an ID already in
+  // use on the stream will cause an error to be returned.
+  int64 watch_id = 7;
+
+  // fragment enables splitting large revisions into multiple watch responses.
+  bool fragment = 8;
+}
+
+message WatchCancelRequest {
+  // watch_id is the watcher id to cancel so that no more events are transmitted.
+  int64 watch_id = 1;
+}
+
+// Requests that a watch stream progress status be sent in the watch response stream as soon as
+// possible.
+message WatchProgressRequest {
+}
+
+message WatchResponse {
+  ResponseHeader header = 1;
+  // watch_id is the ID of the watcher that corresponds to the response.
+  int64 watch_id = 2;
+  // created is set to true if the response is for a create watch request.
+  // The client should record the watch_id and expect to receive events for
+  // the created watcher from the same stream.
+  // All events sent to the created watcher will attach with the same watch_id.
+  bool created = 3;
+  // canceled is set to true if the response is for a cancel watch request.
+  // No further events will be sent to the canceled watcher.
+  bool canceled = 4;
+  // compact_revision is set to the minimum index if a watcher tries to watch
+  // at a compacted index.
+  //
+  // This happens when creating a watcher at a compacted revision or the watcher cannot
+  // catch up with the progress of the key-value store.
+  //
+  // The client should treat the watcher as canceled and should not try to create any
+  // watcher with the same start_revision again.
+  int64 compact_revision  = 5;
+
+  // cancel_reason indicates the reason for canceling the watcher.
+  string cancel_reason = 6;
+
+  // fragment is true if a large watch response was split over multiple responses.
+  bool fragment = 7;
+
+  repeated mvccpb.Event events = 11;
+}
+
+message LeaseGrantRequest {
+  // TTL is the advisory time-to-live in seconds. An expired lease will return -1.
+  int64 TTL = 1;
+  // ID is the requested ID for the lease. If ID is set to 0, the lessor chooses an ID.
+  int64 ID = 2;
+}
+
+message LeaseGrantResponse {
+  ResponseHeader header = 1;
+  // ID is the lease ID for the granted lease.
+  int64 ID = 2;
+  // TTL is the server chosen lease time-to-live in seconds.
+  int64 TTL = 3;
+  string error = 4;
+}
+
+message LeaseRevokeRequest {
+  // ID is the lease ID to revoke. When the ID is revoked, all associated keys will be deleted.
+  int64 ID = 1;
+}
+
+message LeaseRevokeResponse {
+  ResponseHeader header = 1;
+}
+
+message LeaseKeepAliveRequest {
+  // ID is the lease ID for the lease to keep alive.
+  int64 ID = 1;
+}
+
+message LeaseKeepAliveResponse {
+  ResponseHeader header = 1;
+  // ID is the lease ID from the keep alive request.
+  int64 ID = 2;
+  // TTL is the new time-to-live for the lease.
+  int64 TTL = 3;
+}
+
+message LeaseTimeToLiveRequest {
+  // ID is the lease ID for the lease.
+  int64 ID = 1;
+  // keys is true to query all the keys attached to this lease.
+  bool keys = 2;
+}
+
+message LeaseTimeToLiveResponse {
+  ResponseHeader header = 1;
+  // ID is the lease ID from the keep alive request.
+  int64 ID = 2;
+  // TTL is the remaining TTL in seconds for the lease; the lease will expire in under TTL+1 seconds.
+  int64 TTL = 3;
+  // GrantedTTL is the initial granted time in seconds upon lease creation/renewal.
+  int64 grantedTTL = 4;
+  // Keys is the list of keys attached to this lease.
+  repeated bytes keys = 5;
+}
+
+message LeaseLeasesRequest {
+}
+
+message LeaseStatus {
+  int64 ID = 1;
+  // TODO: int64 TTL = 2;
+}
+
+message LeaseLeasesResponse {
+  ResponseHeader header = 1;
+  repeated LeaseStatus leases = 2;
+}
+
+message Member {
+  // ID is the member ID for this member.
+  uint64 ID = 1;
+  // name is the human-readable name of the member. If the member is not started, the name will be an empty string.
+  string name = 2;
+  // peerURLs is the list of URLs the member exposes to the cluster for communication.
+  repeated string peerURLs = 3;
+  // clientURLs is the list of URLs the member exposes to clients for communication. If the member is not started, clientURLs will be empty.
+  repeated string clientURLs = 4;
+}
+
+message MemberAddRequest {
+  // peerURLs is the list of URLs the added member will use to communicate with the cluster.
+  repeated string peerURLs = 1;
+}
+
+message MemberAddResponse {
+  ResponseHeader header = 1;
+  // member is the member information for the added member.
+  Member member = 2;
+  // members is a list of all members after adding the new member.
+  repeated Member members = 3;
+}
+
+message MemberRemoveRequest {
+  // ID is the member ID of the member to remove.
+  uint64 ID = 1;
+}
+
+message MemberRemoveResponse {
+  ResponseHeader header = 1;
+  // members is a list of all members after removing the member.
+  repeated Member members = 2;
+}
+
+message MemberUpdateRequest {
+  // ID is the member ID of the member to update.
+  uint64 ID = 1;
+  // peerURLs is the new list of URLs the member will use to communicate with the cluster.
+  repeated string peerURLs = 2;
+}
+
+message MemberUpdateResponse {
+  ResponseHeader header = 1;
+  // members is a list of all members after updating the member.
+  repeated Member members = 2;
+}
+
+message MemberListRequest {
+}
+
+message MemberListResponse {
+  ResponseHeader header = 1;
+  // members is a list of all members associated with the cluster.
+  repeated Member members = 2;
+}
+
+message DefragmentRequest {
+}
+
+message DefragmentResponse {
+  ResponseHeader header = 1;
+}
+
+message MoveLeaderRequest {
+  // targetID is the node ID for the new leader.
+  uint64 targetID = 1;
+}
+
+message MoveLeaderResponse {
+  ResponseHeader header = 1;
+}
+
+enum AlarmType {
+	NONE = 0; // default, used to query if any alarm is active
+	NOSPACE = 1; // space quota is exhausted
+	CORRUPT = 2; // kv store corruption detected
+}
+
+message AlarmRequest {
+  enum AlarmAction {
+	GET = 0;
+	ACTIVATE = 1;
+	DEACTIVATE = 2;
+  }
+  // action is the kind of alarm request to issue. The action
+  // may GET alarm statuses, ACTIVATE an alarm, or DEACTIVATE a
+  // raised alarm.
+  AlarmAction action = 1;
+  // memberID is the ID of the member associated with the alarm. If memberID is 0, the
+  // alarm request covers all members.
+  uint64 memberID = 2;
+  // alarm is the type of alarm to consider for this request.
+  AlarmType alarm = 3;
+}
+
+message AlarmMember {
+  // memberID is the ID of the member associated with the raised alarm.
+  uint64 memberID = 1;
+  // alarm is the type of alarm which has been raised.
+  AlarmType alarm = 2;
+}
+
+message AlarmResponse {
+  ResponseHeader header = 1;
+  // alarms is a list of alarms associated with the alarm request.
+  repeated AlarmMember alarms = 2;
+}
+
+message StatusRequest {
+}
+
+message StatusResponse {
+  ResponseHeader header = 1;
+  // version is the cluster protocol version used by the responding member.
+  string version = 2;
+  // dbSize is the size of the backend database, in bytes, of the responding member.
+  int64 dbSize = 3;
+  // leader is the member ID which the responding member believes is the current leader.
+  uint64 leader = 4;
+  // raftIndex is the current raft index of the responding member.
+  uint64 raftIndex = 5;
+  // raftTerm is the current raft term of the responding member.
+  uint64 raftTerm = 6;
+}
+
+message AuthEnableRequest {
+}
+
+message AuthDisableRequest {
+}
+
+message AuthenticateRequest {
+  string name = 1;
+  string password = 2;
+}
+
+message AuthUserAddRequest {
+  string name = 1;
+  string password = 2;
+}
+
+message AuthUserGetRequest {
+  string name = 1;
+}
+
+message AuthUserDeleteRequest {
+  // name is the name of the user to delete.
+  string name = 1;
+}
+
+message AuthUserChangePasswordRequest {
+  // name is the name of the user whose password is being changed.
+  string name = 1;
+  // password is the new password for the user.
+  string password = 2;
+}
+
+message AuthUserGrantRoleRequest {
+  // user is the name of the user which should be granted a given role.
+  string user = 1;
+  // role is the name of the role to grant to the user.
+  string role = 2;
+}
+
+message AuthUserRevokeRoleRequest {
+  string name = 1;
+  string role = 2;
+}
+
+message AuthRoleAddRequest {
+  // name is the name of the role to add to the authentication system.
+  string name = 1;
+}
+
+message AuthRoleGetRequest {
+  string role = 1;
+}
+
+message AuthUserListRequest {
+}
+
+message AuthRoleListRequest {
+}
+
+message AuthRoleDeleteRequest {
+  string role = 1;
+}
+
+message AuthRoleGrantPermissionRequest {
+  // name is the name of the role which will be granted the permission.
+  string name = 1;
+  // perm is the permission to grant to the role.
+  authpb.Permission perm = 2;
+}
+
+message AuthRoleRevokePermissionRequest {
+  string role = 1;
+  string key = 2;
+  string range_end = 3;
+}
+
+message AuthEnableResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthDisableResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthenticateResponse {
+  ResponseHeader header = 1;
+  // token is an authorized token that can be used in succeeding RPCs
+  string token = 2;
+}
+
+message AuthUserAddResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthUserGetResponse {
+  ResponseHeader header = 1;
+
+  repeated string roles = 2;
+}
+
+message AuthUserDeleteResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthUserChangePasswordResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthUserGrantRoleResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthUserRevokeRoleResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthRoleAddResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthRoleGetResponse {
+  ResponseHeader header = 1;
+
+  repeated authpb.Permission perm = 2;
+}
+
+message AuthRoleListResponse {
+  ResponseHeader header = 1;
+
+  repeated string roles = 2;
+}
+
+message AuthUserListResponse {
+  ResponseHeader header = 1;
+
+  repeated string users = 2;
+}
+
+message AuthRoleDeleteResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthRoleGrantPermissionResponse {
+  ResponseHeader header = 1;
+}
+
+message AuthRoleRevokePermissionResponse {
+  ResponseHeader header = 1;
+}
diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
new file mode 100644
index 0000000..4679da5
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.pb.go
@@ -0,0 +1,830 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: kv.proto
+
+package mvccpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type Event_EventType int32
+
+const (
+	PUT    Event_EventType = 0
+	DELETE Event_EventType = 1
+)
+
+var Event_EventType_name = map[int32]string{
+	0: "PUT",
+	1: "DELETE",
+}
+
+var Event_EventType_value = map[string]int32{
+	"PUT":    0,
+	"DELETE": 1,
+}
+
+func (x Event_EventType) String() string {
+	return proto.EnumName(Event_EventType_name, int32(x))
+}
+
+func (Event_EventType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_2216fe83c9c12408, []int{1, 0}
+}
+
+type KeyValue struct {
+	// key is the key in bytes. An empty key is not allowed.
+	Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
+	// create_revision is the revision of last creation on this key.
+	CreateRevision int64 `protobuf:"varint,2,opt,name=create_revision,json=createRevision,proto3" json:"create_revision,omitempty"`
+	// mod_revision is the revision of last modification on this key.
+	ModRevision int64 `protobuf:"varint,3,opt,name=mod_revision,json=modRevision,proto3" json:"mod_revision,omitempty"`
+	// version is the version of the key. A deletion resets
+	// the version to zero and any modification of the key
+	// increases its version.
+	Version int64 `protobuf:"varint,4,opt,name=version,proto3" json:"version,omitempty"`
+	// value is the value held by the key, in bytes.
+	Value []byte `protobuf:"bytes,5,opt,name=value,proto3" json:"value,omitempty"`
+	// lease is the ID of the lease that attached to key.
+	// When the attached lease expires, the key will be deleted.
+	// If lease is 0, then no lease is attached to the key.
+	Lease                int64    `protobuf:"varint,6,opt,name=lease,proto3" json:"lease,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *KeyValue) Reset()         { *m = KeyValue{} }
+func (m *KeyValue) String() string { return proto.CompactTextString(m) }
+func (*KeyValue) ProtoMessage()    {}
+func (*KeyValue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2216fe83c9c12408, []int{0}
+}
+func (m *KeyValue) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *KeyValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_KeyValue.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *KeyValue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KeyValue.Merge(m, src)
+}
+func (m *KeyValue) XXX_Size() int {
+	return m.Size()
+}
+func (m *KeyValue) XXX_DiscardUnknown() {
+	xxx_messageInfo_KeyValue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KeyValue proto.InternalMessageInfo
+
+type Event struct {
+	// type is the kind of event. If type is a PUT, it indicates
+	// new data has been stored to the key. If type is a DELETE,
+	// it indicates the key was deleted.
+	Type Event_EventType `protobuf:"varint,1,opt,name=type,proto3,enum=mvccpb.Event_EventType" json:"type,omitempty"`
+	// kv holds the KeyValue for the event.
+	// A PUT event contains current kv pair.
+	// A PUT event with kv.Version=1 indicates the creation of a key.
+	// A DELETE/EXPIRE event contains the deleted key with
+	// its modification revision set to the revision of deletion.
+	Kv *KeyValue `protobuf:"bytes,2,opt,name=kv,proto3" json:"kv,omitempty"`
+	// prev_kv holds the key-value pair before the event happens.
+	PrevKv               *KeyValue `protobuf:"bytes,3,opt,name=prev_kv,json=prevKv,proto3" json:"prev_kv,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *Event) Reset()         { *m = Event{} }
+func (m *Event) String() string { return proto.CompactTextString(m) }
+func (*Event) ProtoMessage()    {}
+func (*Event) Descriptor() ([]byte, []int) {
+	return fileDescriptor_2216fe83c9c12408, []int{1}
+}
+func (m *Event) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Event.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Event) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Event.Merge(m, src)
+}
+func (m *Event) XXX_Size() int {
+	return m.Size()
+}
+func (m *Event) XXX_DiscardUnknown() {
+	xxx_messageInfo_Event.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Event proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterEnum("mvccpb.Event_EventType", Event_EventType_name, Event_EventType_value)
+	proto.RegisterType((*KeyValue)(nil), "mvccpb.KeyValue")
+	proto.RegisterType((*Event)(nil), "mvccpb.Event")
+}
+
+func init() { proto.RegisterFile("kv.proto", fileDescriptor_2216fe83c9c12408) }
+
+var fileDescriptor_2216fe83c9c12408 = []byte{
+	// 303 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x41, 0x4e, 0xc2, 0x40,
+	0x14, 0x86, 0x3b, 0x14, 0x0a, 0x3e, 0x08, 0x36, 0x13, 0x12, 0x27, 0x2e, 0x26, 0x95, 0x8d, 0x18,
+	0x13, 0x4c, 0xf0, 0x06, 0xc6, 0xae, 0x70, 0x61, 0x1a, 0x74, 0x4b, 0x4a, 0x79, 0x21, 0xa4, 0x94,
+	0x69, 0x4a, 0x9d, 0xa4, 0x37, 0x71, 0xef, 0xde, 0x73, 0xb0, 0xe4, 0x08, 0x52, 0x2f, 0x62, 0xfa,
+	0xc6, 0xe2, 0xc6, 0xcd, 0xe4, 0xfd, 0xff, 0xff, 0x65, 0xe6, 0x7f, 0x03, 0x9d, 0x58, 0x8f, 0xd3,
+	0x4c, 0xe5, 0x8a, 0x3b, 0x89, 0x8e, 0xa2, 0x74, 0x71, 0x39, 0x58, 0xa9, 0x95, 0x22, 0xeb, 0xae,
+	0x9a, 0x4c, 0x3a, 0xfc, 0x64, 0xd0, 0x99, 0x62, 0xf1, 0x1a, 0x6e, 0xde, 0x90, 0xbb, 0x60, 0xc7,
+	0x58, 0x08, 0xe6, 0xb1, 0x51, 0x2f, 0xa8, 0x46, 0x7e, 0x0d, 0xe7, 0x51, 0x86, 0x61, 0x8e, 0xf3,
+	0x0c, 0xf5, 0x7a, 0xb7, 0x56, 0x5b, 0xd1, 0xf0, 0xd8, 0xc8, 0x0e, 0xfa, 0xc6, 0x0e, 0x7e, 0x5d,
+	0x7e, 0x05, 0xbd, 0x44, 0x2d, 0xff, 0x28, 0x9b, 0xa8, 0x6e, 0xa2, 0x96, 0x27, 0x44, 0x40, 0x5b,
+	0x63, 0x46, 0x69, 0x93, 0xd2, 0x5a, 0xf2, 0x01, 0xb4, 0x74, 0x55, 0x40, 0xb4, 0xe8, 0x65, 0x23,
+	0x2a, 0x77, 0x83, 0xe1, 0x0e, 0x85, 0x43, 0xb4, 0x11, 0xc3, 0x0f, 0x06, 0x2d, 0x5f, 0xe3, 0x36,
+	0xe7, 0xb7, 0xd0, 0xcc, 0x8b, 0x14, 0xa9, 0x6e, 0x7f, 0x72, 0x31, 0x36, 0x7b, 0x8e, 0x29, 0x34,
+	0xe7, 0xac, 0x48, 0x31, 0x20, 0x88, 0x7b, 0xd0, 0x88, 0x35, 0x75, 0xef, 0x4e, 0xdc, 0x1a, 0xad,
+	0x17, 0x0f, 0x1a, 0xb1, 0xe6, 0x37, 0xd0, 0x4e, 0x33, 0xd4, 0xf3, 0x58, 0x53, 0xf9, 0xff, 0x30,
+	0xa7, 0x02, 0xa6, 0x7a, 0xe8, 0xc1, 0xd9, 0xe9, 0x7e, 0xde, 0x06, 0xfb, 0xf9, 0x65, 0xe6, 0x5a,
+	0x1c, 0xc0, 0x79, 0xf4, 0x9f, 0xfc, 0x99, 0xef, 0xb2, 0x07, 0xb1, 0x3f, 0x4a, 0xeb, 0x70, 0x94,
+	0xd6, 0xbe, 0x94, 0xec, 0x50, 0x4a, 0xf6, 0x55, 0x4a, 0xf6, 0xfe, 0x2d, 0xad, 0x85, 0x43, 0xff,
+	0x7e, 0xff, 0x13, 0x00, 0x00, 0xff, 0xff, 0xb5, 0x45, 0x92, 0x5d, 0xa1, 0x01, 0x00, 0x00,
+}
+
+func (m *KeyValue) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *KeyValue) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *KeyValue) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Lease != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.Lease))
+		i--
+		dAtA[i] = 0x30
+	}
+	if len(m.Value) > 0 {
+		i -= len(m.Value)
+		copy(dAtA[i:], m.Value)
+		i = encodeVarintKv(dAtA, i, uint64(len(m.Value)))
+		i--
+		dAtA[i] = 0x2a
+	}
+	if m.Version != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.Version))
+		i--
+		dAtA[i] = 0x20
+	}
+	if m.ModRevision != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.ModRevision))
+		i--
+		dAtA[i] = 0x18
+	}
+	if m.CreateRevision != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.CreateRevision))
+		i--
+		dAtA[i] = 0x10
+	}
+	if len(m.Key) > 0 {
+		i -= len(m.Key)
+		copy(dAtA[i:], m.Key)
+		i = encodeVarintKv(dAtA, i, uint64(len(m.Key)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
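
The sized-buffer marshalers in this file write fields backwards from the end of the buffer; callers normally only see the plain Marshal/Unmarshal pair. A sketch of round-tripping a KeyValue through them (import path matches this vendored copy; key and value are placeholders):

```
package main

import (
	"fmt"
	"log"

	"github.com/coreos/etcd/mvcc/mvccpb"
)

func main() {
	// Encode a KeyValue with the generated Marshal, then decode it back.
	in := &mvccpb.KeyValue{Key: []byte("/vgc/demo"), Value: []byte("value"), Version: 1}
	b, err := in.Marshal()
	if err != nil {
		log.Fatal(err)
	}
	out := &mvccpb.KeyValue{}
	if err := out.Unmarshal(b); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s = %s (version %d)\n", out.Key, out.Value, out.Version)
}
```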
+
+func (m *Event) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Event) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Event) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.PrevKv != nil {
+		{
+			size, err := m.PrevKv.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintKv(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x1a
+	}
+	if m.Kv != nil {
+		{
+			size, err := m.Kv.MarshalToSizedBuffer(dAtA[:i])
+			if err != nil {
+				return 0, err
+			}
+			i -= size
+			i = encodeVarintKv(dAtA, i, uint64(size))
+		}
+		i--
+		dAtA[i] = 0x12
+	}
+	if m.Type != 0 {
+		i = encodeVarintKv(dAtA, i, uint64(m.Type))
+		i--
+		dAtA[i] = 0x8
+	}
+	return len(dAtA) - i, nil
+}
+
+func encodeVarintKv(dAtA []byte, offset int, v uint64) int {
+	offset -= sovKv(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *KeyValue) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = len(m.Key)
+	if l > 0 {
+		n += 1 + l + sovKv(uint64(l))
+	}
+	if m.CreateRevision != 0 {
+		n += 1 + sovKv(uint64(m.CreateRevision))
+	}
+	if m.ModRevision != 0 {
+		n += 1 + sovKv(uint64(m.ModRevision))
+	}
+	if m.Version != 0 {
+		n += 1 + sovKv(uint64(m.Version))
+	}
+	l = len(m.Value)
+	if l > 0 {
+		n += 1 + l + sovKv(uint64(l))
+	}
+	if m.Lease != 0 {
+		n += 1 + sovKv(uint64(m.Lease))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Event) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Type != 0 {
+		n += 1 + sovKv(uint64(m.Type))
+	}
+	if m.Kv != nil {
+		l = m.Kv.Size()
+		n += 1 + l + sovKv(uint64(l))
+	}
+	if m.PrevKv != nil {
+		l = m.PrevKv.Size()
+		n += 1 + l + sovKv(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func sovKv(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozKv(x uint64) (n int) {
+	return sovKv(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
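
sovKv computes how many base-128 bytes a varint needs, and sozKv zigzag-encodes the value first so that small negative numbers stay short. A standalone sketch of both (sov and soz are hypothetical stand-ins for the generated helpers):

```
package main

import (
	"fmt"
	"math/bits"
)

// sov mirrors sovKv: the number of 7-bit groups needed to hold x (at least one).
func sov(x uint64) int { return (bits.Len64(x|1) + 6) / 7 }

// soz mirrors sozKv: zigzag-encode a signed value first, so -1 costs 1 byte, not 10.
func soz(x int64) int { return sov(uint64((x << 1) ^ (x >> 63))) }

func main() {
	fmt.Println(sov(300)) // 2
	fmt.Println(soz(-1))  // 1
}
```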
+func (m *KeyValue) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowKv
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: KeyValue: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: KeyValue: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthKv
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthKv
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
+			if m.Key == nil {
+				m.Key = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field CreateRevision", wireType)
+			}
+			m.CreateRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.CreateRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ModRevision", wireType)
+			}
+			m.ModRevision = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ModRevision |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType)
+			}
+			m.Version = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Version |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthKv
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthKv
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...)
+			if m.Value == nil {
+				m.Value = []byte{}
+			}
+			iNdEx = postIndex
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType)
+			}
+			m.Lease = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Lease |= int64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipKv(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthKv
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthKv
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Event) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowKv
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Event: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Event: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= Event_EventType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Kv", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthKv
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthKv
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.Kv == nil {
+				m.Kv = &KeyValue{}
+			}
+			if err := m.Kv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 3:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field PrevKv", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthKv
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthKv
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if m.PrevKv == nil {
+				m.PrevKv = &KeyValue{}
+			}
+			if err := m.PrevKv.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipKv(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthKv
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthKv
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipKv(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowKv
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowKv
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthKv
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthKv
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowKv
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipKv(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthKv
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthKv = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowKv   = fmt.Errorf("proto: integer overflow")
+)
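The generated `Size`/`Unmarshal` code above is normally exercised through the top-level methods on the message types. A minimal round-trip sketch, assuming the vendored import path and the gogo-generated `Marshal` method that accompanies this file (not shown in the hunk above):

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/mvcc/mvccpb"
)

func main() {
	kv := &mvccpb.KeyValue{Key: []byte("foo"), Value: []byte("bar"), Version: 1}

	data, err := kv.Marshal() // produced by the generated marshaler
	if err != nil {
		panic(err)
	}

	var decoded mvccpb.KeyValue
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Printf("key=%s value=%s encoded=%d bytes\n", decoded.Key, decoded.Value, kv.Size())
}
```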
diff --git a/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto
new file mode 100644
index 0000000..23c911b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/mvcc/mvccpb/kv.proto
@@ -0,0 +1,49 @@
+syntax = "proto3";
+package mvccpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+message KeyValue {
+  // key is the key in bytes. An empty key is not allowed.
+  bytes key = 1;
+  // create_revision is the revision of last creation on this key.
+  int64 create_revision = 2;
+  // mod_revision is the revision of last modification on this key.
+  int64 mod_revision = 3;
+  // version is the version of the key. A deletion resets
+  // the version to zero and any modification of the key
+  // increases its version.
+  int64 version = 4;
+  // value is the value held by the key, in bytes.
+  bytes value = 5;
+  // lease is the ID of the lease that attached to key.
+  // When the attached lease expires, the key will be deleted.
+  // If lease is 0, then no lease is attached to the key.
+  int64 lease = 6;
+}
+
+message Event {
+  enum EventType {
+    PUT = 0;
+    DELETE = 1;
+  }
+  // type is the kind of event. If type is a PUT, it indicates
+  // new data has been stored to the key. If type is a DELETE,
+  // it indicates the key was deleted.
+  EventType type = 1;
+  // kv holds the KeyValue for the event.
+  // A PUT event contains current kv pair.
+  // A PUT event with kv.Version=1 indicates the creation of a key.
+  // A DELETE/EXPIRE event contains the deleted key with
+  // its modification revision set to the revision of deletion.
+  KeyValue kv = 2;
+
+  // prev_kv holds the key-value pair before the event happens.
+  KeyValue prev_kv = 3;
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go
new file mode 100644
index 0000000..81b0a9d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/discard_logger.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"log"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// assert that "discardLogger" satisfies the "Logger" interface
+var _ Logger = &discardLogger{}
+
+// NewDiscardLogger returns a new Logger that discards everything except "fatal".
+func NewDiscardLogger() Logger { return &discardLogger{} }
+
+type discardLogger struct{}
+
+func (l *discardLogger) Info(args ...interface{})                    {}
+func (l *discardLogger) Infoln(args ...interface{})                  {}
+func (l *discardLogger) Infof(format string, args ...interface{})    {}
+func (l *discardLogger) Warning(args ...interface{})                 {}
+func (l *discardLogger) Warningln(args ...interface{})               {}
+func (l *discardLogger) Warningf(format string, args ...interface{}) {}
+func (l *discardLogger) Error(args ...interface{})                   {}
+func (l *discardLogger) Errorln(args ...interface{})                 {}
+func (l *discardLogger) Errorf(format string, args ...interface{})   {}
+func (l *discardLogger) Fatal(args ...interface{})                   { log.Fatal(args...) }
+func (l *discardLogger) Fatalln(args ...interface{})                 { log.Fatalln(args...) }
+func (l *discardLogger) Fatalf(format string, args ...interface{})   { log.Fatalf(format, args...) }
+func (l *discardLogger) V(lvl int) bool {
+	return false
+}
+func (l *discardLogger) Lvl(lvl int) grpclog.LoggerV2 { return l }
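A short usage sketch for the discard logger; wiring it in through `grpclog.SetLoggerV2` is an assumed integration point (the `Logger` interface embeds `grpclog.LoggerV2`), not something this file mandates:

```go
package main

import (
	"github.com/coreos/etcd/pkg/logutil"
	"google.golang.org/grpc/grpclog"
)

func main() {
	// Drop all gRPC client logging except Fatal, which still terminates the process.
	grpclog.SetLoggerV2(logutil.NewDiscardLogger())
}
```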
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/doc.go b/vendor/github.com/coreos/etcd/pkg/logutil/doc.go
new file mode 100644
index 0000000..e919f24
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package logutil includes utilities to facilitate logging.
+package logutil
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go b/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go
new file mode 100644
index 0000000..d57e173
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/log_level.go
@@ -0,0 +1,70 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"fmt"
+
+	"github.com/coreos/pkg/capnslog"
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+var DefaultLogLevel = "info"
+
+// ConvertToZapLevel converts log level string to zapcore.Level.
+func ConvertToZapLevel(lvl string) zapcore.Level {
+	switch lvl {
+	case "debug":
+		return zap.DebugLevel
+	case "info":
+		return zap.InfoLevel
+	case "warn":
+		return zap.WarnLevel
+	case "error":
+		return zap.ErrorLevel
+	case "dpanic":
+		return zap.DPanicLevel
+	case "panic":
+		return zap.PanicLevel
+	case "fatal":
+		return zap.FatalLevel
+	default:
+		panic(fmt.Sprintf("unknown level %q", lvl))
+	}
+}
+
+// ConvertToCapnslogLogLevel converts a log level string to capnslog.LogLevel.
+// TODO: deprecate this in 3.5
+func ConvertToCapnslogLogLevel(lvl string) capnslog.LogLevel {
+	switch lvl {
+	case "debug":
+		return capnslog.DEBUG
+	case "info":
+		return capnslog.INFO
+	case "warn":
+		return capnslog.WARNING
+	case "error":
+		return capnslog.ERROR
+	case "dpanic":
+		return capnslog.CRITICAL
+	case "panic":
+		return capnslog.CRITICAL
+	case "fatal":
+		return capnslog.CRITICAL
+	default:
+		panic(fmt.Sprintf("unknown level %q", lvl))
+	}
+}
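A minimal sketch of `ConvertToZapLevel` feeding a zap configuration; the surrounding config setup is illustrative only:

```go
package main

import (
	"github.com/coreos/etcd/pkg/logutil"
	"go.uber.org/zap"
)

func main() {
	cfg := zap.NewProductionConfig()
	cfg.Level = zap.NewAtomicLevelAt(logutil.ConvertToZapLevel("warn"))

	lg, err := cfg.Build()
	if err != nil {
		panic(err)
	}
	defer lg.Sync()

	lg.Info("suppressed: below the configured level")
	lg.Warn("printed: at or above the configured level")
}
```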
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/logger.go
new file mode 100644
index 0000000..e7da80e
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/logger.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import "google.golang.org/grpc/grpclog"
+
+// Logger defines logging interface.
+// TODO: deprecate in v3.5.
+type Logger interface {
+	grpclog.LoggerV2
+
+	// Lvl returns the logger if the logger's verbosity level >= "lvl".
+	// Otherwise, it returns a logger that discards everything.
+	Lvl(lvl int) grpclog.LoggerV2
+}
+
+// assert that "defaultLogger" satisfies the "Logger" interface
+var _ Logger = &defaultLogger{}
+
+// NewLogger wraps "grpclog.LoggerV2" that implements "Logger" interface.
+//
+// For example:
+//
+//  var defaultLogger Logger
+//  g := grpclog.NewLoggerV2WithVerbosity(os.Stderr, os.Stderr, os.Stderr, 4)
+//  defaultLogger = NewLogger(g)
+//
+func NewLogger(g grpclog.LoggerV2) Logger { return &defaultLogger{g: g} }
+
+type defaultLogger struct {
+	g grpclog.LoggerV2
+}
+
+func (l *defaultLogger) Info(args ...interface{})                    { l.g.Info(args...) }
+func (l *defaultLogger) Infoln(args ...interface{})                  { l.g.Info(args...) }
+func (l *defaultLogger) Infof(format string, args ...interface{})    { l.g.Infof(format, args...) }
+func (l *defaultLogger) Warning(args ...interface{})                 { l.g.Warning(args...) }
+func (l *defaultLogger) Warningln(args ...interface{})               { l.g.Warning(args...) }
+func (l *defaultLogger) Warningf(format string, args ...interface{}) { l.g.Warningf(format, args...) }
+func (l *defaultLogger) Error(args ...interface{})                   { l.g.Error(args...) }
+func (l *defaultLogger) Errorln(args ...interface{})                 { l.g.Error(args...) }
+func (l *defaultLogger) Errorf(format string, args ...interface{})   { l.g.Errorf(format, args...) }
+func (l *defaultLogger) Fatal(args ...interface{})                   { l.g.Fatal(args...) }
+func (l *defaultLogger) Fatalln(args ...interface{})                 { l.g.Fatal(args...) }
+func (l *defaultLogger) Fatalf(format string, args ...interface{})   { l.g.Fatalf(format, args...) }
+func (l *defaultLogger) V(lvl int) bool                              { return l.g.V(lvl) }
+func (l *defaultLogger) Lvl(lvl int) grpclog.LoggerV2 {
+	if l.g.V(lvl) {
+		return l
+	}
+	return &discardLogger{}
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
new file mode 100644
index 0000000..866b6f7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/merge_logger.go
@@ -0,0 +1,194 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"fmt"
+	"sync"
+	"time"
+
+	"github.com/coreos/pkg/capnslog"
+)
+
+var (
+	defaultMergePeriod     = time.Second
+	defaultTimeOutputScale = 10 * time.Millisecond
+
+	outputInterval = time.Second
+)
+
+// line represents a log line that can be printed out
+// through capnslog.PackageLogger.
+type line struct {
+	level capnslog.LogLevel
+	str   string
+}
+
+func (l line) append(s string) line {
+	return line{
+		level: l.level,
+		str:   l.str + " " + s,
+	}
+}
+
+// status represents the merge status of a line.
+type status struct {
+	period time.Duration
+
+	start time.Time // start time of latest merge period
+	count int       // number of merged lines from starting
+}
+
+func (s *status) isInMergePeriod(now time.Time) bool {
+	return s.period == 0 || s.start.Add(s.period).After(now)
+}
+
+func (s *status) isEmpty() bool { return s.count == 0 }
+
+func (s *status) summary(now time.Time) string {
+	ts := s.start.Round(defaultTimeOutputScale)
+	took := now.Round(defaultTimeOutputScale).Sub(ts)
+	return fmt.Sprintf("[merged %d repeated lines in %s]", s.count, took)
+}
+
+func (s *status) reset(now time.Time) {
+	s.start = now
+	s.count = 0
+}
+
+// MergeLogger supports merge logging, which merges repeated log lines
+// and prints summary log lines instead.
+//
+// For merge logging, MergeLogger prints out the line when the line appears
+// for the first time. MergeLogger holds the same log line printed within
+// defaultMergePeriod, and prints out a summary log line at the end of
+// defaultMergePeriod. It stops merging when the line doesn't appear within
+// the defaultMergePeriod.
+type MergeLogger struct {
+	*capnslog.PackageLogger
+
+	mu      sync.Mutex // protect statusm
+	statusm map[line]*status
+}
+
+func NewMergeLogger(logger *capnslog.PackageLogger) *MergeLogger {
+	l := &MergeLogger{
+		PackageLogger: logger,
+		statusm:       make(map[line]*status),
+	}
+	go l.outputLoop()
+	return l
+}
+
+func (l *MergeLogger) MergeInfo(entries ...interface{}) {
+	l.merge(line{
+		level: capnslog.INFO,
+		str:   fmt.Sprint(entries...),
+	})
+}
+
+func (l *MergeLogger) MergeInfof(format string, args ...interface{}) {
+	l.merge(line{
+		level: capnslog.INFO,
+		str:   fmt.Sprintf(format, args...),
+	})
+}
+
+func (l *MergeLogger) MergeNotice(entries ...interface{}) {
+	l.merge(line{
+		level: capnslog.NOTICE,
+		str:   fmt.Sprint(entries...),
+	})
+}
+
+func (l *MergeLogger) MergeNoticef(format string, args ...interface{}) {
+	l.merge(line{
+		level: capnslog.NOTICE,
+		str:   fmt.Sprintf(format, args...),
+	})
+}
+
+func (l *MergeLogger) MergeWarning(entries ...interface{}) {
+	l.merge(line{
+		level: capnslog.WARNING,
+		str:   fmt.Sprint(entries...),
+	})
+}
+
+func (l *MergeLogger) MergeWarningf(format string, args ...interface{}) {
+	l.merge(line{
+		level: capnslog.WARNING,
+		str:   fmt.Sprintf(format, args...),
+	})
+}
+
+func (l *MergeLogger) MergeError(entries ...interface{}) {
+	l.merge(line{
+		level: capnslog.ERROR,
+		str:   fmt.Sprint(entries...),
+	})
+}
+
+func (l *MergeLogger) MergeErrorf(format string, args ...interface{}) {
+	l.merge(line{
+		level: capnslog.ERROR,
+		str:   fmt.Sprintf(format, args...),
+	})
+}
+
+func (l *MergeLogger) merge(ln line) {
+	l.mu.Lock()
+
+	// increase count if the logger is merging the line
+	if status, ok := l.statusm[ln]; ok {
+		status.count++
+		l.mu.Unlock()
+		return
+	}
+
+	// initialize status of the line
+	l.statusm[ln] = &status{
+		period: defaultMergePeriod,
+		start:  time.Now(),
+	}
+	// release the lock before IO operation
+	l.mu.Unlock()
+	// print out the line at its first time
+	l.PackageLogger.Logf(ln.level, ln.str)
+}
+
+func (l *MergeLogger) outputLoop() {
+	for now := range time.Tick(outputInterval) {
+		var outputs []line
+
+		l.mu.Lock()
+		for ln, status := range l.statusm {
+			if status.isInMergePeriod(now) {
+				continue
+			}
+			if status.isEmpty() {
+				delete(l.statusm, ln)
+				continue
+			}
+			outputs = append(outputs, ln.append(status.summary(now)))
+			status.reset(now)
+		}
+		l.mu.Unlock()
+
+		for _, o := range outputs {
+			l.PackageLogger.Logf(o.level, o.str)
+		}
+	}
+}
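A hedged sketch of how `MergeLogger` might be used; the repository and package names passed to `capnslog.NewPackageLogger` are placeholders:

```go
package main

import (
	"time"

	"github.com/coreos/etcd/pkg/logutil"
	"github.com/coreos/pkg/capnslog"
)

func main() {
	// "github.com/example/app" and "demo" are placeholder repo/package names.
	plog := capnslog.NewPackageLogger("github.com/example/app", "demo")
	ml := logutil.NewMergeLogger(plog)

	// The first occurrence is printed immediately; repeats within the merge
	// period are only counted and later reported as a summary line.
	for i := 0; i < 50; i++ {
		ml.MergeWarning("lost connection to peer")
	}
	time.Sleep(2 * time.Second) // give outputLoop a chance to emit the summary
}
```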
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go b/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go
new file mode 100644
index 0000000..378bee0
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/package_logger.go
@@ -0,0 +1,60 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"github.com/coreos/pkg/capnslog"
+	"google.golang.org/grpc/grpclog"
+)
+
+// assert that "packageLogger" satisfies the "Logger" interface
+var _ Logger = &packageLogger{}
+
+// NewPackageLogger wraps "*capnslog.PackageLogger" that implements "Logger" interface.
+//
+// For example:
+//
+//  var defaultLogger Logger
+//  defaultLogger = NewPackageLogger("github.com/coreos/etcd", "snapshot")
+//
+func NewPackageLogger(repo, pkg string) Logger {
+	return &packageLogger{p: capnslog.NewPackageLogger(repo, pkg)}
+}
+
+type packageLogger struct {
+	p *capnslog.PackageLogger
+}
+
+func (l *packageLogger) Info(args ...interface{})                    { l.p.Info(args...) }
+func (l *packageLogger) Infoln(args ...interface{})                  { l.p.Info(args...) }
+func (l *packageLogger) Infof(format string, args ...interface{})    { l.p.Infof(format, args...) }
+func (l *packageLogger) Warning(args ...interface{})                 { l.p.Warning(args...) }
+func (l *packageLogger) Warningln(args ...interface{})               { l.p.Warning(args...) }
+func (l *packageLogger) Warningf(format string, args ...interface{}) { l.p.Warningf(format, args...) }
+func (l *packageLogger) Error(args ...interface{})                   { l.p.Error(args...) }
+func (l *packageLogger) Errorln(args ...interface{})                 { l.p.Error(args...) }
+func (l *packageLogger) Errorf(format string, args ...interface{})   { l.p.Errorf(format, args...) }
+func (l *packageLogger) Fatal(args ...interface{})                   { l.p.Fatal(args...) }
+func (l *packageLogger) Fatalln(args ...interface{})                 { l.p.Fatal(args...) }
+func (l *packageLogger) Fatalf(format string, args ...interface{})   { l.p.Fatalf(format, args...) }
+func (l *packageLogger) V(lvl int) bool {
+	return l.p.LevelAt(capnslog.LogLevel(lvl))
+}
+func (l *packageLogger) Lvl(lvl int) grpclog.LoggerV2 {
+	if l.p.LevelAt(capnslog.LogLevel(lvl)) {
+		return l
+	}
+	return &discardLogger{}
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap.go
new file mode 100644
index 0000000..2f69223
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/zap.go
@@ -0,0 +1,97 @@
+// Copyright 2019 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"sort"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// DefaultZapLoggerConfig defines default zap logger configuration.
+var DefaultZapLoggerConfig = zap.Config{
+	Level: zap.NewAtomicLevelAt(ConvertToZapLevel(DefaultLogLevel)),
+
+	Development: false,
+	Sampling: &zap.SamplingConfig{
+		Initial:    100,
+		Thereafter: 100,
+	},
+
+	Encoding: "json",
+
+	// copied from "zap.NewProductionEncoderConfig" with some updates
+	EncoderConfig: zapcore.EncoderConfig{
+		TimeKey:        "ts",
+		LevelKey:       "level",
+		NameKey:        "logger",
+		CallerKey:      "caller",
+		MessageKey:     "msg",
+		StacktraceKey:  "stacktrace",
+		LineEnding:     zapcore.DefaultLineEnding,
+		EncodeLevel:    zapcore.LowercaseLevelEncoder,
+		EncodeTime:     zapcore.ISO8601TimeEncoder,
+		EncodeDuration: zapcore.StringDurationEncoder,
+		EncodeCaller:   zapcore.ShortCallerEncoder,
+	},
+
+	// Use "/dev/null" to discard all
+	OutputPaths:      []string{"stderr"},
+	ErrorOutputPaths: []string{"stderr"},
+}
+
+// AddOutputPaths adds output paths to the existing output paths, resolving conflicts.
+func AddOutputPaths(cfg zap.Config, outputPaths, errorOutputPaths []string) zap.Config {
+	outputs := make(map[string]struct{})
+	for _, v := range cfg.OutputPaths {
+		outputs[v] = struct{}{}
+	}
+	for _, v := range outputPaths {
+		outputs[v] = struct{}{}
+	}
+	outputSlice := make([]string, 0)
+	if _, ok := outputs["/dev/null"]; ok {
+		// "/dev/null" to discard all
+		outputSlice = []string{"/dev/null"}
+	} else {
+		for k := range outputs {
+			outputSlice = append(outputSlice, k)
+		}
+	}
+	cfg.OutputPaths = outputSlice
+	sort.Strings(cfg.OutputPaths)
+
+	errOutputs := make(map[string]struct{})
+	for _, v := range cfg.ErrorOutputPaths {
+		errOutputs[v] = struct{}{}
+	}
+	for _, v := range errorOutputPaths {
+		errOutputs[v] = struct{}{}
+	}
+	errOutputSlice := make([]string, 0)
+	if _, ok := errOutputs["/dev/null"]; ok {
+		// "/dev/null" to discard all
+		errOutputSlice = []string{"/dev/null"}
+	} else {
+		for k := range errOutputs {
+			errOutputSlice = append(errOutputSlice, k)
+		}
+	}
+	cfg.ErrorOutputPaths = errOutputSlice
+	sort.Strings(cfg.ErrorOutputPaths)
+
+	return cfg
+}
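A small sketch of `AddOutputPaths` layered on `DefaultZapLoggerConfig`; the log file path is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/logutil"
)

func main() {
	cfg := logutil.DefaultZapLoggerConfig

	// "/var/log/etcd.log" is a hypothetical path; duplicates are merged and
	// "/dev/null" would override everything else.
	cfg = logutil.AddOutputPaths(cfg, []string{"/var/log/etcd.log"}, nil)

	fmt.Println(cfg.OutputPaths) // e.g. [/var/log/etcd.log stderr]
}
```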
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go
new file mode 100644
index 0000000..3f48d81
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/zap_grpc.go
@@ -0,0 +1,111 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+	"google.golang.org/grpc/grpclog"
+)
+
+// NewGRPCLoggerV2 converts "*zap.Logger" to "grpclog.LoggerV2".
+// It discards all INFO level logging in gRPC, if debug level
+// is not enabled in "*zap.Logger".
+func NewGRPCLoggerV2(lcfg zap.Config) (grpclog.LoggerV2, error) {
+	lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
+	if err != nil {
+		return nil, err
+	}
+	return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}, nil
+}
+
+// NewGRPCLoggerV2FromZapCore creates "grpclog.LoggerV2" from "zapcore.Core"
+// and "zapcore.WriteSyncer". It discards all INFO level logging in gRPC,
+// if debug level is not enabled in "*zap.Logger".
+func NewGRPCLoggerV2FromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) grpclog.LoggerV2 {
+	// "AddCallerSkip" to annotate caller outside of "logutil"
+	lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
+	return &zapGRPCLogger{lg: lg, sugar: lg.Sugar()}
+}
+
+type zapGRPCLogger struct {
+	lg    *zap.Logger
+	sugar *zap.SugaredLogger
+}
+
+func (zl *zapGRPCLogger) Info(args ...interface{}) {
+	if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
+		return
+	}
+	zl.sugar.Info(args...)
+}
+
+func (zl *zapGRPCLogger) Infoln(args ...interface{}) {
+	if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
+		return
+	}
+	zl.sugar.Info(args...)
+}
+
+func (zl *zapGRPCLogger) Infof(format string, args ...interface{}) {
+	if !zl.lg.Core().Enabled(zapcore.DebugLevel) {
+		return
+	}
+	zl.sugar.Infof(format, args...)
+}
+
+func (zl *zapGRPCLogger) Warning(args ...interface{}) {
+	zl.sugar.Warn(args...)
+}
+
+func (zl *zapGRPCLogger) Warningln(args ...interface{}) {
+	zl.sugar.Warn(args...)
+}
+
+func (zl *zapGRPCLogger) Warningf(format string, args ...interface{}) {
+	zl.sugar.Warnf(format, args...)
+}
+
+func (zl *zapGRPCLogger) Error(args ...interface{}) {
+	zl.sugar.Error(args...)
+}
+
+func (zl *zapGRPCLogger) Errorln(args ...interface{}) {
+	zl.sugar.Error(args...)
+}
+
+func (zl *zapGRPCLogger) Errorf(format string, args ...interface{}) {
+	zl.sugar.Errorf(format, args...)
+}
+
+func (zl *zapGRPCLogger) Fatal(args ...interface{}) {
+	zl.sugar.Fatal(args...)
+}
+
+func (zl *zapGRPCLogger) Fatalln(args ...interface{}) {
+	zl.sugar.Fatal(args...)
+}
+
+func (zl *zapGRPCLogger) Fatalf(format string, args ...interface{}) {
+	zl.sugar.Fatalf(format, args...)
+}
+
+func (zl *zapGRPCLogger) V(l int) bool {
+	// infoLog == 0
+	if l <= 0 { // debug level, then we ignore info level in gRPC
+		return !zl.lg.Core().Enabled(zapcore.DebugLevel)
+	}
+	return true
+}
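A minimal sketch wiring the zap-backed gRPC logger into `grpclog`; registering it via `grpclog.SetLoggerV2` is an assumed usage, not something this file requires:

```go
package main

import (
	"github.com/coreos/etcd/pkg/logutil"
	"google.golang.org/grpc/grpclog"
)

func main() {
	gl, err := logutil.NewGRPCLoggerV2(logutil.DefaultZapLoggerConfig)
	if err != nil {
		panic(err)
	}
	// With the default "info" level, gRPC INFO chatter is discarded;
	// enabling debug in the zap config lets it through.
	grpclog.SetLoggerV2(gl)
}
```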
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go
new file mode 100644
index 0000000..b1788bc
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/zap_journal.go
@@ -0,0 +1,92 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build !windows
+
+package logutil
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"os"
+	"path/filepath"
+
+	"github.com/coreos/etcd/pkg/systemd"
+
+	"github.com/coreos/go-systemd/journal"
+	"go.uber.org/zap/zapcore"
+)
+
+// NewJournalWriter wraps "io.Writer" to redirect log output
+// to the local systemd journal. If the journald send fails, it falls
+// back to writing to the original writer.
+// The decode overhead is only <30µs per write.
+// Reference: https://github.com/coreos/pkg/blob/master/capnslog/journald_formatter.go
+func NewJournalWriter(wr io.Writer) (io.Writer, error) {
+	return &journalWriter{Writer: wr}, systemd.DialJournal()
+}
+
+type journalWriter struct {
+	io.Writer
+}
+
+// WARN: assume that etcd uses default field names in zap encoder config
+// make sure to keep this up-to-date!
+type logLine struct {
+	Level  string `json:"level"`
+	Caller string `json:"caller"`
+}
+
+func (w *journalWriter) Write(p []byte) (int, error) {
+	line := &logLine{}
+	if err := json.NewDecoder(bytes.NewReader(p)).Decode(line); err != nil {
+		return 0, err
+	}
+
+	var pri journal.Priority
+	switch line.Level {
+	case zapcore.DebugLevel.String():
+		pri = journal.PriDebug
+	case zapcore.InfoLevel.String():
+		pri = journal.PriInfo
+
+	case zapcore.WarnLevel.String():
+		pri = journal.PriWarning
+	case zapcore.ErrorLevel.String():
+		pri = journal.PriErr
+
+	case zapcore.DPanicLevel.String():
+		pri = journal.PriCrit
+	case zapcore.PanicLevel.String():
+		pri = journal.PriCrit
+	case zapcore.FatalLevel.String():
+		pri = journal.PriCrit
+
+	default:
+		panic(fmt.Errorf("unknown log level: %q", line.Level))
+	}
+
+	err := journal.Send(string(p), pri, map[string]string{
+		"PACKAGE":           filepath.Dir(line.Caller),
+		"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
+	})
+	if err != nil {
+		// "journal" also falls back to stderr
+		// "fmt.Fprintln(os.Stderr, s)"
+		return w.Writer.Write(p)
+	}
+	return 0, nil
+}
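A sketch of routing zap output through the journal writer, falling back to stderr when journald is unreachable; the zapcore plumbing shown is one possible arrangement under those assumptions, not the only one:

```go
package main

import (
	"os"

	"github.com/coreos/etcd/pkg/logutil"
	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

func main() {
	// Fall back to plain stderr if the journald socket cannot be dialed.
	var w zapcore.WriteSyncer = os.Stderr
	if jw, err := logutil.NewJournalWriter(os.Stderr); err == nil {
		w = zapcore.AddSync(jw)
	}

	// The writer decodes each JSON line to pick a journal priority, so keep
	// the encoder field names aligned with DefaultZapLoggerConfig.
	enc := zapcore.NewJSONEncoder(logutil.DefaultZapLoggerConfig.EncoderConfig)
	lg := zap.New(zapcore.NewCore(enc, w, zapcore.InfoLevel))
	lg.Info("hello from zap via journald")
}
```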
diff --git a/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go b/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go
new file mode 100644
index 0000000..012d688
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/logutil/zap_raft.go
@@ -0,0 +1,102 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package logutil
+
+import (
+	"errors"
+
+	"github.com/coreos/etcd/raft"
+
+	"go.uber.org/zap"
+	"go.uber.org/zap/zapcore"
+)
+
+// NewRaftLogger builds "raft.Logger" from "*zap.Config".
+func NewRaftLogger(lcfg *zap.Config) (raft.Logger, error) {
+	if lcfg == nil {
+		return nil, errors.New("nil zap.Config")
+	}
+	lg, err := lcfg.Build(zap.AddCallerSkip(1)) // to annotate caller outside of "logutil"
+	if err != nil {
+		return nil, err
+	}
+	return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}, nil
+}
+
+// NewRaftLoggerZap converts "*zap.Logger" to "raft.Logger".
+func NewRaftLoggerZap(lg *zap.Logger) raft.Logger {
+	return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
+}
+
+// NewRaftLoggerFromZapCore creates "raft.Logger" from "zapcore.Core"
+// and "zapcore.WriteSyncer".
+func NewRaftLoggerFromZapCore(cr zapcore.Core, syncer zapcore.WriteSyncer) raft.Logger {
+	// "AddCallerSkip" to annotate caller outside of "logutil"
+	lg := zap.New(cr, zap.AddCaller(), zap.AddCallerSkip(1), zap.ErrorOutput(syncer))
+	return &zapRaftLogger{lg: lg, sugar: lg.Sugar()}
+}
+
+type zapRaftLogger struct {
+	lg    *zap.Logger
+	sugar *zap.SugaredLogger
+}
+
+func (zl *zapRaftLogger) Debug(args ...interface{}) {
+	zl.sugar.Debug(args...)
+}
+
+func (zl *zapRaftLogger) Debugf(format string, args ...interface{}) {
+	zl.sugar.Debugf(format, args...)
+}
+
+func (zl *zapRaftLogger) Error(args ...interface{}) {
+	zl.sugar.Error(args...)
+}
+
+func (zl *zapRaftLogger) Errorf(format string, args ...interface{}) {
+	zl.sugar.Errorf(format, args...)
+}
+
+func (zl *zapRaftLogger) Info(args ...interface{}) {
+	zl.sugar.Info(args...)
+}
+
+func (zl *zapRaftLogger) Infof(format string, args ...interface{}) {
+	zl.sugar.Infof(format, args...)
+}
+
+func (zl *zapRaftLogger) Warning(args ...interface{}) {
+	zl.sugar.Warn(args...)
+}
+
+func (zl *zapRaftLogger) Warningf(format string, args ...interface{}) {
+	zl.sugar.Warnf(format, args...)
+}
+
+func (zl *zapRaftLogger) Fatal(args ...interface{}) {
+	zl.sugar.Fatal(args...)
+}
+
+func (zl *zapRaftLogger) Fatalf(format string, args ...interface{}) {
+	zl.sugar.Fatalf(format, args...)
+}
+
+func (zl *zapRaftLogger) Panic(args ...interface{}) {
+	zl.sugar.Panic(args...)
+}
+
+func (zl *zapRaftLogger) Panicf(format string, args ...interface{}) {
+	zl.sugar.Panicf(format, args...)
+}
diff --git a/vendor/github.com/coreos/etcd/pkg/systemd/doc.go b/vendor/github.com/coreos/etcd/pkg/systemd/doc.go
new file mode 100644
index 0000000..30e77ce
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/systemd/doc.go
@@ -0,0 +1,16 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package systemd provides utility functions for systemd.
+package systemd
diff --git a/vendor/github.com/coreos/etcd/pkg/systemd/journal.go b/vendor/github.com/coreos/etcd/pkg/systemd/journal.go
new file mode 100644
index 0000000..b861c69
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/systemd/journal.go
@@ -0,0 +1,29 @@
+// Copyright 2018 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package systemd
+
+import "net"
+
+// DialJournal returns no error if the process can dial the journal socket.
+// It returns an error if the dial failed, which indicates journald is not
+// available (e.g. when running embedded etcd as a docker daemon).
+// Reference: https://github.com/coreos/go-systemd/blob/master/journal/journal.go.
+func DialJournal() error {
+	conn, err := net.Dial("unixgram", "/run/systemd/journal/socket")
+	if conn != nil {
+		defer conn.Close()
+	}
+	return err
+}
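A trivial probe using `DialJournal`:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/systemd"
)

func main() {
	if err := systemd.DialJournal(); err != nil {
		fmt.Println("journald not available:", err)
		return
	}
	fmt.Println("journald socket is reachable")
}
```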
diff --git a/vendor/github.com/coreos/etcd/pkg/types/doc.go b/vendor/github.com/coreos/etcd/pkg/types/doc.go
new file mode 100644
index 0000000..de8ef0b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/doc.go
@@ -0,0 +1,17 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package types declares various data types and implements type-checking
+// functions.
+package types
diff --git a/vendor/github.com/coreos/etcd/pkg/types/id.go b/vendor/github.com/coreos/etcd/pkg/types/id.go
new file mode 100644
index 0000000..1b042d9
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/id.go
@@ -0,0 +1,41 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"strconv"
+)
+
+// ID represents a generic identifier which is canonically
+// stored as a uint64 but is typically represented as a
+// base-16 string for input/output
+type ID uint64
+
+func (i ID) String() string {
+	return strconv.FormatUint(uint64(i), 16)
+}
+
+// IDFromString attempts to create an ID from a base-16 string.
+func IDFromString(s string) (ID, error) {
+	i, err := strconv.ParseUint(s, 16, 64)
+	return ID(i), err
+}
+
+// IDSlice implements the sort interface
+type IDSlice []ID
+
+func (p IDSlice) Len() int           { return len(p) }
+func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) }
+func (p IDSlice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
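A short sketch of `ID` parsing and `IDSlice` sorting; the hex value is arbitrary:

```go
package main

import (
	"fmt"
	"sort"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	id, err := types.IDFromString("8e9e05c52164694d") // arbitrary base-16 value
	if err != nil {
		panic(err)
	}
	fmt.Println(id) // printed back in base-16

	ids := types.IDSlice{0x3, 0x1, 0x2}
	sort.Sort(ids)
	fmt.Println(ids) // [1 2 3]
}
```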
diff --git a/vendor/github.com/coreos/etcd/pkg/types/set.go b/vendor/github.com/coreos/etcd/pkg/types/set.go
new file mode 100644
index 0000000..c111b0c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/set.go
@@ -0,0 +1,178 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"reflect"
+	"sort"
+	"sync"
+)
+
+type Set interface {
+	Add(string)
+	Remove(string)
+	Contains(string) bool
+	Equals(Set) bool
+	Length() int
+	Values() []string
+	Copy() Set
+	Sub(Set) Set
+}
+
+func NewUnsafeSet(values ...string) *unsafeSet {
+	set := &unsafeSet{make(map[string]struct{})}
+	for _, v := range values {
+		set.Add(v)
+	}
+	return set
+}
+
+func NewThreadsafeSet(values ...string) *tsafeSet {
+	us := NewUnsafeSet(values...)
+	return &tsafeSet{us, sync.RWMutex{}}
+}
+
+type unsafeSet struct {
+	d map[string]struct{}
+}
+
+// Add adds a new value to the set (no-op if the value is already present)
+func (us *unsafeSet) Add(value string) {
+	us.d[value] = struct{}{}
+}
+
+// Remove removes the given value from the set
+func (us *unsafeSet) Remove(value string) {
+	delete(us.d, value)
+}
+
+// Contains returns whether the set contains the given value
+func (us *unsafeSet) Contains(value string) (exists bool) {
+	_, exists = us.d[value]
+	return exists
+}
+
+// ContainsAll returns whether the set contains all given values
+func (us *unsafeSet) ContainsAll(values []string) bool {
+	for _, s := range values {
+		if !us.Contains(s) {
+			return false
+		}
+	}
+	return true
+}
+
+// Equals returns whether the contents of two sets are identical
+func (us *unsafeSet) Equals(other Set) bool {
+	v1 := sort.StringSlice(us.Values())
+	v2 := sort.StringSlice(other.Values())
+	v1.Sort()
+	v2.Sort()
+	return reflect.DeepEqual(v1, v2)
+}
+
+// Length returns the number of elements in the set
+func (us *unsafeSet) Length() int {
+	return len(us.d)
+}
+
+// Values returns the values of the Set in an unspecified order.
+func (us *unsafeSet) Values() (values []string) {
+	values = make([]string, 0)
+	for val := range us.d {
+		values = append(values, val)
+	}
+	return values
+}
+
+// Copy creates a new Set containing the values of the first
+func (us *unsafeSet) Copy() Set {
+	cp := NewUnsafeSet()
+	for val := range us.d {
+		cp.Add(val)
+	}
+
+	return cp
+}
+
+// Sub removes all elements in other from the set
+func (us *unsafeSet) Sub(other Set) Set {
+	oValues := other.Values()
+	result := us.Copy().(*unsafeSet)
+
+	for _, val := range oValues {
+		if _, ok := result.d[val]; !ok {
+			continue
+		}
+		delete(result.d, val)
+	}
+
+	return result
+}
+
+type tsafeSet struct {
+	us *unsafeSet
+	m  sync.RWMutex
+}
+
+func (ts *tsafeSet) Add(value string) {
+	ts.m.Lock()
+	defer ts.m.Unlock()
+	ts.us.Add(value)
+}
+
+func (ts *tsafeSet) Remove(value string) {
+	ts.m.Lock()
+	defer ts.m.Unlock()
+	ts.us.Remove(value)
+}
+
+func (ts *tsafeSet) Contains(value string) (exists bool) {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Contains(value)
+}
+
+func (ts *tsafeSet) Equals(other Set) bool {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Equals(other)
+}
+
+func (ts *tsafeSet) Length() int {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Length()
+}
+
+func (ts *tsafeSet) Values() (values []string) {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	return ts.us.Values()
+}
+
+func (ts *tsafeSet) Copy() Set {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	usResult := ts.us.Copy().(*unsafeSet)
+	return &tsafeSet{usResult, sync.RWMutex{}}
+}
+
+func (ts *tsafeSet) Sub(other Set) Set {
+	ts.m.RLock()
+	defer ts.m.RUnlock()
+	usResult := ts.us.Sub(other).(*unsafeSet)
+	return &tsafeSet{usResult, sync.RWMutex{}}
+}
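A small sketch of the `Set` implementations; note that `Values()` returns elements in an unspecified order:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	members := types.NewThreadsafeSet("n1", "n2", "n3")
	gone := types.NewUnsafeSet("n3")

	members.Remove("n2")
	fmt.Println(members.Contains("n1"))     // true
	fmt.Println(members.Sub(gone).Values()) // members not in gone, unordered
}
```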
diff --git a/vendor/github.com/coreos/etcd/pkg/types/slice.go b/vendor/github.com/coreos/etcd/pkg/types/slice.go
new file mode 100644
index 0000000..0dd9ca7
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/slice.go
@@ -0,0 +1,22 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+// Uint64Slice implements sort interface
+type Uint64Slice []uint64
+
+func (p Uint64Slice) Len() int           { return len(p) }
+func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p Uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
diff --git a/vendor/github.com/coreos/etcd/pkg/types/urls.go b/vendor/github.com/coreos/etcd/pkg/types/urls.go
new file mode 100644
index 0000000..9e5d03f
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/urls.go
@@ -0,0 +1,82 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"sort"
+	"strings"
+)
+
+type URLs []url.URL
+
+func NewURLs(strs []string) (URLs, error) {
+	all := make([]url.URL, len(strs))
+	if len(all) == 0 {
+		return nil, errors.New("no valid URLs given")
+	}
+	for i, in := range strs {
+		in = strings.TrimSpace(in)
+		u, err := url.Parse(in)
+		if err != nil {
+			return nil, err
+		}
+		if u.Scheme != "http" && u.Scheme != "https" && u.Scheme != "unix" && u.Scheme != "unixs" {
+			return nil, fmt.Errorf("URL scheme must be http, https, unix, or unixs: %s", in)
+		}
+		if _, _, err := net.SplitHostPort(u.Host); err != nil {
+			return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in)
+		}
+		if u.Path != "" {
+			return nil, fmt.Errorf("URL must not contain a path: %s", in)
+		}
+		all[i] = *u
+	}
+	us := URLs(all)
+	us.Sort()
+
+	return us, nil
+}
+
+func MustNewURLs(strs []string) URLs {
+	urls, err := NewURLs(strs)
+	if err != nil {
+		panic(err)
+	}
+	return urls
+}
+
+func (us URLs) String() string {
+	return strings.Join(us.StringSlice(), ",")
+}
+
+func (us *URLs) Sort() {
+	sort.Sort(us)
+}
+func (us URLs) Len() int           { return len(us) }
+func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() }
+func (us URLs) Swap(i, j int)      { us[i], us[j] = us[j], us[i] }
+
+func (us URLs) StringSlice() []string {
+	out := make([]string, len(us))
+	for i := range us {
+		out[i] = us[i].String()
+	}
+
+	return out
+}
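A minimal sketch of `NewURLs` validation; the addresses are placeholders:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	us, err := types.NewURLs([]string{
		"https://10.0.0.1:2380",
		"http://10.0.0.2:2380",
	})
	if err != nil {
		panic(err) // scheme, "host:port" form, and empty path are all enforced
	}
	fmt.Println(us.String()) // sorted, comma-separated
}
```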
diff --git a/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go
new file mode 100644
index 0000000..47690cc
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/pkg/types/urlsmap.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package types
+
+import (
+	"fmt"
+	"sort"
+	"strings"
+)
+
+// URLsMap is a map from a name to its URLs.
+type URLsMap map[string]URLs
+
+// NewURLsMap returns a URLsMap instantiated from the given string,
+// which consists of discovery-formatted names-to-URLs, like:
+// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2:2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380
+func NewURLsMap(s string) (URLsMap, error) {
+	m := parse(s)
+
+	cl := URLsMap{}
+	for name, urls := range m {
+		us, err := NewURLs(urls)
+		if err != nil {
+			return nil, err
+		}
+		cl[name] = us
+	}
+	return cl, nil
+}
+
+// NewURLsMapFromStringMap takes a map of strings and returns a URLsMap. The
+// string values in the map can be multiple values separated by the sep string.
+func NewURLsMapFromStringMap(m map[string]string, sep string) (URLsMap, error) {
+	var err error
+	um := URLsMap{}
+	for k, v := range m {
+		um[k], err = NewURLs(strings.Split(v, sep))
+		if err != nil {
+			return nil, err
+		}
+	}
+	return um, nil
+}
+
+// String turns URLsMap into discovery-formatted name-to-URLs sorted by name.
+func (c URLsMap) String() string {
+	var pairs []string
+	for name, urls := range c {
+		for _, url := range urls {
+			pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String()))
+		}
+	}
+	sort.Strings(pairs)
+	return strings.Join(pairs, ",")
+}
+
+// URLs returns a list of all URLs.
+// The returned list is sorted in ascending lexicographical order.
+func (c URLsMap) URLs() []string {
+	var urls []string
+	for _, us := range c {
+		for _, u := range us {
+			urls = append(urls, u.String())
+		}
+	}
+	sort.Strings(urls)
+	return urls
+}
+
+// Len returns the size of URLsMap.
+func (c URLsMap) Len() int {
+	return len(c)
+}
+
+// parse parses the given string and returns a map listing the values specified for each key.
+func parse(s string) map[string][]string {
+	m := make(map[string][]string)
+	for s != "" {
+		key := s
+		if i := strings.IndexAny(key, ","); i >= 0 {
+			key, s = key[:i], key[i+1:]
+		} else {
+			s = ""
+		}
+		if key == "" {
+			continue
+		}
+		value := ""
+		if i := strings.Index(key, "="); i >= 0 {
+			key, value = key[:i], key[i+1:]
+		}
+		m[key] = append(m[key], value)
+	}
+	return m
+}
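A short sketch of `NewURLsMap` parsing a discovery-formatted string; the member names and addresses are placeholders:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	m, err := types.NewURLsMap("infra0=http://10.0.0.1:2380,infra1=http://10.0.0.2:2380")
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Len())  // 2
	fmt.Println(m.URLs()) // all URLs, sorted lexicographically
}
```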
diff --git a/vendor/github.com/coreos/etcd/raft/README.md b/vendor/github.com/coreos/etcd/raft/README.md
new file mode 100644
index 0000000..fde22b1
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/README.md
@@ -0,0 +1,196 @@
+# Raft library
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+This Raft library is stable and feature complete. As of 2016, it is **the most widely used** Raft library in production, serving tens of thousands of clusters each day. It powers distributed systems such as etcd, Kubernetes, Docker Swarm, Cloud Foundry Diego, CockroachDB, TiDB, Project Calico, Flannel, and more.
+
+Most Raft implementations have a monolithic design, including storage handling, messaging serialization, and network transport. This library instead follows a minimalistic design philosophy by only implementing the core raft algorithm. This minimalism buys flexibility, determinism, and performance.
+
+To keep the codebase small as well as provide flexibility, the library only implements the Raft algorithm; both network and disk IO are left to the user. Library users must implement their own transportation layer for message passing between Raft peers over the wire. Similarly, users must implement their own storage layer to persist the Raft log and state.
+
+In order to easily test the Raft library, its behavior should be deterministic. To achieve this determinism, the library models Raft as a state machine.  The state machine takes a `Message` as input. A message can either be a local timer update or a network message sent from a remote peer. The state machine's output is a 3-tuple `{[]Messages, []LogEntries, NextState}` consisting of an array of `Messages`, `log entries`, and `Raft state changes`. For state machines with the same state, the same state machine input should always generate the same state machine output.
+
+A simple example application, _raftexample_, is also available to help illustrate how to use this package in practice: https://github.com/coreos/etcd/tree/master/contrib/raftexample
+
+# Features
+
+This raft implementation is a full-featured implementation of the Raft protocol. Features include:
+
+- Leader election
+- Log replication
+- Log compaction 
+- Membership changes
+- Leadership transfer extension
+- Efficient linearizable read-only queries served by both the leader and followers
+  - the leader checks with a quorum and bypasses the Raft log before processing read-only queries
+  - followers ask the leader for a safe read index before processing read-only queries
+- More efficient lease-based linearizable read-only queries served by both the leader and followers
+  - the leader bypasses the Raft log and processes read-only queries locally
+  - followers ask the leader for a safe read index before processing read-only queries
+  - this approach relies on the clocks of all the machines in the raft group
+
+This raft implementation also includes a few optional enhancements:
+
+- Optimistic pipelining to reduce log replication latency
+- Flow control for log replication
+- Batching Raft messages to reduce synchronized network I/O calls
+- Batching log entries to reduce disk synchronized I/O
+- Writing to leader's disk in parallel
+- Internal proposal redirection from followers to leader
+- Automatic stepping down when the leader loses quorum 
+
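+A rough usage sketch of the `ReadIndex`-based linearizable read path listed above (`waitForReadState`, `waitApplied`, and `serveReadLocally` are assumed application-side helpers; the matching is done on the request context carried back in each `ReadState`):
+
+```go
+  // Ask raft for a read index, tagged with a unique request context.
+  rctx := []byte("unique-read-request-id")
+  if err := n.ReadIndex(ctx, rctx); err != nil {
+    return err
+  }
+  // Wait for the matching ReadState to arrive via Ready().ReadStates, then
+  // wait for the applied index to reach rs.Index before serving the read
+  // from the local state machine.
+  rs := waitForReadState(rctx) // assumed helper: returns the ReadState carrying rctx
+  waitApplied(rs.Index)        // assumed helper: blocks until appliedIndex >= rs.Index
+  serveReadLocally()           // assumed helper: answers the query from local state
+```
+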
+## Notable Users
+
+- [cockroachdb](https://github.com/cockroachdb/cockroach) A Scalable, Survivable, Strongly-Consistent SQL Database
+- [dgraph](https://github.com/dgraph-io/dgraph) A Scalable, Distributed, Low Latency, High Throughput Graph Database
+- [etcd](https://github.com/coreos/etcd) A distributed reliable key-value store
+- [tikv](https://github.com/pingcap/tikv) A Distributed transactional key value database powered by Rust and Raft
+- [swarmkit](https://github.com/docker/swarmkit) A toolkit for orchestrating distributed systems at any scale.
+- [chain core](https://github.com/chain/chain) Software for operating permissioned, multi-asset blockchain networks
+
+## Usage
+
+The primary object in raft is a Node. Either start a Node from scratch using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a three-node cluster:
+```go
+  storage := raft.NewMemoryStorage()
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+  // Set peer list to the other nodes in the cluster.
+  // Note that they need to be started separately as well.
+  n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+```
+
+Start a single node cluster, like so:
+```go
+  // Create storage and config as shown above.
+  // Set peer list to itself, so this node can become the leader of this single-node cluster.
+  peers := []raft.Peer{{ID: 0x01}}
+  n := raft.StartNode(c, peers)
+```
+
+To allow a new node to join this cluster, do not pass in any peers. First, add the node to the existing cluster by calling `ProposeConfChange` on any existing node inside the cluster. Then, start the node with an empty peer list, like so:
+```go
+  // Create storage and config as shown above.
+  n := raft.StartNode(c, nil)
+```
+
+To restart a node from previous state:
+```go
+  storage := raft.NewMemoryStorage()
+
+  // Recover the in-memory storage from persistent snapshot, state and entries.
+  storage.ApplySnapshot(snapshot)
+  storage.SetHardState(state)
+  storage.Append(entries)
+
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+
+  // Restart raft without peer information.
+  // Peer information is already included in the storage.
+  n := raft.RestartNode(c)
+```
+
+After creating a Node, the user has a few responsibilities:
+
+First, read from the Node.Ready() channel and process the updates it contains. These steps may be performed in parallel, except as noted in step 2.
+
+1. Write Entries, HardState and Snapshot to persistent storage in order, i.e. Entries first, then HardState and Snapshot if they are not empty. If persistent storage supports atomic writes then all of them can be written together. Note that when writing an Entry with Index i, any previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that no messages be sent until the latest HardState has been persisted to disk, and all Entries written by any previous Ready batch (Messages may be sent while entries from the same batch are being persisted). To reduce the I/O latency, an optimization can be applied to make leader write to disk in parallel with its followers (as explained at section 10.2.1 in Raft thesis). If any Message has type MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be large). Note: Marshalling messages is not thread-safe; it is important to make sure that no new entries are persisted while marshalling. The easiest way to achieve this is to serialise the messages directly inside the main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine. If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() to apply it to the node. The configuration change may be cancelled at this point by setting the NodeID field to zero before calling ApplyConfChange (but ApplyConfChange must be called one way or the other, and the decision to cancel must be based solely on the state machine and not external information such as the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates. This may be done at any time after step 1, although all updates must be processed in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an implementation of the Storage interface. The provided MemoryStorage type can be used for this (if repopulating its state upon a restart), or a custom disk-backed implementation can be supplied.
+
+Third, after receiving a message from another node, pass it to Node.Step:
+
+```go
+	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+		n.Step(ctx, m)
+	}
+```
+
+Finally, call `Node.Tick()` at regular intervals (probably via a `time.Ticker`). Raft has two important timeouts: heartbeat and the election timeout. However, internally to the raft package time is represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+```go
+  for {
+    select {
+    case <-s.Ticker:
+      n.Tick()
+    case rd := <-s.Node.Ready():
+      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
+      send(rd.Messages)
+      if !raft.IsEmptySnap(rd.Snapshot) {
+        processSnapshot(rd.Snapshot)
+      }
+      for _, entry := range rd.CommittedEntries {
+        process(entry)
+        if entry.Type == raftpb.EntryConfChange {
+          var cc raftpb.ConfChange
+          cc.Unmarshal(entry.Data)
+          s.Node.ApplyConfChange(cc)
+        }
+      }
+      s.Node.Advance()
+    case <-s.done:
+      return
+    }
+  }
+```
+
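+The `saveToStorage` helper above is left to the application. A minimal sketch, assuming the in-memory `MemoryStorage` from the earlier examples (a production application would write to durable storage such as a WAL instead; error handling omitted):
+
+```go
+  // storage is the *raft.MemoryStorage created with raft.NewMemoryStorage().
+  func saveToStorage(hardState raftpb.HardState, entries []raftpb.Entry, snapshot raftpb.Snapshot) {
+    if !raft.IsEmptySnap(snapshot) {
+      storage.ApplySnapshot(snapshot)
+    }
+    if !raft.IsEmptyHardState(hardState) {
+      storage.SetHardState(hardState)
+    }
+    storage.Append(entries)
+  }
+```
+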
+To propose changes to the state machine, take the application data, serialize it into a byte slice, and call:
+
+```go
+	n.Propose(ctx, data)
+```
+
+If the proposal is committed, data will appear in committed entries with type raftpb.EntryNormal. There is no guarantee that a proposed command will be committed; the command may have to be reproposed after a timeout. 
+
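+Since proposals can be dropped silently (for example, while there is no leader), a common pattern is to retry; a minimal sketch, assuming the application tags each proposal so it can recognize its own data in the committed entries (`waitCommitted` is an assumed helper):
+
+```go
+  func proposeWithRetry(ctx context.Context, n raft.Node, data []byte) error {
+    for {
+      if err := n.Propose(ctx, data); err != nil {
+        return err
+      }
+      // waitCommitted is an assumed application helper that returns true once
+      // data has shown up in CommittedEntries, or false on timeout.
+      waitCtx, cancel := context.WithTimeout(ctx, 3*time.Second)
+      committed := waitCommitted(waitCtx, data)
+      cancel()
+      if committed {
+        return nil
+      }
+      if err := ctx.Err(); err != nil {
+        return err
+      }
+      // Not committed within the timeout: re-propose.
+    }
+  }
+```
+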
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+```go
+	n.ProposeConfChange(ctx, cc)
+```
+
+After the config change is committed, a committed entry with type raftpb.EntryConfChange will be returned. It must be applied to the node through:
+
+```go
+	var cc raftpb.ConfChange
+	cc.Unmarshal(data)
+	n.ApplyConfChange(cc)
+```
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+## Implementation notes
+
+This implementation is up to date with the final Raft thesis (https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although this implementation of the membership change protocol differs somewhat from that described in chapter 4. The key invariant that membership changes happen one node at a time is preserved, but in our implementation the membership change takes effect when its entry is applied, not when it is added to the log (so the entry is committed under the old membership instead of the new). This is equivalent in terms of safety, since the old and new configurations are guaranteed to overlap.
+
+To ensure there is no attempt to commit two membership changes at once by matching log positions (which would be unsafe since they should have different quorum requirements), any proposed membership change is simply disallowed while any uncommitted change appears in the leader's log.
+
+This approach introduces a problem when removing a member from a two-member cluster: If one of the members dies before the other one receives the commit of the confchange entry, then the member cannot be removed any more since the cluster cannot make progress. For this reason it is highly recommended to use three or more nodes in every cluster.
diff --git a/vendor/github.com/coreos/etcd/raft/design.md b/vendor/github.com/coreos/etcd/raft/design.md
new file mode 100644
index 0000000..7bc0531
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/design.md
@@ -0,0 +1,57 @@
+## Progress
+
+Progress represents a follower’s progress in the view of the leader. The leader maintains the progress of all followers, and sends a `replication message` to each follower based on its progress.
+
+`replication message` is a `msgApp` with log entries.
+
+A progress has two attributes: `match` and `next`. `match` is the index of the highest known matched entry. If the leader knows nothing about the follower’s replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. The leader puts entries from `next` up to its latest one in the next `replication message`.
+
+A progress is in one of three states: `probe`, `replicate`, or `snapshot`.
+
+```
+                            +--------------------------------------------------------+          
+                            |                  send snapshot                         |          
+                            |                                                        |          
+                  +---------+----------+                                  +----------v---------+
+              +--->       probe        |                                  |      snapshot      |
+              |   |  max inflight = 1  <----------------------------------+  max inflight = 0  |
+              |   +---------+----------+                                  +--------------------+
+              |             |            1. snapshot success                                    
+              |             |               (next=snapshot.index + 1)                           
+              |             |            2. snapshot failure                                    
+              |             |               (no change)                                         
+              |             |            3. receives msgAppResp(rej=false&&index>lastsnap.index)
+              |             |               (match=m.index,next=match+1)                        
+receives msgAppResp(rej=true)                                                                   
+(next=match+1)|             |                                                                   
+              |             |                                                                   
+              |             |                                                                   
+              |             |   receives msgAppResp(rej=false&&index>match)                     
+              |             |   (match=m.index,next=match+1)                                    
+              |             |                                                                   
+              |             |                                                                   
+              |             |                                                                   
+              |   +---------v----------+                                                        
+              |   |     replicate      |                                                        
+              +---+  max inflight = n  |                                                        
+                  +--------------------+                                                        
+```
+
+When the progress of a follower is in `probe` state, the leader sends at most one `replication message` per heartbeat interval. The leader sends `replication message`s slowly, probing the actual progress of the follower. A `msgHeartbeatResp` or a `msgAppResp` with reject set might trigger the sending of the next `replication message`.
+
+When the progress of a follower is in `replicate` state, the leader sends `replication message`s and optimistically increases `next` past the latest entry sent. This is an optimized state for fast replication of log entries to the follower.
+
+When the progress of a follower is in `snapshot` state, the leader stops sending any `replication message`.
+
+A newly elected leader sets the progress of all followers to `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends a `replication message` to each follower and probes its progress.
+
+A progress changes to `replicate` when the follower replies with a non-rejecting `msgAppResp`, which implies that it has matched the index sent. At this point, the leader starts to stream log entries to the follower quickly. The progress falls back to `probe` when the follower replies with a rejecting `msgAppResp` or the link layer reports the follower is unreachable. We aggressively reset `next` to `match`+1 since, if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in `msgAppResp`. (We might end up sending some duplicate entries when we aggressively reset `next` too low; see the open question.)
+
+A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits for the success, failure, or abort of the snapshot it sent. The progress goes back to `probe` after the sending result is applied.
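+
+To make the bookkeeping concrete, here is a toy sketch of the per-follower progress described above (illustrative only; the actual `Progress` type in this package carries more fields and logic):
+
+```go
+// Illustrative per-follower progress bookkeeping (not the package's real type).
+type progressState int
+
+const (
+	stateProbe progressState = iota
+	stateReplicate
+	stateSnapshot
+)
+
+type progress struct {
+	match uint64 // highest index known to be replicated on the follower
+	next  uint64 // index of the next entry to send to the follower
+	state progressState
+}
+
+// On a non-rejecting msgAppResp the leader advances match/next and streams.
+func (pr *progress) onAppResp(index uint64) {
+	if index > pr.match {
+		pr.match, pr.next = index, index+1
+		pr.state = stateReplicate
+	}
+}
+
+// On a rejecting msgAppResp the leader falls back to probing.
+func (pr *progress) onAppReject() {
+	pr.next = pr.match + 1
+	pr.state = stateProbe
+}
+```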
+
+### Flow Control
+
+1. Limit the max byte size of each message sent. The max should be configurable.
+This lowers the cost in the probing state, since we limit the size per message; it also lowers the penalty when `next` is aggressively decreased to too low a value.
+
+2. Limit the number of in-flight messages to less than N when in `replicate` state. N should be configurable. Most implementations will have a sending buffer on top of the actual network transport layer (so it does not block the raft node). We want to make sure raft does not overflow that buffer, which can cause messages to be dropped and trigger a bunch of unnecessary resends.
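+
+Both limits correspond to knobs on the public `Config` shown in the README examples; a minimal sketch:
+
+```go
+  c := &raft.Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   1024 * 1024, // 1. cap the byte size of entries per message
+    MaxInflightMsgs: 256,         // 2. cap in-flight messages in `replicate` state
+  }
+```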
diff --git a/vendor/github.com/coreos/etcd/raft/doc.go b/vendor/github.com/coreos/etcd/raft/doc.go
new file mode 100644
index 0000000..b55c591
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/doc.go
@@ -0,0 +1,300 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package raft sends and receives messages in the Protocol Buffer format
+defined in the raftpb package.
+
+Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
+The state machine is kept in sync through the use of a replicated log.
+For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
+(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
+
+A simple example application, _raftexample_, is also available to help illustrate
+how to use this package in practice:
+https://github.com/coreos/etcd/tree/master/contrib/raftexample
+
+Usage
+
+The primary object in raft is a Node. You either start a Node from scratch
+using raft.StartNode or start a Node from some initial state using raft.RestartNode.
+
+To start a node from scratch:
+
+  storage := raft.NewMemoryStorage()
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+  n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}})
+
+To restart a node from previous state:
+
+  storage := raft.NewMemoryStorage()
+
+  // recover the in-memory storage from persistent
+  // snapshot, state and entries.
+  storage.ApplySnapshot(snapshot)
+  storage.SetHardState(state)
+  storage.Append(entries)
+
+  c := &Config{
+    ID:              0x01,
+    ElectionTick:    10,
+    HeartbeatTick:   1,
+    Storage:         storage,
+    MaxSizePerMsg:   4096,
+    MaxInflightMsgs: 256,
+  }
+
+  // restart raft without peer information.
+  // peer information is already included in the storage.
+  n := raft.RestartNode(c)
+
+Now that you are holding onto a Node you have a few responsibilities:
+
+First, you must read from the Node.Ready() channel and process the updates
+it contains. These steps may be performed in parallel, except as noted in step
+2.
+
+1. Write HardState, Entries, and Snapshot to persistent storage if they are
+not empty. Note that when writing an Entry with Index i, any
+previously-persisted entries with Index >= i must be discarded.
+
+2. Send all Messages to the nodes named in the To field. It is important that
+no messages be sent until the latest HardState has been persisted to disk,
+and all Entries written by any previous Ready batch (Messages may be sent while
+entries from the same batch are being persisted). To reduce the I/O latency, an
+optimization can be applied to make leader write to disk in parallel with its
+followers (as explained at section 10.2.1 in Raft thesis). If any Message has type
+MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be
+large).
+
+Note: Marshalling messages is not thread-safe; it is important that you
+make sure that no new entries are persisted while marshalling.
+The easiest way to achieve this is to serialise the messages directly inside
+your main raft loop.
+
+3. Apply Snapshot (if any) and CommittedEntries to the state machine.
+If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange()
+to apply it to the node. The configuration change may be cancelled at this point
+by setting the NodeID field to zero before calling ApplyConfChange
+(but ApplyConfChange must be called one way or the other, and the decision to cancel
+must be based solely on the state machine and not external information such as
+the observed health of the node).
+
+4. Call Node.Advance() to signal readiness for the next batch of updates.
+This may be done at any time after step 1, although all updates must be processed
+in the order they were returned by Ready.
+
+Second, all persisted log entries must be made available via an
+implementation of the Storage interface. The provided MemoryStorage
+type can be used for this (if you repopulate its state upon a
+restart), or you can supply your own disk-backed implementation.
+
+Third, when you receive a message from another node, pass it to Node.Step:
+
+	func recvRaftRPC(ctx context.Context, m raftpb.Message) {
+		n.Step(ctx, m)
+	}
+
+Finally, you need to call Node.Tick() at regular intervals (probably
+via a time.Ticker). Raft has two important timeouts: heartbeat and the
+election timeout. However, internally to the raft package time is
+represented by an abstract "tick".
+
+The total state machine handling loop will look something like this:
+
+  for {
+    select {
+    case <-s.Ticker:
+      n.Tick()
+    case rd := <-s.Node.Ready():
+      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
+      send(rd.Messages)
+      if !raft.IsEmptySnap(rd.Snapshot) {
+        processSnapshot(rd.Snapshot)
+      }
+      for _, entry := range rd.CommittedEntries {
+        process(entry)
+        if entry.Type == raftpb.EntryConfChange {
+          var cc raftpb.ConfChange
+          cc.Unmarshal(entry.Data)
+          s.Node.ApplyConfChange(cc)
+        }
+      }
+      s.Node.Advance()
+    case <-s.done:
+      return
+    }
+  }
+
+To propose changes to the state machine from your node, take your application
+data, serialize it into a byte slice, and call:
+
+	n.Propose(ctx, data)
+
+If the proposal is committed, data will appear in committed entries with type
+raftpb.EntryNormal. There is no guarantee that a proposed command will be
+committed; you may have to re-propose after a timeout.
+
+To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
+
+	n.ProposeConfChange(ctx, cc)
+
+After the config change is committed, a committed entry with type
+raftpb.EntryConfChange will be returned. You must apply it to the node through:
+
+	var cc raftpb.ConfChange
+	cc.Unmarshal(data)
+	n.ApplyConfChange(cc)
+
+Note: An ID represents a unique node in a cluster for all time. A
+given ID MUST be used only once even if the old node has been removed.
+This means that for example IP addresses make poor node IDs since they
+may be reused. Node IDs must be non-zero.
+
+Implementation notes
+
+This implementation is up to date with the final Raft thesis
+(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our
+implementation of the membership change protocol differs somewhat from
+that described in chapter 4. The key invariant that membership changes
+happen one node at a time is preserved, but in our implementation the
+membership change takes effect when its entry is applied, not when it
+is added to the log (so the entry is committed under the old
+membership instead of the new). This is equivalent in terms of safety,
+since the old and new configurations are guaranteed to overlap.
+
+To ensure that we do not attempt to commit two membership changes at
+once by matching log positions (which would be unsafe since they
+should have different quorum requirements), we simply disallow any
+proposed membership change while any uncommitted change appears in
+the leader's log.
+
+This approach introduces a problem when you try to remove a member
+from a two-member cluster: If one of the members dies before the
+other one receives the commit of the confchange entry, then the member
+cannot be removed any more since the cluster cannot make progress.
+For this reason it is highly recommended to use three or more nodes in
+every cluster.
+
+MessageType
+
+Package raft sends and receives messages in Protocol Buffer format (defined
+in the raftpb package). Each state (follower, candidate, leader) implements its
+own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
+advancing with the given raftpb.Message. Each step is determined by its
+raftpb.MessageType. Note that every step is checked by one common method
+'Step' that safety-checks the terms of node and incoming message to prevent
+stale log entries:
+
+	'MsgHup' is used for election. If a node is a follower or candidate, the
+	'tick' function in 'raft' struct is set as 'tickElection'. If a follower or
+	candidate has not received any heartbeat before the election timeout, it
+	passes 'MsgHup' to its Step method and becomes (or remains) a candidate to
+	start a new election.
+
+	'MsgBeat' is an internal type that signals the leader to send a heartbeat of
+	the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
+	the 'raft' struct is set as 'tickHeartbeat', and triggers the leader to
+	send periodic 'MsgHeartbeat' messages to its followers.
+
+	'MsgProp' proposes to append data to its log entries. This is a special
+	type to redirect proposals to leader. Therefore, send method overwrites
+	raftpb.Message's term with its HardState's term to avoid attaching its
+	local term to 'MsgProp'. When 'MsgProp' is passed to the leader's 'Step'
+	method, the leader first calls the 'appendEntry' method to append entries
+	to its log, and then calls 'bcastAppend' method to send those entries to
+	its peers. When passed to candidate, 'MsgProp' is dropped. When passed to
+	follower, 'MsgProp' is stored in follower's mailbox(msgs) by the send
+	method. It is stored with sender's ID and later forwarded to leader by
+	rafthttp package.
+
+	'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
+	which calls sendAppend, which sends soon-to-be-replicated logs in 'MsgApp'
+	type. When 'MsgApp' is passed to candidate's Step method, candidate reverts
+	back to follower, because it indicates that there is a valid leader sending
+	'MsgApp' messages. Candidate and follower respond to this message in
+	'MsgAppResp' type.
+
+	'MsgAppResp' is the response to a log replication request ('MsgApp'). When
+	'MsgApp' is passed to candidate or follower's Step method, it responds by
+	calling 'handleAppendEntries' method, which sends 'MsgAppResp' to raft
+	mailbox.
+
+	'MsgVote' requests votes for election. When a node is a follower or
+	candidate and 'MsgHup' is passed to its Step method, then the node calls
+	'campaign' method to campaign itself to become a leader. Once 'campaign'
+	method is called, the node becomes candidate and sends 'MsgVote' to peers
+	in cluster to request votes. When passed to leader or candidate's Step
+	method and the message's Term is lower than leader's or candidate's,
+	'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject true).
+	If leader or candidate receives 'MsgVote' with higher term, it will revert
+	back to follower. When 'MsgVote' is passed to follower, it votes for the
+	sender only when sender's last term is greater than MsgVote's term or
+	sender's last term is equal to MsgVote's term but sender's last committed
+	index is greater than or equal to follower's.
+
+	'MsgVoteResp' contains responses from voting request. When 'MsgVoteResp' is
+	passed to candidate, the candidate calculates how many votes it has won. If
+	it's more than majority (quorum), it becomes leader and calls 'bcastAppend'.
+	If candidate receives majority of votes of denials, it reverts back to
+	follower.
+
+	'MsgPreVote' and 'MsgPreVoteResp' are used in an optional two-phase election
+	protocol. When Config.PreVote is true, a pre-election is carried out first
+	(using the same rules as a regular election), and no node increases its term
+	number unless the pre-election indicates that the campaigning node would win.
+	This minimizes disruption when a partitioned node rejoins the cluster.
+
+	'MsgSnap' requests to install a snapshot message. When a node has just
+	become a leader or the leader receives 'MsgProp' message, it calls
+	'bcastAppend' method, which then calls 'sendAppend' method to each
+	follower. In 'sendAppend', if a leader fails to get term or entries,
+	the leader requests snapshot by sending 'MsgSnap' type message.
+
+	'MsgSnapStatus' reports the result of a snapshot install message. When a
+	follower rejects 'MsgSnap', it indicates that the snapshot request sent
+	with 'MsgSnap' failed, typically because of network issues that prevented
+	the network layer from delivering the snapshot to the follower. The
+	leader then considers the follower's progress as probe. When 'MsgSnap'
+	is not rejected, it indicates that the snapshot succeeded and the leader
+	sets the follower's progress to probe and resumes its log replication.
+
+	'MsgHeartbeat' sends heartbeat from leader. When 'MsgHeartbeat' is passed
+	to candidate and message's term is higher than candidate's, the candidate
+	reverts back to follower and updates its committed index from the one in
+	this heartbeat. And it sends the message to its mailbox. When
+	'MsgHeartbeat' is passed to follower's Step method and message's term is
+	higher than follower's, the follower updates its leaderID with the ID
+	from the message.
+
+	'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
+	is passed to leader's Step method, the leader knows which follower
+	responded. And only when the leader's last committed index is greater than
+	follower's Match index, the leader runs the 'sendAppend' method.
+
+	'MsgUnreachable' tells that a request (message) wasn't delivered. When
+	'MsgUnreachable' is passed to leader's Step method, the leader discovers
+	that the follower that sent this 'MsgUnreachable' is not reachable, often
+	indicating 'MsgApp' is lost. When follower's progress state is replicate,
+	the leader sets it back to probe.
+
+*/
+package raft
diff --git a/vendor/github.com/coreos/etcd/raft/log.go b/vendor/github.com/coreos/etcd/raft/log.go
new file mode 100644
index 0000000..c3036d3
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/log.go
@@ -0,0 +1,358 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+	"log"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+type raftLog struct {
+	// storage contains all stable entries since the last snapshot.
+	storage Storage
+
+	// unstable contains all unstable entries and snapshot.
+	// they will be saved into storage.
+	unstable unstable
+
+	// committed is the highest log position that is known to be in
+	// stable storage on a quorum of nodes.
+	committed uint64
+	// applied is the highest log position that the application has
+	// been instructed to apply to its state machine.
+	// Invariant: applied <= committed
+	applied uint64
+
+	logger Logger
+}
+
+// newLog returns a log using the given storage. It recovers the log to the state
+// where it has just committed and applied the latest snapshot.
+func newLog(storage Storage, logger Logger) *raftLog {
+	if storage == nil {
+		log.Panic("storage must not be nil")
+	}
+	log := &raftLog{
+		storage: storage,
+		logger:  logger,
+	}
+	firstIndex, err := storage.FirstIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	lastIndex, err := storage.LastIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	log.unstable.offset = lastIndex + 1
+	log.unstable.logger = logger
+	// Initialize our committed and applied pointers to the time of the last compaction.
+	log.committed = firstIndex - 1
+	log.applied = firstIndex - 1
+
+	return log
+}
+
+func (l *raftLog) String() string {
+	return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries))
+}
+
+// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
+// it returns (last index of new entries, true).
+func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
+	if l.matchTerm(index, logTerm) {
+		lastnewi = index + uint64(len(ents))
+		ci := l.findConflict(ents)
+		switch {
+		case ci == 0:
+		case ci <= l.committed:
+			l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
+		default:
+			offset := index + 1
+			l.append(ents[ci-offset:]...)
+		}
+		l.commitTo(min(committed, lastnewi))
+		return lastnewi, true
+	}
+	return 0, false
+}
+
+func (l *raftLog) append(ents ...pb.Entry) uint64 {
+	if len(ents) == 0 {
+		return l.lastIndex()
+	}
+	if after := ents[0].Index - 1; after < l.committed {
+		l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
+	}
+	l.unstable.truncateAndAppend(ents)
+	return l.lastIndex()
+}
+
+// findConflict finds the index of the conflict.
+// It returns the first pair of conflicting entries between the existing
+// entries and the given entries, if there are any.
+// If there are no conflicting entries, and the existing entries contain
+// all the given entries, zero will be returned.
+// If there are no conflicting entries, but the given entries contain new
+// entries, the index of the first new entry will be returned.
+// An entry is considered to be conflicting if it has the same index but
+// a different term.
+// The first entry MUST have an index equal to the argument 'from'.
+// The index of the given entries MUST be continuously increasing.
+func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
+	for _, ne := range ents {
+		if !l.matchTerm(ne.Index, ne.Term) {
+			if ne.Index <= l.lastIndex() {
+				l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
+					ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
+			}
+			return ne.Index
+		}
+	}
+	return 0
+}
+
+func (l *raftLog) unstableEntries() []pb.Entry {
+	if len(l.unstable.entries) == 0 {
+		return nil
+	}
+	return l.unstable.entries
+}
+
+// nextEnts returns all the available entries for execution.
+// If applied is smaller than the index of snapshot, it returns all committed
+// entries after the index of snapshot.
+func (l *raftLog) nextEnts() (ents []pb.Entry) {
+	off := max(l.applied+1, l.firstIndex())
+	if l.committed+1 > off {
+		ents, err := l.slice(off, l.committed+1, noLimit)
+		if err != nil {
+			l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
+		}
+		return ents
+	}
+	return nil
+}
+
+// hasNextEnts reports whether there are any available entries for execution. This
+// is a fast check without heavy raftLog.slice() in raftLog.nextEnts().
+func (l *raftLog) hasNextEnts() bool {
+	off := max(l.applied+1, l.firstIndex())
+	return l.committed+1 > off
+}
+
+func (l *raftLog) snapshot() (pb.Snapshot, error) {
+	if l.unstable.snapshot != nil {
+		return *l.unstable.snapshot, nil
+	}
+	return l.storage.Snapshot()
+}
+
+func (l *raftLog) firstIndex() uint64 {
+	if i, ok := l.unstable.maybeFirstIndex(); ok {
+		return i
+	}
+	index, err := l.storage.FirstIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	return index
+}
+
+func (l *raftLog) lastIndex() uint64 {
+	if i, ok := l.unstable.maybeLastIndex(); ok {
+		return i
+	}
+	i, err := l.storage.LastIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	return i
+}
+
+func (l *raftLog) commitTo(tocommit uint64) {
+	// never decrease commit
+	if l.committed < tocommit {
+		if l.lastIndex() < tocommit {
+			l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex())
+		}
+		l.committed = tocommit
+	}
+}
+
+func (l *raftLog) appliedTo(i uint64) {
+	if i == 0 {
+		return
+	}
+	if l.committed < i || i < l.applied {
+		l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed)
+	}
+	l.applied = i
+}
+
+func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) }
+
+func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) }
+
+func (l *raftLog) lastTerm() uint64 {
+	t, err := l.term(l.lastIndex())
+	if err != nil {
+		l.logger.Panicf("unexpected error when getting the last term (%v)", err)
+	}
+	return t
+}
+
+func (l *raftLog) term(i uint64) (uint64, error) {
+	// the valid term range is [index of dummy entry, last index]
+	dummyIndex := l.firstIndex() - 1
+	if i < dummyIndex || i > l.lastIndex() {
+		// TODO: return an error instead?
+		return 0, nil
+	}
+
+	if t, ok := l.unstable.maybeTerm(i); ok {
+		return t, nil
+	}
+
+	t, err := l.storage.Term(i)
+	if err == nil {
+		return t, nil
+	}
+	if err == ErrCompacted || err == ErrUnavailable {
+		return 0, err
+	}
+	panic(err) // TODO(bdarnell)
+}
+
+func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) {
+	if i > l.lastIndex() {
+		return nil, nil
+	}
+	return l.slice(i, l.lastIndex()+1, maxsize)
+}
+
+// allEntries returns all entries in the log.
+func (l *raftLog) allEntries() []pb.Entry {
+	ents, err := l.entries(l.firstIndex(), noLimit)
+	if err == nil {
+		return ents
+	}
+	if err == ErrCompacted { // try again if there was a racing compaction
+		return l.allEntries()
+	}
+	// TODO (xiangli): handle error?
+	panic(err)
+}
+
+// isUpToDate determines if the given (lastIndex,term) log is more up-to-date
+// by comparing the index and term of the last entries in the existing logs.
+// If the logs have last entries with different terms, then the log with the
+// later term is more up-to-date. If the logs end with the same term, then
+// whichever log has the larger lastIndex is more up-to-date. If the logs are
+// the same, the given log is up-to-date.
+func (l *raftLog) isUpToDate(lasti, term uint64) bool {
+	return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex())
+}
+
+func (l *raftLog) matchTerm(i, term uint64) bool {
+	t, err := l.term(i)
+	if err != nil {
+		return false
+	}
+	return t == term
+}
+
+func (l *raftLog) maybeCommit(maxIndex, term uint64) bool {
+	if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term {
+		l.commitTo(maxIndex)
+		return true
+	}
+	return false
+}
+
+func (l *raftLog) restore(s pb.Snapshot) {
+	l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term)
+	l.committed = s.Metadata.Index
+	l.unstable.restore(s)
+}
+
+// slice returns a slice of log entries from lo through hi-1, inclusive.
+func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) {
+	err := l.mustCheckOutOfBounds(lo, hi)
+	if err != nil {
+		return nil, err
+	}
+	if lo == hi {
+		return nil, nil
+	}
+	var ents []pb.Entry
+	if lo < l.unstable.offset {
+		storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize)
+		if err == ErrCompacted {
+			return nil, err
+		} else if err == ErrUnavailable {
+			l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset))
+		} else if err != nil {
+			panic(err) // TODO(bdarnell)
+		}
+
+		// check if ents has reached the size limitation
+		if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo {
+			return storedEnts, nil
+		}
+
+		ents = storedEnts
+	}
+	if hi > l.unstable.offset {
+		unstable := l.unstable.slice(max(lo, l.unstable.offset), hi)
+		if len(ents) > 0 {
+			ents = append([]pb.Entry{}, ents...)
+			ents = append(ents, unstable...)
+		} else {
+			ents = unstable
+		}
+	}
+	return limitSize(ents, maxSize), nil
+}
+
+// l.firstIndex() <= lo <= hi <= l.lastIndex() + 1
+func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error {
+	if lo > hi {
+		l.logger.Panicf("invalid slice %d > %d", lo, hi)
+	}
+	fi := l.firstIndex()
+	if lo < fi {
+		return ErrCompacted
+	}
+
+	length := l.lastIndex() + 1 - fi
+	if lo < fi || hi > fi+length {
+		l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex())
+	}
+	return nil
+}
+
+func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 {
+	if err == nil {
+		return t
+	}
+	if err == ErrCompacted {
+		return 0
+	}
+	l.logger.Panicf("unexpected error (%v)", err)
+	return 0
+}
diff --git a/vendor/github.com/coreos/etcd/raft/log_unstable.go b/vendor/github.com/coreos/etcd/raft/log_unstable.go
new file mode 100644
index 0000000..263af9c
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/log_unstable.go
@@ -0,0 +1,159 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "github.com/coreos/etcd/raft/raftpb"
+
+// unstable.entries[i] has raft log position i+unstable.offset.
+// Note that unstable.offset may be less than the highest log
+// position in storage; this means that the next write to storage
+// might need to truncate the log before persisting unstable.entries.
+type unstable struct {
+	// the incoming unstable snapshot, if any.
+	snapshot *pb.Snapshot
+	// all entries that have not yet been written to storage.
+	entries []pb.Entry
+	offset  uint64
+
+	logger Logger
+}
+
+// maybeFirstIndex returns the index of the first possible entry in entries
+// if it has a snapshot.
+func (u *unstable) maybeFirstIndex() (uint64, bool) {
+	if u.snapshot != nil {
+		return u.snapshot.Metadata.Index + 1, true
+	}
+	return 0, false
+}
+
+// maybeLastIndex returns the last index if it has at least one
+// unstable entry or snapshot.
+func (u *unstable) maybeLastIndex() (uint64, bool) {
+	if l := len(u.entries); l != 0 {
+		return u.offset + uint64(l) - 1, true
+	}
+	if u.snapshot != nil {
+		return u.snapshot.Metadata.Index, true
+	}
+	return 0, false
+}
+
+// maybeTerm returns the term of the entry at index i, if there
+// is any.
+func (u *unstable) maybeTerm(i uint64) (uint64, bool) {
+	if i < u.offset {
+		if u.snapshot == nil {
+			return 0, false
+		}
+		if u.snapshot.Metadata.Index == i {
+			return u.snapshot.Metadata.Term, true
+		}
+		return 0, false
+	}
+
+	last, ok := u.maybeLastIndex()
+	if !ok {
+		return 0, false
+	}
+	if i > last {
+		return 0, false
+	}
+	return u.entries[i-u.offset].Term, true
+}
+
+func (u *unstable) stableTo(i, t uint64) {
+	gt, ok := u.maybeTerm(i)
+	if !ok {
+		return
+	}
+	// if i < offset, term is matched with the snapshot
+	// only update the unstable entries if term is matched with
+	// an unstable entry.
+	if gt == t && i >= u.offset {
+		u.entries = u.entries[i+1-u.offset:]
+		u.offset = i + 1
+		u.shrinkEntriesArray()
+	}
+}
+
+// shrinkEntriesArray discards the underlying array used by the entries slice
+// if most of it isn't being used. This avoids holding references to a bunch of
+// potentially large entries that aren't needed anymore. Simply clearing the
+// entries wouldn't be safe because clients might still be using them.
+func (u *unstable) shrinkEntriesArray() {
+	// We replace the array if we're using less than half of the space in
+	// it. This number is fairly arbitrary, chosen as an attempt to balance
+	// memory usage vs number of allocations. It could probably be improved
+	// with some focused tuning.
+	const lenMultiple = 2
+	if len(u.entries) == 0 {
+		u.entries = nil
+	} else if len(u.entries)*lenMultiple < cap(u.entries) {
+		newEntries := make([]pb.Entry, len(u.entries))
+		copy(newEntries, u.entries)
+		u.entries = newEntries
+	}
+}
+
+func (u *unstable) stableSnapTo(i uint64) {
+	if u.snapshot != nil && u.snapshot.Metadata.Index == i {
+		u.snapshot = nil
+	}
+}
+
+func (u *unstable) restore(s pb.Snapshot) {
+	u.offset = s.Metadata.Index + 1
+	u.entries = nil
+	u.snapshot = &s
+}
+
+func (u *unstable) truncateAndAppend(ents []pb.Entry) {
+	after := ents[0].Index
+	switch {
+	case after == u.offset+uint64(len(u.entries)):
+		// after is the next index in the u.entries
+		// directly append
+		u.entries = append(u.entries, ents...)
+	case after <= u.offset:
+		u.logger.Infof("replace the unstable entries from index %d", after)
+		// The log is being truncated to before our current offset
+		// portion, so set the offset and replace the entries
+		u.offset = after
+		u.entries = ents
+	default:
+		// truncate to after and copy to u.entries
+		// then append
+		u.logger.Infof("truncate the unstable entries before index %d", after)
+		u.entries = append([]pb.Entry{}, u.slice(u.offset, after)...)
+		u.entries = append(u.entries, ents...)
+	}
+}
+
+func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
+	u.mustCheckOutOfBounds(lo, hi)
+	return u.entries[lo-u.offset : hi-u.offset]
+}
+
+// u.offset <= lo <= hi <= u.offset+len(u.entries)
+func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
+	if lo > hi {
+		u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
+	}
+	upper := u.offset + uint64(len(u.entries))
+	if lo < u.offset || hi > upper {
+		u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
+	}
+}
diff --git a/vendor/github.com/coreos/etcd/raft/logger.go b/vendor/github.com/coreos/etcd/raft/logger.go
new file mode 100644
index 0000000..426a77d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/logger.go
@@ -0,0 +1,126 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+	"io/ioutil"
+	"log"
+	"os"
+)
+
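+// Logger is the logging abstraction used by the raft package. It can be
+// replaced with SetLogger.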
+type Logger interface {
+	Debug(v ...interface{})
+	Debugf(format string, v ...interface{})
+
+	Error(v ...interface{})
+	Errorf(format string, v ...interface{})
+
+	Info(v ...interface{})
+	Infof(format string, v ...interface{})
+
+	Warning(v ...interface{})
+	Warningf(format string, v ...interface{})
+
+	Fatal(v ...interface{})
+	Fatalf(format string, v ...interface{})
+
+	Panic(v ...interface{})
+	Panicf(format string, v ...interface{})
+}
+
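+// SetLogger replaces the package-level logger used by the raft package.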
+func SetLogger(l Logger) { raftLogger = l }
+
+var (
+	defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
+	discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
+	raftLogger    = Logger(defaultLogger)
+)
+
+const (
+	calldepth = 2
+)
+
+// DefaultLogger is a default implementation of the Logger interface.
+type DefaultLogger struct {
+	*log.Logger
+	debug bool
+}
+
+func (l *DefaultLogger) EnableTimestamps() {
+	l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
+}
+
+func (l *DefaultLogger) EnableDebug() {
+	l.debug = true
+}
+
+func (l *DefaultLogger) Debug(v ...interface{}) {
+	if l.debug {
+		l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
+	}
+}
+
+func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
+	if l.debug {
+		l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
+	}
+}
+
+func (l *DefaultLogger) Info(v ...interface{}) {
+	l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Infof(format string, v ...interface{}) {
+	l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Error(v ...interface{}) {
+	l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
+	l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Warning(v ...interface{}) {
+	l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
+}
+
+func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
+	l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
+}
+
+func (l *DefaultLogger) Fatal(v ...interface{}) {
+	l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
+	os.Exit(1)
+}
+
+func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
+	l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
+	os.Exit(1)
+}
+
+func (l *DefaultLogger) Panic(v ...interface{}) {
+	l.Logger.Panic(v...)
+}
+
+func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
+	l.Logger.Panicf(format, v...)
+}
+
+func header(lvl, msg string) string {
+	return fmt.Sprintf("%s: %s", lvl, msg)
+}
diff --git a/vendor/github.com/coreos/etcd/raft/node.go b/vendor/github.com/coreos/etcd/raft/node.go
new file mode 100644
index 0000000..33a9db8
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/node.go
@@ -0,0 +1,539 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"context"
+	"errors"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+type SnapshotStatus int
+
+const (
+	SnapshotFinish  SnapshotStatus = 1
+	SnapshotFailure SnapshotStatus = 2
+)
+
+var (
+	emptyState = pb.HardState{}
+
+	// ErrStopped is returned by methods on Nodes that have been stopped.
+	ErrStopped = errors.New("raft: stopped")
+)
+
+// SoftState provides state that is useful for logging and debugging.
+// The state is volatile and does not need to be persisted to the WAL.
+type SoftState struct {
+	Lead      uint64 // must use atomic operations to access; keep 64-bit aligned.
+	RaftState StateType
+}
+
+func (a *SoftState) equal(b *SoftState) bool {
+	return a.Lead == b.Lead && a.RaftState == b.RaftState
+}
+
+// Ready encapsulates the entries and messages that are ready to read,
+// be saved to stable storage, committed or sent to other peers.
+// All fields in Ready are read-only.
+type Ready struct {
+	// The current volatile state of a Node.
+	// SoftState will be nil if there is no update.
+	// It is not required to consume or store SoftState.
+	*SoftState
+
+	// The current state of a Node to be saved to stable storage BEFORE
+	// Messages are sent.
+	// HardState will be equal to empty state if there is no update.
+	pb.HardState
+
+	// ReadStates can be used by the node to serve linearizable read requests locally
+	// when its applied index is greater than the index in ReadState.
+	// Note that the readState will be returned when raft receives msgReadIndex.
+	// The returned ReadState is only valid for the request that requested to read.
+	ReadStates []ReadState
+
+	// Entries specifies entries to be saved to stable storage BEFORE
+	// Messages are sent.
+	Entries []pb.Entry
+
+	// Snapshot specifies the snapshot to be saved to stable storage.
+	Snapshot pb.Snapshot
+
+	// CommittedEntries specifies entries to be committed to a
+	// store/state-machine. These have previously been committed to stable
+	// store.
+	CommittedEntries []pb.Entry
+
+	// Messages specifies outbound messages to be sent AFTER Entries are
+	// committed to stable storage.
+	// If it contains a MsgSnap message, the application MUST report back to raft
+	// when the snapshot has been received or has failed by calling ReportSnapshot.
+	Messages []pb.Message
+
+	// MustSync indicates whether the HardState and Entries must be synchronously
+	// written to disk or if an asynchronous write is permissible.
+	MustSync bool
+}
+
+func isHardStateEqual(a, b pb.HardState) bool {
+	return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
+}
+
+// IsEmptyHardState returns true if the given HardState is empty.
+func IsEmptyHardState(st pb.HardState) bool {
+	return isHardStateEqual(st, emptyState)
+}
+
+// IsEmptySnap returns true if the given Snapshot is empty.
+func IsEmptySnap(sp pb.Snapshot) bool {
+	return sp.Metadata.Index == 0
+}
+
+func (rd Ready) containsUpdates() bool {
+	return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
+		!IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
+		len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0 || len(rd.ReadStates) != 0
+}
+
+// Node represents a node in a raft cluster.
+type Node interface {
+	// Tick increments the internal logical clock for the Node by a single tick. Election
+	// timeouts and heartbeat timeouts are in units of ticks.
+	Tick()
+	// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
+	Campaign(ctx context.Context) error
+	// Propose proposes that data be appended to the log.
+	Propose(ctx context.Context, data []byte) error
+	// ProposeConfChange proposes a config change.
+	// At most one ConfChange can be in the process of going through consensus.
+	// The application needs to call ApplyConfChange when applying an EntryConfChange type entry.
+	ProposeConfChange(ctx context.Context, cc pb.ConfChange) error
+	// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
+	Step(ctx context.Context, msg pb.Message) error
+
+	// Ready returns a channel that returns the current point-in-time state.
+	// Users of the Node must call Advance after retrieving the state returned by Ready.
+	//
+	// NOTE: No committed entries from the next Ready may be applied until all committed entries
+	// and snapshots from the previous one have finished.
+	Ready() <-chan Ready
+
+	// Advance notifies the Node that the application has saved progress up to the last Ready.
+	// It prepares the node to return the next available Ready.
+	//
+	// The application should generally call Advance after it applies the entries in last Ready.
+	//
+	// However, as an optimization, the application may call Advance while it is applying the
+	// commands. For example, when the last Ready contains a snapshot, the application might take
+	// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
+	// progress, it can call Advance before finishing applying the last ready.
+	Advance()
+	// ApplyConfChange applies config change to the local node.
+	// Returns an opaque ConfState protobuf which must be recorded
+	// in snapshots. Will never return nil; it returns a pointer only
+	// to match MemoryStorage.Compact.
+	ApplyConfChange(cc pb.ConfChange) *pb.ConfState
+
+	// TransferLeadership attempts to transfer leadership to the given transferee.
+	TransferLeadership(ctx context.Context, lead, transferee uint64)
+
+	// ReadIndex requests a read state. The read state will be set in the ready.
+	// Read state has a read index. Once the application advances further than the read
+	// index, any linearizable read requests issued before the read request can be
+	// processed safely. The read state will have the same rctx attached.
+	ReadIndex(ctx context.Context, rctx []byte) error
+
+	// Status returns the current status of the raft state machine.
+	Status() Status
+	// ReportUnreachable reports the given node is not reachable for the last send.
+	ReportUnreachable(id uint64)
+	// ReportSnapshot reports the status of the sent snapshot.
+	ReportSnapshot(id uint64, status SnapshotStatus)
+	// Stop performs any necessary termination of the Node.
+	Stop()
+}
+
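+// Peer identifies a raft peer passed to StartNode. Context is optional
+// application data carried in that peer's bootstrap ConfChange entry.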
+type Peer struct {
+	ID      uint64
+	Context []byte
+}
+
+// StartNode returns a new Node given configuration and a list of raft peers.
+// It appends a ConfChangeAddNode entry for each given peer to the initial log.
+func StartNode(c *Config, peers []Peer) Node {
+	r := newRaft(c)
+	// become the follower at term 1 and apply initial configuration
+	// entries of term 1
+	r.becomeFollower(1, None)
+	for _, peer := range peers {
+		cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+		d, err := cc.Marshal()
+		if err != nil {
+			panic("unexpected marshal error")
+		}
+		e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d}
+		r.raftLog.append(e)
+	}
+	// Mark these initial entries as committed.
+	// TODO(bdarnell): These entries are still unstable; do we need to preserve
+	// the invariant that committed < unstable?
+	r.raftLog.committed = r.raftLog.lastIndex()
+	// Now apply them, mainly so that the application can call Campaign
+	// immediately after StartNode in tests. Note that these nodes will
+	// be added to raft twice: here and when the application's Ready
+	// loop calls ApplyConfChange. The calls to addNode must come after
+	// all calls to raftLog.append so progress.next is set after these
+	// bootstrapping entries (it is an error if we try to append these
+	// entries since they have already been committed).
+	// We do not set raftLog.applied so the application will be able
+	// to observe all conf changes via Ready.CommittedEntries.
+	for _, peer := range peers {
+		r.addNode(peer.ID)
+	}
+
+	n := newNode()
+	n.logger = c.Logger
+	go n.run(r)
+	return &n
+}
+
+// RestartNode is similar to StartNode but does not take a list of peers.
+// The current membership of the cluster will be restored from the Storage.
+// If the caller has an existing state machine, pass in the last log index that
+// has been applied to it; otherwise use zero.
+func RestartNode(c *Config) Node {
+	r := newRaft(c)
+
+	n := newNode()
+	n.logger = c.Logger
+	go n.run(r)
+	return &n
+}
+
+// node is the canonical implementation of the Node interface
+type node struct {
+	propc      chan pb.Message
+	recvc      chan pb.Message
+	confc      chan pb.ConfChange
+	confstatec chan pb.ConfState
+	readyc     chan Ready
+	advancec   chan struct{}
+	tickc      chan struct{}
+	done       chan struct{}
+	stop       chan struct{}
+	status     chan chan Status
+
+	logger Logger
+}
+
+func newNode() node {
+	return node{
+		propc:      make(chan pb.Message),
+		recvc:      make(chan pb.Message),
+		confc:      make(chan pb.ConfChange),
+		confstatec: make(chan pb.ConfState),
+		readyc:     make(chan Ready),
+		advancec:   make(chan struct{}),
+		// make tickc a buffered chan, so the raft node can buffer some ticks when the node
+		// is busy processing raft messages. The raft node will resume processing buffered
+		// ticks when it becomes idle.
+		tickc:  make(chan struct{}, 128),
+		done:   make(chan struct{}),
+		stop:   make(chan struct{}),
+		status: make(chan chan Status),
+	}
+}
+
+func (n *node) Stop() {
+	select {
+	case n.stop <- struct{}{}:
+		// Not already stopped, so trigger it
+	case <-n.done:
+		// Node has already been stopped - no need to do anything
+		return
+	}
+	// Block until the stop has been acknowledged by run()
+	<-n.done
+}
+
+func (n *node) run(r *raft) {
+	var propc chan pb.Message
+	var readyc chan Ready
+	var advancec chan struct{}
+	var prevLastUnstablei, prevLastUnstablet uint64
+	var havePrevLastUnstablei bool
+	var prevSnapi uint64
+	var rd Ready
+
+	lead := None
+	prevSoftSt := r.softState()
+	prevHardSt := emptyState
+
+	for {
+		if advancec != nil {
+			readyc = nil
+		} else {
+			rd = newReady(r, prevSoftSt, prevHardSt)
+			if rd.containsUpdates() {
+				readyc = n.readyc
+			} else {
+				readyc = nil
+			}
+		}
+
+		if lead != r.lead {
+			if r.hasLeader() {
+				if lead == None {
+					r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term)
+				} else {
+					r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term)
+				}
+				propc = n.propc
+			} else {
+				r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term)
+				propc = nil
+			}
+			lead = r.lead
+		}
+
+		select {
+		// TODO: maybe buffer the config propose if there exists one (the way
+		// described in raft dissertation)
+		// Currently it is dropped in Step silently.
+		case m := <-propc:
+			m.From = r.id
+			r.Step(m)
+		case m := <-n.recvc:
+			// filter out response message from unknown From.
+			if pr := r.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
+				r.Step(m) // raft never returns an error
+			}
+		case cc := <-n.confc:
+			if cc.NodeID == None {
+				r.resetPendingConf()
+				select {
+				case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
+				case <-n.done:
+				}
+				break
+			}
+			switch cc.Type {
+			case pb.ConfChangeAddNode:
+				r.addNode(cc.NodeID)
+			case pb.ConfChangeAddLearnerNode:
+				r.addLearner(cc.NodeID)
+			case pb.ConfChangeRemoveNode:
+				// block incoming proposal when local node is
+				// removed
+				if cc.NodeID == r.id {
+					propc = nil
+				}
+				r.removeNode(cc.NodeID)
+			case pb.ConfChangeUpdateNode:
+				r.resetPendingConf()
+			default:
+				panic("unexpected conf type")
+			}
+			select {
+			case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
+			case <-n.done:
+			}
+		case <-n.tickc:
+			r.tick()
+		case readyc <- rd:
+			if rd.SoftState != nil {
+				prevSoftSt = rd.SoftState
+			}
+			if len(rd.Entries) > 0 {
+				prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index
+				prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term
+				havePrevLastUnstablei = true
+			}
+			if !IsEmptyHardState(rd.HardState) {
+				prevHardSt = rd.HardState
+			}
+			if !IsEmptySnap(rd.Snapshot) {
+				prevSnapi = rd.Snapshot.Metadata.Index
+			}
+
+			r.msgs = nil
+			r.readStates = nil
+			advancec = n.advancec
+		case <-advancec:
+			if prevHardSt.Commit != 0 {
+				r.raftLog.appliedTo(prevHardSt.Commit)
+			}
+			if havePrevLastUnstablei {
+				r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet)
+				havePrevLastUnstablei = false
+			}
+			r.raftLog.stableSnapTo(prevSnapi)
+			advancec = nil
+		case c := <-n.status:
+			c <- getStatus(r)
+		case <-n.stop:
+			close(n.done)
+			return
+		}
+	}
+}
+
+// Tick increments the internal logical clock for this Node. Election timeouts
+// and heartbeat timeouts are in units of ticks.
+func (n *node) Tick() {
+	select {
+	case n.tickc <- struct{}{}:
+	case <-n.done:
+	default:
+		n.logger.Warningf("A tick missed to fire. Node blocks too long!")
+	}
+}
+
+func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
+
+func (n *node) Propose(ctx context.Context, data []byte) error {
+	return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
+}
+
+func (n *node) Step(ctx context.Context, m pb.Message) error {
+	// ignore unexpected local messages received over the network
+	if IsLocalMsg(m.Type) {
+		// TODO: return an error?
+		return nil
+	}
+	return n.step(ctx, m)
+}
+
+func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error {
+	data, err := cc.Marshal()
+	if err != nil {
+		return err
+	}
+	return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}})
+}
+
+// step advances the state machine using m. ctx.Err() will be returned,
+// if any.
+func (n *node) step(ctx context.Context, m pb.Message) error {
+	ch := n.recvc
+	if m.Type == pb.MsgProp {
+		ch = n.propc
+	}
+
+	select {
+	case ch <- m:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-n.done:
+		return ErrStopped
+	}
+}
+
+func (n *node) Ready() <-chan Ready { return n.readyc }
+
+func (n *node) Advance() {
+	select {
+	case n.advancec <- struct{}{}:
+	case <-n.done:
+	}
+}
+
+func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
+	var cs pb.ConfState
+	select {
+	case n.confc <- cc:
+	case <-n.done:
+	}
+	select {
+	case cs = <-n.confstatec:
+	case <-n.done:
+	}
+	return &cs
+}
+
+func (n *node) Status() Status {
+	c := make(chan Status)
+	select {
+	case n.status <- c:
+		return <-c
+	case <-n.done:
+		return Status{}
+	}
+}
+
+func (n *node) ReportUnreachable(id uint64) {
+	select {
+	case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}:
+	case <-n.done:
+	}
+}
+
+func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) {
+	rej := status == SnapshotFailure
+
+	select {
+	case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}:
+	case <-n.done:
+	}
+}
+
+func (n *node) TransferLeadership(ctx context.Context, lead, transferee uint64) {
+	select {
+	// manually set 'from' and 'to', so that the leader can voluntarily transfer its leadership
+	case n.recvc <- pb.Message{Type: pb.MsgTransferLeader, From: transferee, To: lead}:
+	case <-n.done:
+	case <-ctx.Done():
+	}
+}
+
+func (n *node) ReadIndex(ctx context.Context, rctx []byte) error {
+	return n.step(ctx, pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+}
+
+func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready {
+	rd := Ready{
+		Entries:          r.raftLog.unstableEntries(),
+		CommittedEntries: r.raftLog.nextEnts(),
+		Messages:         r.msgs,
+	}
+	if softSt := r.softState(); !softSt.equal(prevSoftSt) {
+		rd.SoftState = softSt
+	}
+	if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) {
+		rd.HardState = hardSt
+	}
+	if r.raftLog.unstable.snapshot != nil {
+		rd.Snapshot = *r.raftLog.unstable.snapshot
+	}
+	if len(r.readStates) != 0 {
+		rd.ReadStates = r.readStates
+	}
+	rd.MustSync = MustSync(rd.HardState, prevHardSt, len(rd.Entries))
+	return rd
+}
+
+// MustSync returns true if the hard state and count of Raft entries indicate
+// that a synchronous write to persistent storage is required.
+func MustSync(st, prevst pb.HardState, entsnum int) bool {
+	// Persistent state on all servers:
+	// (Updated on stable storage before responding to RPCs)
+	// currentTerm
+	// votedFor
+	// log entries[]
+	return entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term
+}
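+
+// Illustrative sketch only: when handling a Ready, an application can consult
+// rd.MustSync to decide whether the write of the hard state and entries must be
+// flushed to disk before it is acknowledged. wal here is a hypothetical
+// application-side write-ahead log:
+//
+//	if err := wal.Save(rd.HardState, rd.Entries); err != nil {
+//		log.Fatal(err)
+//	}
+//	if rd.MustSync {
+//		if err := wal.Sync(); err != nil {
+//			log.Fatal(err)
+//		}
+//	}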
diff --git a/vendor/github.com/coreos/etcd/raft/progress.go b/vendor/github.com/coreos/etcd/raft/progress.go
new file mode 100644
index 0000000..ef3787d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/progress.go
@@ -0,0 +1,284 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import "fmt"
+
+const (
+	ProgressStateProbe ProgressStateType = iota
+	ProgressStateReplicate
+	ProgressStateSnapshot
+)
+
+type ProgressStateType uint64
+
+var prstmap = [...]string{
+	"ProgressStateProbe",
+	"ProgressStateReplicate",
+	"ProgressStateSnapshot",
+}
+
+func (st ProgressStateType) String() string { return prstmap[uint64(st)] }
+
+// Progress represents a follower’s progress in the view of the leader. The leader
+// maintains the progress of all followers, and sends entries to each follower based on its progress.
+type Progress struct {
+	Match, Next uint64
+	// State defines how the leader should interact with the follower.
+	//
+	// When in ProgressStateProbe, leader sends at most one replication message
+	// per heartbeat interval. It also probes actual progress of the follower.
+	//
+	// When in ProgressStateReplicate, leader optimistically increases next
+	// to the latest entry sent after sending replication message. This is
+	// an optimized state for fast replicating log entries to the follower.
+	//
+	// When in ProgressStateSnapshot, the leader has already sent out a snapshot
+	// and stops sending any replication message.
+	State ProgressStateType
+
+	// Paused is used in ProgressStateProbe.
+	// When Paused is true, raft should pause sending replication messages to this peer.
+	Paused bool
+	// PendingSnapshot is used in ProgressStateSnapshot.
+	// If there is a pending snapshot, PendingSnapshot will be set to the
+	// index of the snapshot. If PendingSnapshot is set, the replication process of
+	// this Progress will be paused. raft will not resend the snapshot until the pending one
+	// is reported to have failed.
+	PendingSnapshot uint64
+
+	// RecentActive is true if the progress is recently active. Receiving any messages
+	// from the corresponding follower indicates the progress is active.
+	// RecentActive can be reset to false after an election timeout.
+	RecentActive bool
+
+	// inflights is a sliding window for the inflight messages.
+	// Each inflight message contains one or more log entries.
+	// The max number of entries per message is defined in raft config as MaxSizePerMsg.
+	// Thus inflight effectively limits both the number of inflight messages
+	// and the bandwidth each Progress can use.
+	// When inflights is full, no more message should be sent.
+	// When a leader sends out a message, the index of the last
+	// entry should be added to inflights. The index MUST be added
+	// into inflights in order.
+	// When a leader receives a reply, the previous inflights should
+	// be freed by calling inflights.freeTo with the index of the last
+	// received entry.
+	ins *inflights
+
+	// IsLearner is true if this progress is tracked for a learner.
+	IsLearner bool
+}
+
+func (pr *Progress) resetState(state ProgressStateType) {
+	pr.Paused = false
+	pr.PendingSnapshot = 0
+	pr.State = state
+	pr.ins.reset()
+}
+
+func (pr *Progress) becomeProbe() {
+	// If the original state is ProgressStateSnapshot, progress knows that
+	// the pending snapshot has been sent to this peer successfully, then
+	// probes from pendingSnapshot + 1.
+	if pr.State == ProgressStateSnapshot {
+		pendingSnapshot := pr.PendingSnapshot
+		pr.resetState(ProgressStateProbe)
+		pr.Next = max(pr.Match+1, pendingSnapshot+1)
+	} else {
+		pr.resetState(ProgressStateProbe)
+		pr.Next = pr.Match + 1
+	}
+}
+
+func (pr *Progress) becomeReplicate() {
+	pr.resetState(ProgressStateReplicate)
+	pr.Next = pr.Match + 1
+}
+
+func (pr *Progress) becomeSnapshot(snapshoti uint64) {
+	pr.resetState(ProgressStateSnapshot)
+	pr.PendingSnapshot = snapshoti
+}
+
+// maybeUpdate returns false if the given index n comes from an outdated message.
+// Otherwise it updates the progress and returns true.
+func (pr *Progress) maybeUpdate(n uint64) bool {
+	var updated bool
+	if pr.Match < n {
+		pr.Match = n
+		updated = true
+		pr.resume()
+	}
+	if pr.Next < n+1 {
+		pr.Next = n + 1
+	}
+	return updated
+}
+
+func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 }
+
+// maybeDecrTo returns false if the given rejected index comes from an out of order message.
+// Otherwise it decreases the progress's next index to min(rejected, last+1) and returns true.
+func (pr *Progress) maybeDecrTo(rejected, last uint64) bool {
+	if pr.State == ProgressStateReplicate {
+		// the rejection must be stale if the progress has matched and "rejected"
+		// is smaller than "match".
+		if rejected <= pr.Match {
+			return false
+		}
+		// directly decrease next to match + 1
+		pr.Next = pr.Match + 1
+		return true
+	}
+
+	// the rejection must be stale if "rejected" does not match next - 1
+	if pr.Next-1 != rejected {
+		return false
+	}
+
+	if pr.Next = min(rejected, last+1); pr.Next < 1 {
+		pr.Next = 1
+	}
+	pr.resume()
+	return true
+}
+
+func (pr *Progress) pause()  { pr.Paused = true }
+func (pr *Progress) resume() { pr.Paused = false }
+
+// IsPaused returns whether sending log entries to this node has been
+// paused. A node may be paused because it has rejected recent
+// MsgApps, is currently waiting for a snapshot, or has reached the
+// MaxInflightMsgs limit.
+func (pr *Progress) IsPaused() bool {
+	switch pr.State {
+	case ProgressStateProbe:
+		return pr.Paused
+	case ProgressStateReplicate:
+		return pr.ins.full()
+	case ProgressStateSnapshot:
+		return true
+	default:
+		panic("unexpected state")
+	}
+}
+
+func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 }
+
+// needSnapshotAbort returns true if snapshot progress's Match
+// is equal or higher than the pendingSnapshot.
+func (pr *Progress) needSnapshotAbort() bool {
+	return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot
+}
+
+func (pr *Progress) String() string {
+	return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.IsPaused(), pr.PendingSnapshot)
+}
+
+type inflights struct {
+	// the starting index in the buffer
+	start int
+	// number of inflights in the buffer
+	count int
+
+	// the size of the buffer
+	size int
+
+	// buffer contains the index of the last entry
+	// inside one message.
+	buffer []uint64
+}
+
+func newInflights(size int) *inflights {
+	return &inflights{
+		size: size,
+	}
+}
+
+// add adds an inflight into inflights
+func (in *inflights) add(inflight uint64) {
+	if in.full() {
+		panic("cannot add into a full inflights")
+	}
+	next := in.start + in.count
+	size := in.size
+	if next >= size {
+		next -= size
+	}
+	if next >= len(in.buffer) {
+		in.growBuf()
+	}
+	in.buffer[next] = inflight
+	in.count++
+}
+
+// grow the inflight buffer by doubling up to inflights.size. We grow on demand
+// instead of preallocating to inflights.size to handle systems which have
+// thousands of Raft groups per process.
+func (in *inflights) growBuf() {
+	newSize := len(in.buffer) * 2
+	if newSize == 0 {
+		newSize = 1
+	} else if newSize > in.size {
+		newSize = in.size
+	}
+	newBuffer := make([]uint64, newSize)
+	copy(newBuffer, in.buffer)
+	in.buffer = newBuffer
+}
+
+// freeTo frees the inflights with indexes smaller than or equal to the given `to`.
+func (in *inflights) freeTo(to uint64) {
+	if in.count == 0 || to < in.buffer[in.start] {
+		// out of the left side of the window
+		return
+	}
+
+	idx := in.start
+	var i int
+	for i = 0; i < in.count; i++ {
+		if to < in.buffer[idx] { // found the first large inflight
+			break
+		}
+
+		// increase index and maybe rotate
+		size := in.size
+		if idx++; idx >= size {
+			idx -= size
+		}
+	}
+	// free i inflights and set new start index
+	in.count -= i
+	in.start = idx
+	if in.count == 0 {
+		// inflights is empty, reset the start index so that we don't grow the
+		// buffer unnecessarily.
+		in.start = 0
+	}
+}
+
+func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) }
+
+// full returns true if the inflights is full.
+func (in *inflights) full() bool {
+	return in.count == in.size
+}
+
+// reset frees all inflights.
+func (in *inflights) reset() {
+	in.count = 0
+	in.start = 0
+}
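+
+// Illustrative sketch of the inflights sliding window (the indexes below are made
+// up for the example): the leader records the index of the last entry in every
+// in-flight append and frees slots as acknowledgements arrive, pausing replication
+// while the window is full.
+//
+//	in := newInflights(3)
+//	in.add(5)    // sent entries up to index 5
+//	in.add(8)    // sent entries up to index 8
+//	in.add(9)    // sent entries up to index 9; in.full() is now true
+//	in.freeTo(8) // follower acked index 8; slots for 5 and 8 are freed
+//	// in.full() is false again, so more appends may be sent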
diff --git a/vendor/github.com/coreos/etcd/raft/raft.go b/vendor/github.com/coreos/etcd/raft/raft.go
new file mode 100644
index 0000000..22ff138
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/raft.go
@@ -0,0 +1,1407 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"math"
+	"math/rand"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// None is a placeholder node ID used when there is no leader.
+const None uint64 = 0
+const noLimit = math.MaxUint64
+
+// Possible values for StateType.
+const (
+	StateFollower StateType = iota
+	StateCandidate
+	StateLeader
+	StatePreCandidate
+	numStates
+)
+
+type ReadOnlyOption int
+
+const (
+	// ReadOnlySafe guarantees the linearizability of the read only request by
+	// communicating with the quorum. It is the default and suggested option.
+	ReadOnlySafe ReadOnlyOption = iota
+	// ReadOnlyLeaseBased ensures linearizability of the read only request by
+	// relying on the leader lease. It can be affected by clock drift.
+	// If the clock drift is unbounded, leader might keep the lease longer than it
+	// should (clock can move backward/pause without any bound). ReadIndex is not safe
+	// in that case.
+	ReadOnlyLeaseBased
+)
+
+// Possible values for CampaignType
+const (
+	// campaignPreElection represents the first phase of a normal election when
+	// Config.PreVote is true.
+	campaignPreElection CampaignType = "CampaignPreElection"
+	// campaignElection represents a normal (time-based) election (the second phase
+	// of the election when Config.PreVote is true).
+	campaignElection CampaignType = "CampaignElection"
+	// campaignTransfer represents the type of leader transfer
+	campaignTransfer CampaignType = "CampaignTransfer"
+)
+
+// lockedRand is a small wrapper around rand.Rand to provide
+// synchronization. Only the methods needed by the code are exposed
+// (e.g. Intn).
+type lockedRand struct {
+	mu   sync.Mutex
+	rand *rand.Rand
+}
+
+func (r *lockedRand) Intn(n int) int {
+	r.mu.Lock()
+	v := r.rand.Intn(n)
+	r.mu.Unlock()
+	return v
+}
+
+var globalRand = &lockedRand{
+	rand: rand.New(rand.NewSource(time.Now().UnixNano())),
+}
+
+// CampaignType represents the type of campaigning
+// the reason we use the type of string instead of uint64
+// is because it's simpler to compare and fill in raft entries
+type CampaignType string
+
+// StateType represents the role of a node in a cluster.
+type StateType uint64
+
+var stmap = [...]string{
+	"StateFollower",
+	"StateCandidate",
+	"StateLeader",
+	"StatePreCandidate",
+}
+
+func (st StateType) String() string {
+	return stmap[uint64(st)]
+}
+
+// Config contains the parameters to start a raft.
+type Config struct {
+	// ID is the identity of the local raft. ID cannot be 0.
+	ID uint64
+
+	// peers contains the IDs of all nodes (including self) in the raft cluster. It
+	// should only be set when starting a new raft cluster. Restarting raft from
+	// previous configuration will panic if peers is set. peers is private and only
+	// used for testing right now.
+	peers []uint64
+
+	// learners contains the IDs of all learner nodes (including self if the local node is a learner) in the raft cluster.
+	// Learners only receive entries from the leader node. They do not vote or promote themselves.
+	learners []uint64
+
+	// ElectionTick is the number of Node.Tick invocations that must pass between
+	// elections. That is, if a follower does not receive any message from the
+	// leader of current term before ElectionTick has elapsed, it will become
+	// candidate and start an election. ElectionTick must be greater than
+	// HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid
+	// unnecessary leader switching.
+	ElectionTick int
+	// HeartbeatTick is the number of Node.Tick invocations that must pass between
+	// heartbeats. That is, a leader sends heartbeat messages to maintain its
+	// leadership every HeartbeatTick ticks.
+	HeartbeatTick int
+
+	// Storage is the storage for raft. raft generates entries and states to be
+	// stored in storage. raft reads the persisted entries and states out of
+	// Storage when it needs. raft reads out the previous state and configuration
+	// out of storage when restarting.
+	Storage Storage
+	// Applied is the last applied index. It should only be set when restarting
+	// raft. raft will not return entries to the application smaller or equal to
+	// Applied. If Applied is unset when restarting, raft might return previous
+	// applied entries. This is a very application dependent configuration.
+	Applied uint64
+
+	// MaxSizePerMsg limits the max size of each append message. A smaller value
+	// lowers the raft recovery cost (initial probing and message loss during normal
+	// operation). On the other hand, it might affect the throughput during normal
+	// replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per
+	// message.
+	MaxSizePerMsg uint64
+	// MaxInflightMsgs limits the max number of in-flight append messages during
+	// optimistic replication phase. The application transportation layer usually
+	// has its own sending buffer over TCP/UDP. Set MaxInflightMsgs to avoid
+	// overflowing that sending buffer. TODO (xiangli): feedback to application to
+	// limit the proposal rate?
+	MaxInflightMsgs int
+
+	// CheckQuorum specifies if the leader should check quorum activity. Leader
+	// steps down when quorum is not active for an electionTimeout.
+	CheckQuorum bool
+
+	// PreVote enables the Pre-Vote algorithm described in raft thesis section
+	// 9.6. This prevents disruption when a node that has been partitioned away
+	// rejoins the cluster.
+	PreVote bool
+
+	// ReadOnlyOption specifies how the read only request is processed.
+	//
+	// ReadOnlySafe guarantees the linearizability of the read only request by
+	// communicating with the quorum. It is the default and suggested option.
+	//
+	// ReadOnlyLeaseBased ensures linearizability of the read only request by
+	// relying on the leader lease. It can be affected by clock drift.
+	// If the clock drift is unbounded, leader might keep the lease longer than it
+	// should (clock can move backward/pause without any bound). ReadIndex is not safe
+	// in that case.
+	// CheckQuorum MUST be enabled if ReadOnlyOption is ReadOnlyLeaseBased.
+	ReadOnlyOption ReadOnlyOption
+
+	// Logger is the logger used for raft log. For a multinode setup, which can host
+	// multiple raft groups, each raft group can have its own logger.
+	Logger Logger
+
+	// DisableProposalForwarding, when set to true, means that followers will drop
+	// proposals, rather than forwarding them to the leader. One use case for
+	// this feature would be in a situation where the Raft leader is used to
+	// compute the data of a proposal, for example, adding a timestamp from a
+	// hybrid logical clock to data in a monotonically increasing way. Forwarding
+	// should be disabled to prevent a follower with an inaccurate hybrid
+	// logical clock from assigning the timestamp and then forwarding the data
+	// to the leader.
+	DisableProposalForwarding bool
+}
+
+func (c *Config) validate() error {
+	if c.ID == None {
+		return errors.New("cannot use none as id")
+	}
+
+	if c.HeartbeatTick <= 0 {
+		return errors.New("heartbeat tick must be greater than 0")
+	}
+
+	if c.ElectionTick <= c.HeartbeatTick {
+		return errors.New("election tick must be greater than heartbeat tick")
+	}
+
+	if c.Storage == nil {
+		return errors.New("storage cannot be nil")
+	}
+
+	if c.MaxInflightMsgs <= 0 {
+		return errors.New("max inflight messages must be greater than 0")
+	}
+
+	if c.Logger == nil {
+		c.Logger = raftLogger
+	}
+
+	if c.ReadOnlyOption == ReadOnlyLeaseBased && !c.CheckQuorum {
+		return errors.New("CheckQuorum must be enabled when ReadOnlyOption is ReadOnlyLeaseBased")
+	}
+
+	return nil
+}
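+
+// Illustrative sketch of a typical Config; the values are examples only, chosen to
+// follow the guidance above that ElectionTick should be several times HeartbeatTick.
+// NewMemoryStorage is the in-memory Storage implementation provided by this package:
+//
+//	c := &Config{
+//		ID:              0x01,
+//		ElectionTick:    10,
+//		HeartbeatTick:   1,
+//		Storage:         NewMemoryStorage(),
+//		MaxSizePerMsg:   4096 * 1024,
+//		MaxInflightMsgs: 256,
+//	}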
+
+type raft struct {
+	id uint64
+
+	Term uint64
+	Vote uint64
+
+	readStates []ReadState
+
+	// the log
+	raftLog *raftLog
+
+	maxInflight int
+	maxMsgSize  uint64
+	prs         map[uint64]*Progress
+	learnerPrs  map[uint64]*Progress
+
+	state StateType
+
+	// isLearner is true if the local raft node is a learner.
+	isLearner bool
+
+	votes map[uint64]bool
+
+	msgs []pb.Message
+
+	// the leader id
+	lead uint64
+	// leadTransferee is the id of the leader transfer target when its value is not zero.
+	// Follow the procedure defined in raft thesis 3.10.
+	leadTransferee uint64
+	// A new configuration change is ignored if there is an unapplied configuration change.
+	pendingConf bool
+
+	readOnly *readOnly
+
+	// number of ticks since it reached last electionTimeout when it is leader
+	// or candidate.
+	// number of ticks since it reached last electionTimeout or received a
+	// valid message from current leader when it is a follower.
+	electionElapsed int
+
+	// number of ticks since it reached last heartbeatTimeout.
+	// only leader keeps heartbeatElapsed.
+	heartbeatElapsed int
+
+	checkQuorum bool
+	preVote     bool
+
+	heartbeatTimeout int
+	electionTimeout  int
+	// randomizedElectionTimeout is a random number between
+	// [electiontimeout, 2 * electiontimeout - 1]. It gets reset
+	// when raft changes its state to follower or candidate.
+	randomizedElectionTimeout int
+	disableProposalForwarding bool
+
+	tick func()
+	step stepFunc
+
+	logger Logger
+}
+
+func newRaft(c *Config) *raft {
+	if err := c.validate(); err != nil {
+		panic(err.Error())
+	}
+	raftlog := newLog(c.Storage, c.Logger)
+	hs, cs, err := c.Storage.InitialState()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	peers := c.peers
+	learners := c.learners
+	if len(cs.Nodes) > 0 || len(cs.Learners) > 0 {
+		if len(peers) > 0 || len(learners) > 0 {
+			// TODO(bdarnell): the peers argument is always nil except in
+			// tests; the argument should be removed and these tests should be
+			// updated to specify their nodes through a snapshot.
+			panic("cannot specify both newRaft(peers, learners) and ConfState.(Nodes, Learners)")
+		}
+		peers = cs.Nodes
+		learners = cs.Learners
+	}
+	r := &raft{
+		id:                        c.ID,
+		lead:                      None,
+		isLearner:                 false,
+		raftLog:                   raftlog,
+		maxMsgSize:                c.MaxSizePerMsg,
+		maxInflight:               c.MaxInflightMsgs,
+		prs:                       make(map[uint64]*Progress),
+		learnerPrs:                make(map[uint64]*Progress),
+		electionTimeout:           c.ElectionTick,
+		heartbeatTimeout:          c.HeartbeatTick,
+		logger:                    c.Logger,
+		checkQuorum:               c.CheckQuorum,
+		preVote:                   c.PreVote,
+		readOnly:                  newReadOnly(c.ReadOnlyOption),
+		disableProposalForwarding: c.DisableProposalForwarding,
+	}
+	for _, p := range peers {
+		r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)}
+	}
+	for _, p := range learners {
+		if _, ok := r.prs[p]; ok {
+			panic(fmt.Sprintf("node %x is in both learner and peer list", p))
+		}
+		r.learnerPrs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight), IsLearner: true}
+		if r.id == p {
+			r.isLearner = true
+		}
+	}
+
+	if !isHardStateEqual(hs, emptyState) {
+		r.loadState(hs)
+	}
+	if c.Applied > 0 {
+		raftlog.appliedTo(c.Applied)
+	}
+	r.becomeFollower(r.Term, None)
+
+	var nodesStrs []string
+	for _, n := range r.nodes() {
+		nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n))
+	}
+
+	r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]",
+		r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm())
+	return r
+}
+
+func (r *raft) hasLeader() bool { return r.lead != None }
+
+func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} }
+
+func (r *raft) hardState() pb.HardState {
+	return pb.HardState{
+		Term:   r.Term,
+		Vote:   r.Vote,
+		Commit: r.raftLog.committed,
+	}
+}
+
+func (r *raft) quorum() int { return len(r.prs)/2 + 1 }
+
+func (r *raft) nodes() []uint64 {
+	nodes := make([]uint64, 0, len(r.prs)+len(r.learnerPrs))
+	for id := range r.prs {
+		nodes = append(nodes, id)
+	}
+	for id := range r.learnerPrs {
+		nodes = append(nodes, id)
+	}
+	sort.Sort(uint64Slice(nodes))
+	return nodes
+}
+
+// send fills in missing fields on m and queues it in r.msgs; the application
+// persists state from the next Ready and then sends the message to its mailbox.
+func (r *raft) send(m pb.Message) {
+	m.From = r.id
+	if m.Type == pb.MsgVote || m.Type == pb.MsgVoteResp || m.Type == pb.MsgPreVote || m.Type == pb.MsgPreVoteResp {
+		if m.Term == 0 {
+			// All {pre-,}campaign messages need to have the term set when
+			// sending.
+			// - MsgVote: m.Term is the term the node is campaigning for,
+			//   non-zero as we increment the term when campaigning.
+			// - MsgVoteResp: m.Term is the new r.Term if the MsgVote was
+			//   granted, non-zero for the same reason MsgVote is
+			// - MsgPreVote: m.Term is the term the node will campaign,
+			//   non-zero as we use m.Term to indicate the next term we'll be
+			//   campaigning for
+			// - MsgPreVoteResp: m.Term is the term received in the original
+			//   MsgPreVote if the pre-vote was granted, non-zero for the
+			//   same reasons MsgPreVote is
+			panic(fmt.Sprintf("term should be set when sending %s", m.Type))
+		}
+	} else {
+		if m.Term != 0 {
+			panic(fmt.Sprintf("term should not be set when sending %s (was %d)", m.Type, m.Term))
+		}
+		// do not attach term to MsgProp, MsgReadIndex
+		// proposals are a way to forward to the leader and
+		// should be treated as local message.
+		// MsgReadIndex is also forwarded to leader.
+		if m.Type != pb.MsgProp && m.Type != pb.MsgReadIndex {
+			m.Term = r.Term
+		}
+	}
+	r.msgs = append(r.msgs, m)
+}
+
+func (r *raft) getProgress(id uint64) *Progress {
+	if pr, ok := r.prs[id]; ok {
+		return pr
+	}
+
+	return r.learnerPrs[id]
+}
+
+// sendAppend sends an append RPC, with entries, to the given peer.
+func (r *raft) sendAppend(to uint64) {
+	pr := r.getProgress(to)
+	if pr.IsPaused() {
+		return
+	}
+	m := pb.Message{}
+	m.To = to
+
+	term, errt := r.raftLog.term(pr.Next - 1)
+	ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize)
+
+	if errt != nil || erre != nil { // send snapshot if we failed to get term or entries
+		if !pr.RecentActive {
+			r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to)
+			return
+		}
+
+		m.Type = pb.MsgSnap
+		snapshot, err := r.raftLog.snapshot()
+		if err != nil {
+			if err == ErrSnapshotTemporarilyUnavailable {
+				r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to)
+				return
+			}
+			panic(err) // TODO(bdarnell)
+		}
+		if IsEmptySnap(snapshot) {
+			panic("need non-empty snapshot")
+		}
+		m.Snapshot = snapshot
+		sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term
+		r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]",
+			r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr)
+		pr.becomeSnapshot(sindex)
+		r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr)
+	} else {
+		m.Type = pb.MsgApp
+		m.Index = pr.Next - 1
+		m.LogTerm = term
+		m.Entries = ents
+		m.Commit = r.raftLog.committed
+		if n := len(m.Entries); n != 0 {
+			switch pr.State {
+			// optimistically increase the next when in ProgressStateReplicate
+			case ProgressStateReplicate:
+				last := m.Entries[n-1].Index
+				pr.optimisticUpdate(last)
+				pr.ins.add(last)
+			case ProgressStateProbe:
+				pr.pause()
+			default:
+				r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State)
+			}
+		}
+	}
+	r.send(m)
+}
+
+// sendHeartbeat sends a heartbeat (MsgHeartbeat) message to the given peer.
+func (r *raft) sendHeartbeat(to uint64, ctx []byte) {
+	// Attach the commit as min(to.matched, r.committed).
+	// When the leader sends out heartbeat message,
+	// the receiver(follower) might not be matched with the leader
+	// or it might not have all the committed entries.
+	// The leader MUST NOT forward the follower's commit to
+	// an unmatched index.
+	commit := min(r.getProgress(to).Match, r.raftLog.committed)
+	m := pb.Message{
+		To:      to,
+		Type:    pb.MsgHeartbeat,
+		Commit:  commit,
+		Context: ctx,
+	}
+
+	r.send(m)
+}
+
+func (r *raft) forEachProgress(f func(id uint64, pr *Progress)) {
+	for id, pr := range r.prs {
+		f(id, pr)
+	}
+
+	for id, pr := range r.learnerPrs {
+		f(id, pr)
+	}
+}
+
+// bcastAppend sends append RPCs, with entries, to all peers that are not up-to-date
+// according to the progress recorded in r.prs.
+func (r *raft) bcastAppend() {
+	r.forEachProgress(func(id uint64, _ *Progress) {
+		if id == r.id {
+			return
+		}
+
+		r.sendAppend(id)
+	})
+}
+
+// bcastHeartbeat sends heartbeat RPCs, without entries, to all the peers.
+func (r *raft) bcastHeartbeat() {
+	lastCtx := r.readOnly.lastPendingRequestCtx()
+	if len(lastCtx) == 0 {
+		r.bcastHeartbeatWithCtx(nil)
+	} else {
+		r.bcastHeartbeatWithCtx([]byte(lastCtx))
+	}
+}
+
+func (r *raft) bcastHeartbeatWithCtx(ctx []byte) {
+	r.forEachProgress(func(id uint64, _ *Progress) {
+		if id == r.id {
+			return
+		}
+		r.sendHeartbeat(id, ctx)
+	})
+}
+
+// maybeCommit attempts to advance the commit index. Returns true if
+// the commit index changed (in which case the caller should call
+// r.bcastAppend).
+func (r *raft) maybeCommit() bool {
+	// TODO(bmizerany): optimize.. Currently naive
+	mis := make(uint64Slice, 0, len(r.prs))
+	for _, p := range r.prs {
+		mis = append(mis, p.Match)
+	}
+	sort.Sort(sort.Reverse(mis))
+	mci := mis[r.quorum()-1]
+	return r.raftLog.maybeCommit(mci, r.Term)
+}
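+
+// For example (values made up for illustration): with five voters whose Match
+// indexes are 9, 7, 5, 5 and 2, quorum() is 3; sorting in descending order gives
+// [9 7 5 5 2], so mis[quorum()-1] is 5 and the leader may commit up to index 5,
+// provided the entry at index 5 is from the current term.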
+
+func (r *raft) reset(term uint64) {
+	if r.Term != term {
+		r.Term = term
+		r.Vote = None
+	}
+	r.lead = None
+
+	r.electionElapsed = 0
+	r.heartbeatElapsed = 0
+	r.resetRandomizedElectionTimeout()
+
+	r.abortLeaderTransfer()
+
+	r.votes = make(map[uint64]bool)
+	r.forEachProgress(func(id uint64, pr *Progress) {
+		*pr = Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight), IsLearner: pr.IsLearner}
+		if id == r.id {
+			pr.Match = r.raftLog.lastIndex()
+		}
+	})
+
+	r.pendingConf = false
+	r.readOnly = newReadOnly(r.readOnly.option)
+}
+
+func (r *raft) appendEntry(es ...pb.Entry) {
+	li := r.raftLog.lastIndex()
+	for i := range es {
+		es[i].Term = r.Term
+		es[i].Index = li + 1 + uint64(i)
+	}
+	r.raftLog.append(es...)
+	r.getProgress(r.id).maybeUpdate(r.raftLog.lastIndex())
+	// Regardless of maybeCommit's return, our caller will call bcastAppend.
+	r.maybeCommit()
+}
+
+// tickElection is run by followers and candidates after r.electionTimeout.
+func (r *raft) tickElection() {
+	r.electionElapsed++
+
+	if r.promotable() && r.pastElectionTimeout() {
+		r.electionElapsed = 0
+		r.Step(pb.Message{From: r.id, Type: pb.MsgHup})
+	}
+}
+
+// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout.
+func (r *raft) tickHeartbeat() {
+	r.heartbeatElapsed++
+	r.electionElapsed++
+
+	if r.electionElapsed >= r.electionTimeout {
+		r.electionElapsed = 0
+		if r.checkQuorum {
+			r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum})
+		}
+		// If current leader cannot transfer leadership in electionTimeout, it becomes leader again.
+		if r.state == StateLeader && r.leadTransferee != None {
+			r.abortLeaderTransfer()
+		}
+	}
+
+	if r.state != StateLeader {
+		return
+	}
+
+	if r.heartbeatElapsed >= r.heartbeatTimeout {
+		r.heartbeatElapsed = 0
+		r.Step(pb.Message{From: r.id, Type: pb.MsgBeat})
+	}
+}
+
+func (r *raft) becomeFollower(term uint64, lead uint64) {
+	r.step = stepFollower
+	r.reset(term)
+	r.tick = r.tickElection
+	r.lead = lead
+	r.state = StateFollower
+	r.logger.Infof("%x became follower at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomeCandidate() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateLeader {
+		panic("invalid transition [leader -> candidate]")
+	}
+	r.step = stepCandidate
+	r.reset(r.Term + 1)
+	r.tick = r.tickElection
+	r.Vote = r.id
+	r.state = StateCandidate
+	r.logger.Infof("%x became candidate at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomePreCandidate() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateLeader {
+		panic("invalid transition [leader -> pre-candidate]")
+	}
+	// Becoming a pre-candidate changes our step functions and state,
+	// but doesn't change anything else. In particular it does not increase
+	// r.Term or change r.Vote.
+	r.step = stepCandidate
+	r.votes = make(map[uint64]bool)
+	r.tick = r.tickElection
+	r.lead = None
+	r.state = StatePreCandidate
+	r.logger.Infof("%x became pre-candidate at term %d", r.id, r.Term)
+}
+
+func (r *raft) becomeLeader() {
+	// TODO(xiangli) remove the panic when the raft implementation is stable
+	if r.state == StateFollower {
+		panic("invalid transition [follower -> leader]")
+	}
+	r.step = stepLeader
+	r.reset(r.Term)
+	r.tick = r.tickHeartbeat
+	r.lead = r.id
+	r.state = StateLeader
+	ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit)
+	if err != nil {
+		r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err)
+	}
+
+	nconf := numOfPendingConf(ents)
+	if nconf > 1 {
+		panic("unexpected multiple uncommitted config entry")
+	}
+	if nconf == 1 {
+		r.pendingConf = true
+	}
+
+	r.appendEntry(pb.Entry{Data: nil})
+	r.logger.Infof("%x became leader at term %d", r.id, r.Term)
+}
+
+func (r *raft) campaign(t CampaignType) {
+	var term uint64
+	var voteMsg pb.MessageType
+	if t == campaignPreElection {
+		r.becomePreCandidate()
+		voteMsg = pb.MsgPreVote
+		// PreVote RPCs are sent for the next term before we've incremented r.Term.
+		term = r.Term + 1
+	} else {
+		r.becomeCandidate()
+		voteMsg = pb.MsgVote
+		term = r.Term
+	}
+	if r.quorum() == r.poll(r.id, voteRespMsgType(voteMsg), true) {
+		// We won the election after voting for ourselves (which must mean that
+		// this is a single-node cluster). Advance to the next state.
+		if t == campaignPreElection {
+			r.campaign(campaignElection)
+		} else {
+			r.becomeLeader()
+		}
+		return
+	}
+	for id := range r.prs {
+		if id == r.id {
+			continue
+		}
+		r.logger.Infof("%x [logterm: %d, index: %d] sent %s request to %x at term %d",
+			r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), voteMsg, id, r.Term)
+
+		var ctx []byte
+		if t == campaignTransfer {
+			ctx = []byte(t)
+		}
+		r.send(pb.Message{Term: term, To: id, Type: voteMsg, Index: r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm(), Context: ctx})
+	}
+}
+
+func (r *raft) poll(id uint64, t pb.MessageType, v bool) (granted int) {
+	if v {
+		r.logger.Infof("%x received %s from %x at term %d", r.id, t, id, r.Term)
+	} else {
+		r.logger.Infof("%x received %s rejection from %x at term %d", r.id, t, id, r.Term)
+	}
+	if _, ok := r.votes[id]; !ok {
+		r.votes[id] = v
+	}
+	for _, vv := range r.votes {
+		if vv {
+			granted++
+		}
+	}
+	return granted
+}
+
+func (r *raft) Step(m pb.Message) error {
+	// Handle the message term, which may result in our stepping down to a follower.
+	switch {
+	case m.Term == 0:
+		// local message
+	case m.Term > r.Term:
+		if m.Type == pb.MsgVote || m.Type == pb.MsgPreVote {
+			force := bytes.Equal(m.Context, []byte(campaignTransfer))
+			inLease := r.checkQuorum && r.lead != None && r.electionElapsed < r.electionTimeout
+			if !force && inLease {
+				// If a server receives a RequestVote request within the minimum election timeout
+				// of hearing from a current leader, it does not update its term or grant its vote
+				r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: lease is not expired (remaining ticks: %d)",
+					r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term, r.electionTimeout-r.electionElapsed)
+				return nil
+			}
+		}
+		switch {
+		case m.Type == pb.MsgPreVote:
+			// Never change our term in response to a PreVote
+		case m.Type == pb.MsgPreVoteResp && !m.Reject:
+			// We send pre-vote requests with a term in our future. If the
+			// pre-vote is granted, we will increment our term when we get a
+			// quorum. If it is not, the term comes from the node that
+			// rejected our vote so we should become a follower at the new
+			// term.
+		default:
+			r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]",
+				r.id, r.Term, m.Type, m.From, m.Term)
+			if m.Type == pb.MsgApp || m.Type == pb.MsgHeartbeat || m.Type == pb.MsgSnap {
+				r.becomeFollower(m.Term, m.From)
+			} else {
+				r.becomeFollower(m.Term, None)
+			}
+		}
+
+	case m.Term < r.Term:
+		if r.checkQuorum && (m.Type == pb.MsgHeartbeat || m.Type == pb.MsgApp) {
+			// We have received messages from a leader at a lower term. It is possible
+			// that these messages were simply delayed in the network, but this could
+			// also mean that this node has advanced its term number during a network
+			// partition, and it is now unable to either win an election or to rejoin
+			// the majority on the old term. If checkQuorum is false, this will be
+			// handled by incrementing term numbers in response to MsgVote with a
+			// higher term, but if checkQuorum is true we may not advance the term on
+			// MsgVote and must generate other messages to advance the term. The net
+			// result of these two features is to minimize the disruption caused by
+			// nodes that have been removed from the cluster's configuration: a
+			// removed node will send MsgVotes (or MsgPreVotes) which will be ignored,
+			// but it will not receive MsgApp or MsgHeartbeat, so it will not create
+			// disruptive term increases
+			r.send(pb.Message{To: m.From, Type: pb.MsgAppResp})
+		} else {
+			// ignore other cases
+			r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]",
+				r.id, r.Term, m.Type, m.From, m.Term)
+		}
+		return nil
+	}
+
+	switch m.Type {
+	case pb.MsgHup:
+		if r.state != StateLeader {
+			ents, err := r.raftLog.slice(r.raftLog.applied+1, r.raftLog.committed+1, noLimit)
+			if err != nil {
+				r.logger.Panicf("unexpected error getting unapplied entries (%v)", err)
+			}
+			if n := numOfPendingConf(ents); n != 0 && r.raftLog.committed > r.raftLog.applied {
+				r.logger.Warningf("%x cannot campaign at term %d since there are still %d pending configuration changes to apply", r.id, r.Term, n)
+				return nil
+			}
+
+			r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term)
+			if r.preVote {
+				r.campaign(campaignPreElection)
+			} else {
+				r.campaign(campaignElection)
+			}
+		} else {
+			r.logger.Debugf("%x ignoring MsgHup because already leader", r.id)
+		}
+
+	case pb.MsgVote, pb.MsgPreVote:
+		if r.isLearner {
+			// TODO: learner may need to vote, in case of node down when confchange.
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] ignored %s from %x [logterm: %d, index: %d] at term %d: learner can not vote",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+			return nil
+		}
+		// The m.Term > r.Term clause is for MsgPreVote. For MsgVote m.Term should
+		// always equal r.Term.
+		if (r.Vote == None || m.Term > r.Term || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) {
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] cast %s for %x [logterm: %d, index: %d] at term %d",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+			// When responding to Msg{Pre,}Vote messages we include the term
+			// from the message, not the local term. To see why consider the
+			// case where a single node was previously partitioned away and
+			// its local term is now out of date. If we include the local term
+			// (recall that for pre-votes we don't update the local term), the
+			// (pre-)campaigning node on the other end will proceed to ignore
+			// the message (it ignores all out of date messages).
+			// The term in the original message and current local term are the
+			// same in the case of regular votes, but different for pre-votes.
+			r.send(pb.Message{To: m.From, Term: m.Term, Type: voteRespMsgType(m.Type)})
+			if m.Type == pb.MsgVote {
+				// Only record real votes.
+				r.electionElapsed = 0
+				r.Vote = m.From
+			}
+		} else {
+			r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected %s from %x [logterm: %d, index: %d] at term %d",
+				r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.Type, m.From, m.LogTerm, m.Index, r.Term)
+			r.send(pb.Message{To: m.From, Term: r.Term, Type: voteRespMsgType(m.Type), Reject: true})
+		}
+
+	default:
+		r.step(r, m)
+	}
+	return nil
+}
+
+type stepFunc func(r *raft, m pb.Message)
+
+func stepLeader(r *raft, m pb.Message) {
+	// These message types do not require any progress for m.From.
+	switch m.Type {
+	case pb.MsgBeat:
+		r.bcastHeartbeat()
+		return
+	case pb.MsgCheckQuorum:
+		if !r.checkQuorumActive() {
+			r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id)
+			r.becomeFollower(r.Term, None)
+		}
+		return
+	case pb.MsgProp:
+		if len(m.Entries) == 0 {
+			r.logger.Panicf("%x stepped empty MsgProp", r.id)
+		}
+		if _, ok := r.prs[r.id]; !ok {
+			// If we are not currently a member of the range (i.e. this node
+			// was removed from the configuration while serving as leader),
+			// drop any new proposals.
+			return
+		}
+		if r.leadTransferee != None {
+			r.logger.Debugf("%x [term %d] transfer leadership to %x is in progress; dropping proposal", r.id, r.Term, r.leadTransferee)
+			return
+		}
+
+		for i, e := range m.Entries {
+			if e.Type == pb.EntryConfChange {
+				if r.pendingConf {
+					r.logger.Infof("propose conf %s ignored since pending unapplied configuration", e.String())
+					m.Entries[i] = pb.Entry{Type: pb.EntryNormal}
+				}
+				r.pendingConf = true
+			}
+		}
+		r.appendEntry(m.Entries...)
+		r.bcastAppend()
+		return
+	case pb.MsgReadIndex:
+		if r.quorum() > 1 {
+			if r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(r.raftLog.committed)) != r.Term {
+				// Reject read only request when this leader has not committed any log entry at its term.
+				return
+			}
+
+			// thinking: use an internally defined context instead of the user-given context.
+			// We can express this in terms of the term and index instead of a user-supplied value.
+			// This would allow multiple reads to piggyback on the same message.
+			switch r.readOnly.option {
+			case ReadOnlySafe:
+				r.readOnly.addRequest(r.raftLog.committed, m)
+				r.bcastHeartbeatWithCtx(m.Entries[0].Data)
+			case ReadOnlyLeaseBased:
+				ri := r.raftLog.committed
+				if m.From == None || m.From == r.id { // from local member
+					r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
+				} else {
+					r.send(pb.Message{To: m.From, Type: pb.MsgReadIndexResp, Index: ri, Entries: m.Entries})
+				}
+			}
+		} else {
+			r.readStates = append(r.readStates, ReadState{Index: r.raftLog.committed, RequestCtx: m.Entries[0].Data})
+		}
+
+		return
+	}
+
+	// All other message types require a progress for m.From (pr).
+	pr := r.getProgress(m.From)
+	if pr == nil {
+		r.logger.Debugf("%x no progress available for %x", r.id, m.From)
+		return
+	}
+	switch m.Type {
+	case pb.MsgAppResp:
+		pr.RecentActive = true
+
+		if m.Reject {
+			r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d",
+				r.id, m.RejectHint, m.From, m.Index)
+			if pr.maybeDecrTo(m.Index, m.RejectHint) {
+				r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr)
+				if pr.State == ProgressStateReplicate {
+					pr.becomeProbe()
+				}
+				r.sendAppend(m.From)
+			}
+		} else {
+			oldPaused := pr.IsPaused()
+			if pr.maybeUpdate(m.Index) {
+				switch {
+				case pr.State == ProgressStateProbe:
+					pr.becomeReplicate()
+				case pr.State == ProgressStateSnapshot && pr.needSnapshotAbort():
+					r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+					pr.becomeProbe()
+				case pr.State == ProgressStateReplicate:
+					pr.ins.freeTo(m.Index)
+				}
+
+				if r.maybeCommit() {
+					r.bcastAppend()
+				} else if oldPaused {
+					// update() reset the wait state on this node. If we had delayed sending
+					// an update before, send it now.
+					r.sendAppend(m.From)
+				}
+				// Transfer leadership is in progress.
+				if m.From == r.leadTransferee && pr.Match == r.raftLog.lastIndex() {
+					r.logger.Infof("%x sent MsgTimeoutNow to %x after received MsgAppResp", r.id, m.From)
+					r.sendTimeoutNow(m.From)
+				}
+			}
+		}
+	case pb.MsgHeartbeatResp:
+		pr.RecentActive = true
+		pr.resume()
+
+		// free one slot for the full inflights window to allow progress.
+		if pr.State == ProgressStateReplicate && pr.ins.full() {
+			pr.ins.freeFirstOne()
+		}
+		if pr.Match < r.raftLog.lastIndex() {
+			r.sendAppend(m.From)
+		}
+
+		if r.readOnly.option != ReadOnlySafe || len(m.Context) == 0 {
+			return
+		}
+
+		ackCount := r.readOnly.recvAck(m)
+		if ackCount < r.quorum() {
+			return
+		}
+
+		rss := r.readOnly.advance(m)
+		for _, rs := range rss {
+			req := rs.req
+			if req.From == None || req.From == r.id { // from local member
+				r.readStates = append(r.readStates, ReadState{Index: rs.index, RequestCtx: req.Entries[0].Data})
+			} else {
+				r.send(pb.Message{To: req.From, Type: pb.MsgReadIndexResp, Index: rs.index, Entries: req.Entries})
+			}
+		}
+	case pb.MsgSnapStatus:
+		if pr.State != ProgressStateSnapshot {
+			return
+		}
+		if !m.Reject {
+			pr.becomeProbe()
+			r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+		} else {
+			pr.snapshotFailure()
+			pr.becomeProbe()
+			r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr)
+		}
+		// If the snapshot finished, wait for the msgAppResp from the remote node before sending
+		// out the next msgApp.
+		// If the snapshot failed, wait for a heartbeat interval before the next try.
+		pr.pause()
+	case pb.MsgUnreachable:
+		// During optimistic replication, if the remote becomes unreachable,
+		// there is huge probability that a MsgApp is lost.
+		if pr.State == ProgressStateReplicate {
+			pr.becomeProbe()
+		}
+		r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr)
+	case pb.MsgTransferLeader:
+		if pr.IsLearner {
+			r.logger.Debugf("%x is learner. Ignored transferring leadership", r.id)
+			return
+		}
+		leadTransferee := m.From
+		lastLeadTransferee := r.leadTransferee
+		if lastLeadTransferee != None {
+			if lastLeadTransferee == leadTransferee {
+				r.logger.Infof("%x [term %d] transfer leadership to %x is in progress, ignores request to same node %x",
+					r.id, r.Term, leadTransferee, leadTransferee)
+				return
+			}
+			r.abortLeaderTransfer()
+			r.logger.Infof("%x [term %d] abort previous transferring leadership to %x", r.id, r.Term, lastLeadTransferee)
+		}
+		if leadTransferee == r.id {
+			r.logger.Debugf("%x is already leader. Ignored transferring leadership to self", r.id)
+			return
+		}
+		// Transfer leadership to a third party.
+		r.logger.Infof("%x [term %d] starts to transfer leadership to %x", r.id, r.Term, leadTransferee)
+		// Transfer leadership should be finished in one electionTimeout, so reset r.electionElapsed.
+		r.electionElapsed = 0
+		r.leadTransferee = leadTransferee
+		if pr.Match == r.raftLog.lastIndex() {
+			r.sendTimeoutNow(leadTransferee)
+			r.logger.Infof("%x sends MsgTimeoutNow to %x immediately as %x already has up-to-date log", r.id, leadTransferee, leadTransferee)
+		} else {
+			r.sendAppend(leadTransferee)
+		}
+	}
+}
+
+// stepCandidate is shared by StateCandidate and StatePreCandidate; the difference is
+// whether they respond to MsgVoteResp or MsgPreVoteResp.
+func stepCandidate(r *raft, m pb.Message) {
+	// Only handle vote responses corresponding to our candidacy (while in
+	// StateCandidate, we may get stale MsgPreVoteResp messages in this term from
+	// our pre-candidate state).
+	var myVoteRespType pb.MessageType
+	if r.state == StatePreCandidate {
+		myVoteRespType = pb.MsgPreVoteResp
+	} else {
+		myVoteRespType = pb.MsgVoteResp
+	}
+	switch m.Type {
+	case pb.MsgProp:
+		r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+		return
+	case pb.MsgApp:
+		r.becomeFollower(r.Term, m.From)
+		r.handleAppendEntries(m)
+	case pb.MsgHeartbeat:
+		r.becomeFollower(r.Term, m.From)
+		r.handleHeartbeat(m)
+	case pb.MsgSnap:
+		r.becomeFollower(m.Term, m.From)
+		r.handleSnapshot(m)
+	case myVoteRespType:
+		gr := r.poll(m.From, m.Type, !m.Reject)
+		r.logger.Infof("%x [quorum:%d] has received %d %s votes and %d vote rejections", r.id, r.quorum(), gr, m.Type, len(r.votes)-gr)
+		switch r.quorum() {
+		case gr:
+			if r.state == StatePreCandidate {
+				r.campaign(campaignElection)
+			} else {
+				r.becomeLeader()
+				r.bcastAppend()
+			}
+		case len(r.votes) - gr:
+			r.becomeFollower(r.Term, None)
+		}
+	case pb.MsgTimeoutNow:
+		r.logger.Debugf("%x [term %d state %v] ignored MsgTimeoutNow from %x", r.id, r.Term, r.state, m.From)
+	}
+}
+
+func stepFollower(r *raft, m pb.Message) {
+	switch m.Type {
+	case pb.MsgProp:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term)
+			return
+		} else if r.disableProposalForwarding {
+			r.logger.Infof("%x not forwarding to leader %x at term %d; dropping proposal", r.id, r.lead, r.Term)
+			return
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgApp:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleAppendEntries(m)
+	case pb.MsgHeartbeat:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleHeartbeat(m)
+	case pb.MsgSnap:
+		r.electionElapsed = 0
+		r.lead = m.From
+		r.handleSnapshot(m)
+	case pb.MsgTransferLeader:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping leader transfer msg", r.id, r.Term)
+			return
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgTimeoutNow:
+		if r.promotable() {
+			r.logger.Infof("%x [term %d] received MsgTimeoutNow from %x and starts an election to get leadership.", r.id, r.Term, m.From)
+			// Leadership transfers never use pre-vote even if r.preVote is true; we
+			// know we are not recovering from a partition so there is no need for the
+			// extra round trip.
+			r.campaign(campaignTransfer)
+		} else {
+			r.logger.Infof("%x received MsgTimeoutNow from %x but is not promotable", r.id, m.From)
+		}
+	case pb.MsgReadIndex:
+		if r.lead == None {
+			r.logger.Infof("%x no leader at term %d; dropping index reading msg", r.id, r.Term)
+			return
+		}
+		m.To = r.lead
+		r.send(m)
+	case pb.MsgReadIndexResp:
+		if len(m.Entries) != 1 {
+			r.logger.Errorf("%x invalid format of MsgReadIndexResp from %x, entries count: %d", r.id, m.From, len(m.Entries))
+			return
+		}
+		r.readStates = append(r.readStates, ReadState{Index: m.Index, RequestCtx: m.Entries[0].Data})
+	}
+}
+
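+// handleAppendEntries appends the entries carried in a MsgApp to the local log when they
+// fit, replying with the resulting index, or rejects with a hint at the local last index
+// when the logs do not match.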
+func (r *raft) handleAppendEntries(m pb.Message) {
+	if m.Index < r.raftLog.committed {
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+		return
+	}
+
+	if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok {
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex})
+	} else {
+		r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x",
+			r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()})
+	}
+}
+
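+// handleHeartbeat advances the local commit index as instructed by the leader and
+// acknowledges the heartbeat.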
+func (r *raft) handleHeartbeat(m pb.Message) {
+	r.raftLog.commitTo(m.Commit)
+	r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp, Context: m.Context})
+}
+
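+// handleSnapshot applies the snapshot in m when it is ahead of the local log and reports
+// back either the new last index or, if the snapshot was ignored, the current commit index.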
+func (r *raft) handleSnapshot(m pb.Message) {
+	sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term
+	if r.restore(m.Snapshot) {
+		r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, sindex, sterm)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.lastIndex()})
+	} else {
+		r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, sindex, sterm)
+		r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed})
+	}
+}
+
+// restore recovers the state machine from a snapshot. It restores the log and the
+// configuration of the state machine.
+func (r *raft) restore(s pb.Snapshot) bool {
+	if s.Metadata.Index <= r.raftLog.committed {
+		return false
+	}
+	if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) {
+		r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]",
+			r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+		r.raftLog.commitTo(s.Metadata.Index)
+		return false
+	}
+
+	// A normal (voting) peer can't become a learner.
+	if !r.isLearner {
+		for _, id := range s.Metadata.ConfState.Learners {
+			if id == r.id {
+				r.logger.Errorf("%x can't become learner when restores snapshot [index: %d, term: %d]", r.id, s.Metadata.Index, s.Metadata.Term)
+				return false
+			}
+		}
+	}
+
+	r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]",
+		r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term)
+
+	r.raftLog.restore(s)
+	r.prs = make(map[uint64]*Progress)
+	r.learnerPrs = make(map[uint64]*Progress)
+	r.restoreNode(s.Metadata.ConfState.Nodes, false)
+	r.restoreNode(s.Metadata.ConfState.Learners, true)
+	return true
+}
+
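+// restoreNode recreates progress tracking for the given node IDs after a snapshot restore,
+// marking each as a voter or learner.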
+func (r *raft) restoreNode(nodes []uint64, isLearner bool) {
+	for _, n := range nodes {
+		match, next := uint64(0), r.raftLog.lastIndex()+1
+		if n == r.id {
+			match = next - 1
+			r.isLearner = isLearner
+		}
+		r.setProgress(n, match, next, isLearner)
+		r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.getProgress(n))
+	}
+}
+
+// promotable indicates whether the state machine can be promoted to leader,
+// which is true when its own id is in the progress list.
+func (r *raft) promotable() bool {
+	_, ok := r.prs[r.id]
+	return ok
+}
+
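+// addNode adds id to the cluster as a voting member.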
+func (r *raft) addNode(id uint64) {
+	r.addNodeOrLearnerNode(id, false)
+}
+
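+// addLearner adds id to the cluster as a non-voting learner.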
+func (r *raft) addLearner(id uint64) {
+	r.addNodeOrLearnerNode(id, true)
+}
+
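+// addNodeOrLearnerNode adds id as a voter or learner, promotes an existing learner to
+// voter when requested, and ignores redundant or unsupported changes.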
+func (r *raft) addNodeOrLearnerNode(id uint64, isLearner bool) {
+	r.pendingConf = false
+	pr := r.getProgress(id)
+	if pr == nil {
+		r.setProgress(id, 0, r.raftLog.lastIndex()+1, isLearner)
+	} else {
+		if isLearner && !pr.IsLearner {
+			// only changing a Learner to a Voter is supported, not the reverse
+			r.logger.Infof("%x ignored addLearner: changing %x from raft peer to learner is not supported.", r.id, id)
+			return
+		}
+
+		if isLearner == pr.IsLearner {
+			// Ignore any redundant addNode calls (which can happen because the
+			// initial bootstrapping entries are applied twice).
+			return
+		}
+
+		// change Learner to Voter, reusing the original Learner progress
+		delete(r.learnerPrs, id)
+		pr.IsLearner = false
+		r.prs[id] = pr
+	}
+
+	if r.id == id {
+		r.isLearner = isLearner
+	}
+
+	// When a node is first added, we should mark it as recently active.
+	// Otherwise, CheckQuorum may cause us to step down if it is invoked
+	// before the added node has a chance to communicate with us.
+	pr = r.getProgress(id)
+	pr.RecentActive = true
+}
+
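+// removeNode drops id from the progress lists, re-checks whether pending entries can now
+// be committed, and aborts any leadership transfer that targeted the removed node.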
+func (r *raft) removeNode(id uint64) {
+	r.delProgress(id)
+	r.pendingConf = false
+
+	// do not try to commit or abort a transfer if there are no nodes in the cluster.
+	if len(r.prs) == 0 && len(r.learnerPrs) == 0 {
+		return
+	}
+
+	// The quorum size is now smaller, so see if any pending entries can
+	// be committed.
+	if r.maybeCommit() {
+		r.bcastAppend()
+	}
+	// If the removed node is the leadTransferee, then abort the leadership transfer.
+	if r.state == StateLeader && r.leadTransferee == id {
+		r.abortLeaderTransfer()
+	}
+}
+
+func (r *raft) resetPendingConf() { r.pendingConf = false }
+
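+// setProgress (re)creates the progress entry for id as either a voter or a learner.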
+func (r *raft) setProgress(id, match, next uint64, isLearner bool) {
+	if !isLearner {
+		delete(r.learnerPrs, id)
+		r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)}
+		return
+	}
+
+	if _, ok := r.prs[id]; ok {
+		panic(fmt.Sprintf("%x unexpected changing from voter to learner for %x", r.id, id))
+	}
+	r.learnerPrs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight), IsLearner: true}
+}
+
+func (r *raft) delProgress(id uint64) {
+	delete(r.prs, id)
+	delete(r.learnerPrs, id)
+}
+
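+// loadState initializes term, vote and commit index from a persisted HardState, panicking
+// if the commit index falls outside the local log.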
+func (r *raft) loadState(state pb.HardState) {
+	if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() {
+		r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex())
+	}
+	r.raftLog.committed = state.Commit
+	r.Term = state.Term
+	r.Vote = state.Vote
+}
+
+// pastElectionTimeout returns true iff r.electionElapsed is greater
+// than or equal to the randomized election timeout in
+// [electiontimeout, 2 * electiontimeout - 1].
+func (r *raft) pastElectionTimeout() bool {
+	return r.electionElapsed >= r.randomizedElectionTimeout
+}
+
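+// resetRandomizedElectionTimeout picks a new election timeout uniformly from
+// [electiontimeout, 2 * electiontimeout - 1] ticks.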
+func (r *raft) resetRandomizedElectionTimeout() {
+	r.randomizedElectionTimeout = r.electionTimeout + globalRand.Intn(r.electionTimeout)
+}
+
+// checkQuorumActive returns true if the quorum is active from
+// the view of the local raft state machine. Otherwise, it returns
+// false.
+// checkQuorumActive also resets all RecentActive to false.
+func (r *raft) checkQuorumActive() bool {
+	var act int
+
+	r.forEachProgress(func(id uint64, pr *Progress) {
+		if id == r.id { // self is always active
+			act++
+			return
+		}
+
+		if pr.RecentActive && !pr.IsLearner {
+			act++
+		}
+
+		pr.RecentActive = false
+	})
+
+	return act >= r.quorum()
+}
+
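+// sendTimeoutNow asks the peer to start an election immediately; it is the final step of a
+// leadership transfer.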
+func (r *raft) sendTimeoutNow(to uint64) {
+	r.send(pb.Message{To: to, Type: pb.MsgTimeoutNow})
+}
+
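+// abortLeaderTransfer clears any in-progress leadership transfer target.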
+func (r *raft) abortLeaderTransfer() {
+	r.leadTransferee = None
+}
+
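+// numOfPendingConf counts the EntryConfChange entries in ents.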
+func numOfPendingConf(ents []pb.Entry) int {
+	n := 0
+	for i := range ents {
+		if ents[i].Type == pb.EntryConfChange {
+			n++
+		}
+	}
+	return n
+}
diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
new file mode 100644
index 0000000..753bd84
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.pb.go
@@ -0,0 +1,2365 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: raft.proto
+
+package raftpb
+
+import (
+	fmt "fmt"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/golang/protobuf/proto"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
+
+type EntryType int32
+
+const (
+	EntryNormal     EntryType = 0
+	EntryConfChange EntryType = 1
+)
+
+var EntryType_name = map[int32]string{
+	0: "EntryNormal",
+	1: "EntryConfChange",
+}
+
+var EntryType_value = map[string]int32{
+	"EntryNormal":     0,
+	"EntryConfChange": 1,
+}
+
+func (x EntryType) Enum() *EntryType {
+	p := new(EntryType)
+	*p = x
+	return p
+}
+
+func (x EntryType) String() string {
+	return proto.EnumName(EntryType_name, int32(x))
+}
+
+func (x *EntryType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType")
+	if err != nil {
+		return err
+	}
+	*x = EntryType(value)
+	return nil
+}
+
+func (EntryType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{0}
+}
+
+type MessageType int32
+
+const (
+	MsgHup            MessageType = 0
+	MsgBeat           MessageType = 1
+	MsgProp           MessageType = 2
+	MsgApp            MessageType = 3
+	MsgAppResp        MessageType = 4
+	MsgVote           MessageType = 5
+	MsgVoteResp       MessageType = 6
+	MsgSnap           MessageType = 7
+	MsgHeartbeat      MessageType = 8
+	MsgHeartbeatResp  MessageType = 9
+	MsgUnreachable    MessageType = 10
+	MsgSnapStatus     MessageType = 11
+	MsgCheckQuorum    MessageType = 12
+	MsgTransferLeader MessageType = 13
+	MsgTimeoutNow     MessageType = 14
+	MsgReadIndex      MessageType = 15
+	MsgReadIndexResp  MessageType = 16
+	MsgPreVote        MessageType = 17
+	MsgPreVoteResp    MessageType = 18
+)
+
+var MessageType_name = map[int32]string{
+	0:  "MsgHup",
+	1:  "MsgBeat",
+	2:  "MsgProp",
+	3:  "MsgApp",
+	4:  "MsgAppResp",
+	5:  "MsgVote",
+	6:  "MsgVoteResp",
+	7:  "MsgSnap",
+	8:  "MsgHeartbeat",
+	9:  "MsgHeartbeatResp",
+	10: "MsgUnreachable",
+	11: "MsgSnapStatus",
+	12: "MsgCheckQuorum",
+	13: "MsgTransferLeader",
+	14: "MsgTimeoutNow",
+	15: "MsgReadIndex",
+	16: "MsgReadIndexResp",
+	17: "MsgPreVote",
+	18: "MsgPreVoteResp",
+}
+
+var MessageType_value = map[string]int32{
+	"MsgHup":            0,
+	"MsgBeat":           1,
+	"MsgProp":           2,
+	"MsgApp":            3,
+	"MsgAppResp":        4,
+	"MsgVote":           5,
+	"MsgVoteResp":       6,
+	"MsgSnap":           7,
+	"MsgHeartbeat":      8,
+	"MsgHeartbeatResp":  9,
+	"MsgUnreachable":    10,
+	"MsgSnapStatus":     11,
+	"MsgCheckQuorum":    12,
+	"MsgTransferLeader": 13,
+	"MsgTimeoutNow":     14,
+	"MsgReadIndex":      15,
+	"MsgReadIndexResp":  16,
+	"MsgPreVote":        17,
+	"MsgPreVoteResp":    18,
+}
+
+func (x MessageType) Enum() *MessageType {
+	p := new(MessageType)
+	*p = x
+	return p
+}
+
+func (x MessageType) String() string {
+	return proto.EnumName(MessageType_name, int32(x))
+}
+
+func (x *MessageType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType")
+	if err != nil {
+		return err
+	}
+	*x = MessageType(value)
+	return nil
+}
+
+func (MessageType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{1}
+}
+
+type ConfChangeType int32
+
+const (
+	ConfChangeAddNode        ConfChangeType = 0
+	ConfChangeRemoveNode     ConfChangeType = 1
+	ConfChangeUpdateNode     ConfChangeType = 2
+	ConfChangeAddLearnerNode ConfChangeType = 3
+)
+
+var ConfChangeType_name = map[int32]string{
+	0: "ConfChangeAddNode",
+	1: "ConfChangeRemoveNode",
+	2: "ConfChangeUpdateNode",
+	3: "ConfChangeAddLearnerNode",
+}
+
+var ConfChangeType_value = map[string]int32{
+	"ConfChangeAddNode":        0,
+	"ConfChangeRemoveNode":     1,
+	"ConfChangeUpdateNode":     2,
+	"ConfChangeAddLearnerNode": 3,
+}
+
+func (x ConfChangeType) Enum() *ConfChangeType {
+	p := new(ConfChangeType)
+	*p = x
+	return p
+}
+
+func (x ConfChangeType) String() string {
+	return proto.EnumName(ConfChangeType_name, int32(x))
+}
+
+func (x *ConfChangeType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType")
+	if err != nil {
+		return err
+	}
+	*x = ConfChangeType(value)
+	return nil
+}
+
+func (ConfChangeType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{2}
+}
+
+type Entry struct {
+	Term                 uint64    `protobuf:"varint,2,opt,name=Term" json:"Term"`
+	Index                uint64    `protobuf:"varint,3,opt,name=Index" json:"Index"`
+	Type                 EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"`
+	Data                 []byte    `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *Entry) Reset()         { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage()    {}
+func (*Entry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{0}
+}
+func (m *Entry) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Entry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Entry.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Entry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Entry.Merge(m, src)
+}
+func (m *Entry) XXX_Size() int {
+	return m.Size()
+}
+func (m *Entry) XXX_DiscardUnknown() {
+	xxx_messageInfo_Entry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Entry proto.InternalMessageInfo
+
+type SnapshotMetadata struct {
+	ConfState            ConfState `protobuf:"bytes,1,opt,name=conf_state,json=confState" json:"conf_state"`
+	Index                uint64    `protobuf:"varint,2,opt,name=index" json:"index"`
+	Term                 uint64    `protobuf:"varint,3,opt,name=term" json:"term"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *SnapshotMetadata) Reset()         { *m = SnapshotMetadata{} }
+func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) }
+func (*SnapshotMetadata) ProtoMessage()    {}
+func (*SnapshotMetadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{1}
+}
+func (m *SnapshotMetadata) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *SnapshotMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_SnapshotMetadata.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *SnapshotMetadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SnapshotMetadata.Merge(m, src)
+}
+func (m *SnapshotMetadata) XXX_Size() int {
+	return m.Size()
+}
+func (m *SnapshotMetadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_SnapshotMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SnapshotMetadata proto.InternalMessageInfo
+
+type Snapshot struct {
+	Data                 []byte           `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
+	Metadata             SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *Snapshot) Reset()         { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage()    {}
+func (*Snapshot) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{2}
+}
+func (m *Snapshot) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Snapshot) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Snapshot.Merge(m, src)
+}
+func (m *Snapshot) XXX_Size() int {
+	return m.Size()
+}
+func (m *Snapshot) XXX_DiscardUnknown() {
+	xxx_messageInfo_Snapshot.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Snapshot proto.InternalMessageInfo
+
+type Message struct {
+	Type                 MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"`
+	To                   uint64      `protobuf:"varint,2,opt,name=to" json:"to"`
+	From                 uint64      `protobuf:"varint,3,opt,name=from" json:"from"`
+	Term                 uint64      `protobuf:"varint,4,opt,name=term" json:"term"`
+	LogTerm              uint64      `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"`
+	Index                uint64      `protobuf:"varint,6,opt,name=index" json:"index"`
+	Entries              []Entry     `protobuf:"bytes,7,rep,name=entries" json:"entries"`
+	Commit               uint64      `protobuf:"varint,8,opt,name=commit" json:"commit"`
+	Snapshot             Snapshot    `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"`
+	Reject               bool        `protobuf:"varint,10,opt,name=reject" json:"reject"`
+	RejectHint           uint64      `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"`
+	Context              []byte      `protobuf:"bytes,12,opt,name=context" json:"context,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *Message) Reset()         { *m = Message{} }
+func (m *Message) String() string { return proto.CompactTextString(m) }
+func (*Message) ProtoMessage()    {}
+func (*Message) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{3}
+}
+func (m *Message) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *Message) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_Message.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *Message) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Message.Merge(m, src)
+}
+func (m *Message) XXX_Size() int {
+	return m.Size()
+}
+func (m *Message) XXX_DiscardUnknown() {
+	xxx_messageInfo_Message.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Message proto.InternalMessageInfo
+
+type HardState struct {
+	Term                 uint64   `protobuf:"varint,1,opt,name=term" json:"term"`
+	Vote                 uint64   `protobuf:"varint,2,opt,name=vote" json:"vote"`
+	Commit               uint64   `protobuf:"varint,3,opt,name=commit" json:"commit"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *HardState) Reset()         { *m = HardState{} }
+func (m *HardState) String() string { return proto.CompactTextString(m) }
+func (*HardState) ProtoMessage()    {}
+func (*HardState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{4}
+}
+func (m *HardState) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *HardState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_HardState.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *HardState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HardState.Merge(m, src)
+}
+func (m *HardState) XXX_Size() int {
+	return m.Size()
+}
+func (m *HardState) XXX_DiscardUnknown() {
+	xxx_messageInfo_HardState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HardState proto.InternalMessageInfo
+
+type ConfState struct {
+	Nodes                []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"`
+	Learners             []uint64 `protobuf:"varint,2,rep,name=learners" json:"learners,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ConfState) Reset()         { *m = ConfState{} }
+func (m *ConfState) String() string { return proto.CompactTextString(m) }
+func (*ConfState) ProtoMessage()    {}
+func (*ConfState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{5}
+}
+func (m *ConfState) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ConfState.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ConfState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfState.Merge(m, src)
+}
+func (m *ConfState) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfState) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfState proto.InternalMessageInfo
+
+type ConfChange struct {
+	ID                   uint64         `protobuf:"varint,1,opt,name=ID" json:"ID"`
+	Type                 ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"`
+	NodeID               uint64         `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"`
+	Context              []byte         `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *ConfChange) Reset()         { *m = ConfChange{} }
+func (m *ConfChange) String() string { return proto.CompactTextString(m) }
+func (*ConfChange) ProtoMessage()    {}
+func (*ConfChange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_b042552c306ae59b, []int{6}
+}
+func (m *ConfChange) XXX_Unmarshal(b []byte) error {
+	return m.Unmarshal(b)
+}
+func (m *ConfChange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	if deterministic {
+		return xxx_messageInfo_ConfChange.Marshal(b, m, deterministic)
+	} else {
+		b = b[:cap(b)]
+		n, err := m.MarshalToSizedBuffer(b)
+		if err != nil {
+			return nil, err
+		}
+		return b[:n], nil
+	}
+}
+func (m *ConfChange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfChange.Merge(m, src)
+}
+func (m *ConfChange) XXX_Size() int {
+	return m.Size()
+}
+func (m *ConfChange) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfChange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfChange proto.InternalMessageInfo
+
+func init() {
+	proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value)
+	proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value)
+	proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value)
+	proto.RegisterType((*Entry)(nil), "raftpb.Entry")
+	proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata")
+	proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot")
+	proto.RegisterType((*Message)(nil), "raftpb.Message")
+	proto.RegisterType((*HardState)(nil), "raftpb.HardState")
+	proto.RegisterType((*ConfState)(nil), "raftpb.ConfState")
+	proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange")
+}
+
+func init() { proto.RegisterFile("raft.proto", fileDescriptor_b042552c306ae59b) }
+
+var fileDescriptor_b042552c306ae59b = []byte{
+	// 816 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xcd, 0x6e, 0x23, 0x45,
+	0x10, 0xf6, 0x8c, 0xc7, 0x7f, 0x35, 0x8e, 0xd3, 0xa9, 0x35, 0xa8, 0x15, 0x45, 0xc6, 0xb2, 0x38,
+	0x58, 0x41, 0x1b, 0x20, 0x07, 0x0e, 0x48, 0x1c, 0x36, 0x09, 0x52, 0x22, 0xad, 0xa3, 0xc5, 0x9b,
+	0xe5, 0x80, 0x84, 0x50, 0xc7, 0x53, 0x9e, 0x18, 0x32, 0xd3, 0xa3, 0x9e, 0xf6, 0xb2, 0xb9, 0x20,
+	0x1e, 0x80, 0x07, 0xe0, 0xc2, 0xfb, 0xe4, 0xb8, 0x12, 0x77, 0xc4, 0x86, 0x17, 0x41, 0xdd, 0xd3,
+	0x63, 0xcf, 0x24, 0xb7, 0xae, 0xaf, 0x6a, 0xbe, 0xfa, 0xbe, 0xea, 0xea, 0x01, 0x50, 0x62, 0xa9,
+	0x8f, 0x32, 0x25, 0xb5, 0xc4, 0xb6, 0x39, 0x67, 0xd7, 0xfb, 0xc3, 0x58, 0xc6, 0xd2, 0x42, 0x9f,
+	0x9b, 0x53, 0x91, 0x9d, 0xfc, 0x06, 0xad, 0x6f, 0x53, 0xad, 0xee, 0x90, 0x43, 0x70, 0x45, 0x2a,
+	0xe1, 0xfe, 0xd8, 0x9b, 0x06, 0x27, 0xc1, 0xfd, 0x3f, 0x9f, 0x34, 0xe6, 0x16, 0xc1, 0x7d, 0x68,
+	0x5d, 0xa4, 0x11, 0xbd, 0xe3, 0xcd, 0x4a, 0xaa, 0x80, 0xf0, 0x33, 0x08, 0xae, 0xee, 0x32, 0xe2,
+	0xde, 0xd8, 0x9b, 0x0e, 0x8e, 0xf7, 0x8e, 0x8a, 0x5e, 0x47, 0x96, 0xd2, 0x24, 0x36, 0x44, 0x77,
+	0x19, 0x21, 0x42, 0x70, 0x26, 0xb4, 0xe0, 0xc1, 0xd8, 0x9b, 0xf6, 0xe7, 0xf6, 0x3c, 0xf9, 0xdd,
+	0x03, 0xf6, 0x3a, 0x15, 0x59, 0x7e, 0x23, 0xf5, 0x8c, 0xb4, 0x88, 0x84, 0x16, 0xf8, 0x15, 0xc0,
+	0x42, 0xa6, 0xcb, 0x9f, 0x72, 0x2d, 0x74, 0xc1, 0x1d, 0x6e, 0xb9, 0x4f, 0x65, 0xba, 0x7c, 0x6d,
+	0x12, 0x8e, 0xbb, 0xb7, 0x28, 0x01, 0xa3, 0x74, 0x65, 0x95, 0x56, 0x4d, 0x14, 0x90, 0xf1, 0xa7,
+	0x8d, 0xbf, 0xaa, 0x09, 0x8b, 0x4c, 0x7e, 0x80, 0x6e, 0xa9, 0xc0, 0x48, 0x34, 0x0a, 0x6c, 0xcf,
+	0xfe, 0xdc, 0x9e, 0xf1, 0x6b, 0xe8, 0x26, 0x4e, 0x99, 0x25, 0x0e, 0x8f, 0x79, 0xa9, 0xe5, 0xb1,
+	0x72, 0xc7, 0xbb, 0xa9, 0x9f, 0xfc, 0xd5, 0x84, 0xce, 0x8c, 0xf2, 0x5c, 0xc4, 0x84, 0xcf, 0x21,
+	0xd0, 0xdb, 0x59, 0x3d, 0x2b, 0x39, 0x5c, 0xba, 0x3a, 0x2d, 0x53, 0x86, 0x43, 0xf0, 0xb5, 0xac,
+	0x39, 0xf1, 0xb5, 0x34, 0x36, 0x96, 0x4a, 0x3e, 0xb2, 0x61, 0x90, 0x8d, 0xc1, 0xe0, 0xb1, 0x41,
+	0x1c, 0x41, 0xe7, 0x56, 0xc6, 0xf6, 0x76, 0x5b, 0x95, 0x64, 0x09, 0x6e, 0xc7, 0xd6, 0x7e, 0x3a,
+	0xb6, 0xe7, 0xd0, 0xa1, 0x54, 0xab, 0x15, 0xe5, 0xbc, 0x33, 0x6e, 0x4e, 0xc3, 0xe3, 0x9d, 0xda,
+	0x1d, 0x97, 0x54, 0xae, 0x06, 0x0f, 0xa0, 0xbd, 0x90, 0x49, 0xb2, 0xd2, 0xbc, 0x5b, 0xe1, 0x72,
+	0x18, 0x1e, 0x43, 0x37, 0x77, 0x13, 0xe3, 0x3d, 0x3b, 0x49, 0xf6, 0x78, 0x92, 0xe5, 0x04, 0xcb,
+	0x3a, 0xc3, 0xa8, 0xe8, 0x67, 0x5a, 0x68, 0x0e, 0x63, 0x6f, 0xda, 0x2d, 0x19, 0x0b, 0x0c, 0x3f,
+	0x05, 0x28, 0x4e, 0xe7, 0xab, 0x54, 0xf3, 0xb0, 0xd2, 0xb3, 0x82, 0x23, 0x87, 0xce, 0x42, 0xa6,
+	0x9a, 0xde, 0x69, 0xde, 0xb7, 0x17, 0x5b, 0x86, 0x93, 0x1f, 0xa1, 0x77, 0x2e, 0x54, 0x54, 0xac,
+	0x4f, 0x39, 0x41, 0xef, 0xc9, 0x04, 0x39, 0x04, 0x6f, 0xa5, 0xa6, 0xfa, 0xe3, 0x30, 0x48, 0xc5,
+	0x70, 0xf3, 0xa9, 0xe1, 0xc9, 0x37, 0xd0, 0xdb, 0xac, 0x2b, 0x0e, 0xa1, 0x95, 0xca, 0x88, 0x72,
+	0xee, 0x8d, 0x9b, 0xd3, 0x60, 0x5e, 0x04, 0xb8, 0x0f, 0xdd, 0x5b, 0x12, 0x2a, 0x25, 0x95, 0x73,
+	0xdf, 0x26, 0x36, 0xf1, 0xe4, 0x0f, 0x0f, 0xc0, 0x7c, 0x7f, 0x7a, 0x23, 0xd2, 0xd8, 0x6e, 0xc4,
+	0xc5, 0x59, 0x4d, 0x9d, 0x7f, 0x71, 0x86, 0x5f, 0xb8, 0x27, 0xe8, 0xdb, 0xb5, 0xfa, 0xb8, 0xfa,
+	0x4c, 0x8a, 0xef, 0x9e, 0xbc, 0xc3, 0x03, 0x68, 0x5f, 0xca, 0x88, 0x2e, 0xce, 0xea, 0x9a, 0x0b,
+	0xcc, 0x0c, 0xeb, 0xd4, 0x0d, 0xab, 0x78, 0xa8, 0x65, 0x78, 0xf8, 0x25, 0xf4, 0x36, 0x0f, 0x1b,
+	0x77, 0x21, 0xb4, 0xc1, 0xa5, 0x54, 0x89, 0xb8, 0x65, 0x0d, 0x7c, 0x06, 0xbb, 0x16, 0xd8, 0x36,
+	0x66, 0xde, 0xe1, 0xdf, 0x3e, 0x84, 0x95, 0x05, 0x47, 0x80, 0xf6, 0x2c, 0x8f, 0xcf, 0xd7, 0x19,
+	0x6b, 0x60, 0x08, 0x9d, 0x59, 0x1e, 0x9f, 0x90, 0xd0, 0xcc, 0x73, 0xc1, 0x2b, 0x25, 0x33, 0xe6,
+	0xbb, 0xaa, 0x17, 0x59, 0xc6, 0x9a, 0x38, 0x00, 0x28, 0xce, 0x73, 0xca, 0x33, 0x16, 0xb8, 0xc2,
+	0xef, 0xa5, 0x26, 0xd6, 0x32, 0x22, 0x5c, 0x60, 0xb3, 0x6d, 0x97, 0x35, 0xcb, 0xc4, 0x3a, 0xc8,
+	0xa0, 0x6f, 0x9a, 0x91, 0x50, 0xfa, 0xda, 0x74, 0xe9, 0xe2, 0x10, 0x58, 0x15, 0xb1, 0x1f, 0xf5,
+	0x10, 0x61, 0x30, 0xcb, 0xe3, 0x37, 0xa9, 0x22, 0xb1, 0xb8, 0x11, 0xd7, 0xb7, 0xc4, 0x00, 0xf7,
+	0x60, 0xc7, 0x11, 0x99, 0xcb, 0x5b, 0xe7, 0x2c, 0x74, 0x65, 0xa7, 0x37, 0xb4, 0xf8, 0xe5, 0xbb,
+	0xb5, 0x54, 0xeb, 0x84, 0xf5, 0xf1, 0x23, 0xd8, 0x9b, 0xe5, 0xf1, 0x95, 0x12, 0x69, 0xbe, 0x24,
+	0xf5, 0x92, 0x44, 0x44, 0x8a, 0xed, 0xb8, 0xaf, 0xaf, 0x56, 0x09, 0xc9, 0xb5, 0xbe, 0x94, 0xbf,
+	0xb2, 0x81, 0x13, 0x33, 0x27, 0x11, 0xd9, 0x3f, 0x27, 0xdb, 0x75, 0x62, 0x36, 0x88, 0x15, 0xc3,
+	0x9c, 0xdf, 0x57, 0x8a, 0xac, 0xc5, 0x3d, 0xd7, 0xd5, 0xc5, 0xb6, 0x06, 0x0f, 0xef, 0x60, 0x50,
+	0xbf, 0x5e, 0xa3, 0x63, 0x8b, 0xbc, 0x88, 0x22, 0x73, 0x97, 0xac, 0x81, 0x1c, 0x86, 0x5b, 0x78,
+	0x4e, 0x89, 0x7c, 0x4b, 0x36, 0xe3, 0xd5, 0x33, 0x6f, 0xb2, 0x48, 0xe8, 0x22, 0xe3, 0xe3, 0x01,
+	0xf0, 0x1a, 0xd5, 0xcb, 0x62, 0x1b, 0x6d, 0xb6, 0x79, 0xc2, 0xef, 0x3f, 0x8c, 0x1a, 0xef, 0x3f,
+	0x8c, 0x1a, 0xf7, 0x0f, 0x23, 0xef, 0xfd, 0xc3, 0xc8, 0xfb, 0xf7, 0x61, 0xe4, 0xfd, 0xf9, 0xdf,
+	0xa8, 0xf1, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x30, 0xe1, 0x02, 0x69, 0x74, 0x06, 0x00, 0x00,
+}
+
+func (m *Entry) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Entry) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Entry) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Data != nil {
+		i -= len(m.Data)
+		copy(dAtA[i:], m.Data)
+		i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
+		i--
+		dAtA[i] = 0x22
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *SnapshotMetadata) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *SnapshotMetadata) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *SnapshotMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+	i--
+	dAtA[i] = 0x10
+	{
+		size, err := m.ConfState.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintRaft(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0xa
+	return len(dAtA) - i, nil
+}
+
+func (m *Snapshot) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	{
+		size, err := m.Metadata.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintRaft(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x12
+	if m.Data != nil {
+		i -= len(m.Data)
+		copy(dAtA[i:], m.Data)
+		i = encodeVarintRaft(dAtA, i, uint64(len(m.Data)))
+		i--
+		dAtA[i] = 0xa
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *Message) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *Message) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *Message) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Context != nil {
+		i -= len(m.Context)
+		copy(dAtA[i:], m.Context)
+		i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
+		i--
+		dAtA[i] = 0x62
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.RejectHint))
+	i--
+	dAtA[i] = 0x58
+	i--
+	if m.Reject {
+		dAtA[i] = 1
+	} else {
+		dAtA[i] = 0
+	}
+	i--
+	dAtA[i] = 0x50
+	{
+		size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i])
+		if err != nil {
+			return 0, err
+		}
+		i -= size
+		i = encodeVarintRaft(dAtA, i, uint64(size))
+	}
+	i--
+	dAtA[i] = 0x4a
+	i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
+	i--
+	dAtA[i] = 0x40
+	if len(m.Entries) > 0 {
+		for iNdEx := len(m.Entries) - 1; iNdEx >= 0; iNdEx-- {
+			{
+				size, err := m.Entries[iNdEx].MarshalToSizedBuffer(dAtA[:i])
+				if err != nil {
+					return 0, err
+				}
+				i -= size
+				i = encodeVarintRaft(dAtA, i, uint64(size))
+			}
+			i--
+			dAtA[i] = 0x3a
+		}
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.Index))
+	i--
+	dAtA[i] = 0x30
+	i = encodeVarintRaft(dAtA, i, uint64(m.LogTerm))
+	i--
+	dAtA[i] = 0x28
+	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+	i--
+	dAtA[i] = 0x20
+	i = encodeVarintRaft(dAtA, i, uint64(m.From))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.To))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *HardState) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *HardState) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *HardState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.Commit))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.Vote))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRaft(dAtA, i, uint64(m.Term))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfState) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfState) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfState) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if len(m.Learners) > 0 {
+		for iNdEx := len(m.Learners) - 1; iNdEx >= 0; iNdEx-- {
+			i = encodeVarintRaft(dAtA, i, uint64(m.Learners[iNdEx]))
+			i--
+			dAtA[i] = 0x10
+		}
+	}
+	if len(m.Nodes) > 0 {
+		for iNdEx := len(m.Nodes) - 1; iNdEx >= 0; iNdEx-- {
+			i = encodeVarintRaft(dAtA, i, uint64(m.Nodes[iNdEx]))
+			i--
+			dAtA[i] = 0x8
+		}
+	}
+	return len(dAtA) - i, nil
+}
+
+func (m *ConfChange) Marshal() (dAtA []byte, err error) {
+	size := m.Size()
+	dAtA = make([]byte, size)
+	n, err := m.MarshalToSizedBuffer(dAtA[:size])
+	if err != nil {
+		return nil, err
+	}
+	return dAtA[:n], nil
+}
+
+func (m *ConfChange) MarshalTo(dAtA []byte) (int, error) {
+	size := m.Size()
+	return m.MarshalToSizedBuffer(dAtA[:size])
+}
+
+func (m *ConfChange) MarshalToSizedBuffer(dAtA []byte) (int, error) {
+	i := len(dAtA)
+	_ = i
+	var l int
+	_ = l
+	if m.XXX_unrecognized != nil {
+		i -= len(m.XXX_unrecognized)
+		copy(dAtA[i:], m.XXX_unrecognized)
+	}
+	if m.Context != nil {
+		i -= len(m.Context)
+		copy(dAtA[i:], m.Context)
+		i = encodeVarintRaft(dAtA, i, uint64(len(m.Context)))
+		i--
+		dAtA[i] = 0x22
+	}
+	i = encodeVarintRaft(dAtA, i, uint64(m.NodeID))
+	i--
+	dAtA[i] = 0x18
+	i = encodeVarintRaft(dAtA, i, uint64(m.Type))
+	i--
+	dAtA[i] = 0x10
+	i = encodeVarintRaft(dAtA, i, uint64(m.ID))
+	i--
+	dAtA[i] = 0x8
+	return len(dAtA) - i, nil
+}
+
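+// encodeVarintRaft writes v as a protobuf varint ending just before offset and returns the
+// offset of its first byte.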
+func encodeVarintRaft(dAtA []byte, offset int, v uint64) int {
+	offset -= sovRaft(v)
+	base := offset
+	for v >= 1<<7 {
+		dAtA[offset] = uint8(v&0x7f | 0x80)
+		v >>= 7
+		offset++
+	}
+	dAtA[offset] = uint8(v)
+	return base
+}
+func (m *Entry) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Type))
+	n += 1 + sovRaft(uint64(m.Term))
+	n += 1 + sovRaft(uint64(m.Index))
+	if m.Data != nil {
+		l = len(m.Data)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *SnapshotMetadata) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	l = m.ConfState.Size()
+	n += 1 + l + sovRaft(uint64(l))
+	n += 1 + sovRaft(uint64(m.Index))
+	n += 1 + sovRaft(uint64(m.Term))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Snapshot) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if m.Data != nil {
+		l = len(m.Data)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	l = m.Metadata.Size()
+	n += 1 + l + sovRaft(uint64(l))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *Message) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Type))
+	n += 1 + sovRaft(uint64(m.To))
+	n += 1 + sovRaft(uint64(m.From))
+	n += 1 + sovRaft(uint64(m.Term))
+	n += 1 + sovRaft(uint64(m.LogTerm))
+	n += 1 + sovRaft(uint64(m.Index))
+	if len(m.Entries) > 0 {
+		for _, e := range m.Entries {
+			l = e.Size()
+			n += 1 + l + sovRaft(uint64(l))
+		}
+	}
+	n += 1 + sovRaft(uint64(m.Commit))
+	l = m.Snapshot.Size()
+	n += 1 + l + sovRaft(uint64(l))
+	n += 2
+	n += 1 + sovRaft(uint64(m.RejectHint))
+	if m.Context != nil {
+		l = len(m.Context)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *HardState) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.Term))
+	n += 1 + sovRaft(uint64(m.Vote))
+	n += 1 + sovRaft(uint64(m.Commit))
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *ConfState) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	if len(m.Nodes) > 0 {
+		for _, e := range m.Nodes {
+			n += 1 + sovRaft(uint64(e))
+		}
+	}
+	if len(m.Learners) > 0 {
+		for _, e := range m.Learners {
+			n += 1 + sovRaft(uint64(e))
+		}
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
+func (m *ConfChange) Size() (n int) {
+	if m == nil {
+		return 0
+	}
+	var l int
+	_ = l
+	n += 1 + sovRaft(uint64(m.ID))
+	n += 1 + sovRaft(uint64(m.Type))
+	n += 1 + sovRaft(uint64(m.NodeID))
+	if m.Context != nil {
+		l = len(m.Context)
+		n += 1 + l + sovRaft(uint64(l))
+	}
+	if m.XXX_unrecognized != nil {
+		n += len(m.XXX_unrecognized)
+	}
+	return n
+}
+
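+// sovRaft returns the number of bytes needed to encode x as a protobuf varint.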
+func sovRaft(x uint64) (n int) {
+	return (math_bits.Len64(x|1) + 6) / 7
+}
+func sozRaft(x uint64) (n int) {
+	return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+func (m *Entry) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Entry: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Entry: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= EntryType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Term |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+			}
+			m.Index = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Index |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *SnapshotMetadata) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.ConfState.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+			}
+			m.Index = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Index |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Term |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Snapshot) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Snapshot: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
+			if m.Data == nil {
+				m.Data = []byte{}
+			}
+			iNdEx = postIndex
+		case 2:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Metadata.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *Message) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: Message: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= MessageType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field To", wireType)
+			}
+			m.To = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.To |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field From", wireType)
+			}
+			m.From = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.From |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Term |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 5:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType)
+			}
+			m.LogTerm = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.LogTerm |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 6:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType)
+			}
+			m.Index = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Index |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 7:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Entries = append(m.Entries, Entry{})
+			if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 8:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+			}
+			m.Commit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Commit |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 9:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType)
+			}
+			var msglen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				msglen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if msglen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + msglen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
+				return err
+			}
+			iNdEx = postIndex
+		case 10:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType)
+			}
+			var v int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				v |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			m.Reject = bool(v != 0)
+		case 11:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType)
+			}
+			m.RejectHint = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.RejectHint |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 12:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
+			if m.Context == nil {
+				m.Context = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *HardState) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: HardState: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType)
+			}
+			m.Term = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Term |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType)
+			}
+			m.Vote = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Vote |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType)
+			}
+			m.Commit = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Commit |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfState) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfState: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.Nodes = append(m.Nodes, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthRaft
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthRaft
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.Nodes) == 0 {
+					m.Nodes = make([]uint64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowRaft
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Nodes = append(m.Nodes, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType)
+			}
+		case 2:
+			if wireType == 0 {
+				var v uint64
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					v |= uint64(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				m.Learners = append(m.Learners, v)
+			} else if wireType == 2 {
+				var packedLen int
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					packedLen |= int(b&0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				if packedLen < 0 {
+					return ErrInvalidLengthRaft
+				}
+				postIndex := iNdEx + packedLen
+				if postIndex < 0 {
+					return ErrInvalidLengthRaft
+				}
+				if postIndex > l {
+					return io.ErrUnexpectedEOF
+				}
+				var elementCount int
+				var count int
+				for _, integer := range dAtA[iNdEx:postIndex] {
+					if integer < 128 {
+						count++
+					}
+				}
+				elementCount = count
+				if elementCount != 0 && len(m.Learners) == 0 {
+					m.Learners = make([]uint64, 0, elementCount)
+				}
+				for iNdEx < postIndex {
+					var v uint64
+					for shift := uint(0); ; shift += 7 {
+						if shift >= 64 {
+							return ErrIntOverflowRaft
+						}
+						if iNdEx >= l {
+							return io.ErrUnexpectedEOF
+						}
+						b := dAtA[iNdEx]
+						iNdEx++
+						v |= uint64(b&0x7F) << shift
+						if b < 0x80 {
+							break
+						}
+					}
+					m.Learners = append(m.Learners, v)
+				}
+			} else {
+				return fmt.Errorf("proto: wrong wireType = %d for field Learners", wireType)
+			}
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func (m *ConfChange) Unmarshal(dAtA []byte) error {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		preIndex := iNdEx
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= uint64(b&0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		fieldNum := int32(wire >> 3)
+		wireType := int(wire & 0x7)
+		if wireType == 4 {
+			return fmt.Errorf("proto: ConfChange: wiretype end group for non-group")
+		}
+		if fieldNum <= 0 {
+			return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire)
+		}
+		switch fieldNum {
+		case 1:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
+			}
+			m.ID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.ID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 2:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
+			}
+			m.Type = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.Type |= ConfChangeType(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 3:
+			if wireType != 0 {
+				return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType)
+			}
+			m.NodeID = 0
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				m.NodeID |= uint64(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+		case 4:
+			if wireType != 2 {
+				return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType)
+			}
+			var byteLen int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				byteLen |= int(b&0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if byteLen < 0 {
+				return ErrInvalidLengthRaft
+			}
+			postIndex := iNdEx + byteLen
+			if postIndex < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if postIndex > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.Context = append(m.Context[:0], dAtA[iNdEx:postIndex]...)
+			if m.Context == nil {
+				m.Context = []byte{}
+			}
+			iNdEx = postIndex
+		default:
+			iNdEx = preIndex
+			skippy, err := skipRaft(dAtA[iNdEx:])
+			if err != nil {
+				return err
+			}
+			if skippy < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) < 0 {
+				return ErrInvalidLengthRaft
+			}
+			if (iNdEx + skippy) > l {
+				return io.ErrUnexpectedEOF
+			}
+			m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
+			iNdEx += skippy
+		}
+	}
+
+	if iNdEx > l {
+		return io.ErrUnexpectedEOF
+	}
+	return nil
+}
+func skipRaft(dAtA []byte) (n int, err error) {
+	l := len(dAtA)
+	iNdEx := 0
+	for iNdEx < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if shift >= 64 {
+				return 0, ErrIntOverflowRaft
+			}
+			if iNdEx >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := dAtA[iNdEx]
+			iNdEx++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
+		switch wireType {
+		case 0:
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				iNdEx++
+				if dAtA[iNdEx-1] < 0x80 {
+					break
+				}
+			}
+			return iNdEx, nil
+		case 1:
+			iNdEx += 8
+			return iNdEx, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if shift >= 64 {
+					return 0, ErrIntOverflowRaft
+				}
+				if iNdEx >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := dAtA[iNdEx]
+				iNdEx++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			if length < 0 {
+				return 0, ErrInvalidLengthRaft
+			}
+			iNdEx += length
+			if iNdEx < 0 {
+				return 0, ErrInvalidLengthRaft
+			}
+			return iNdEx, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = iNdEx
+				for shift := uint(0); ; shift += 7 {
+					if shift >= 64 {
+						return 0, ErrIntOverflowRaft
+					}
+					if iNdEx >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := dAtA[iNdEx]
+					iNdEx++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := skipRaft(dAtA[start:])
+				if err != nil {
+					return 0, err
+				}
+				iNdEx = start + next
+				if iNdEx < 0 {
+					return 0, ErrInvalidLengthRaft
+				}
+			}
+			return iNdEx, nil
+		case 4:
+			return iNdEx, nil
+		case 5:
+			iNdEx += 4
+			return iNdEx, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
+
+var (
+	ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling")
+	ErrIntOverflowRaft   = fmt.Errorf("proto: integer overflow")
+)
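
The generated Unmarshal methods above all repeat the same base-128 varint decoding loop. A minimal standalone sketch of that loop (the helper name here is illustrative, not part of the generated code):

```go
package main

import (
	"errors"
	"fmt"
	"io"
)

// decodeUvarint mirrors the varint loop used by the generated code: each byte
// contributes 7 low-order bits (least-significant group first) and the high
// bit signals that more bytes follow.
func decodeUvarint(b []byte) (v uint64, n int, err error) {
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, 0, errors.New("proto: integer overflow")
		}
		if n >= len(b) {
			return 0, 0, io.ErrUnexpectedEOF
		}
		c := b[n]
		n++
		v |= uint64(c&0x7F) << shift
		if c < 0x80 { // high bit clear: last byte of this varint
			return v, n, nil
		}
	}
}

func main() {
	// 300 encodes as 0xAC 0x02.
	v, n, err := decodeUvarint([]byte{0xAC, 0x02})
	fmt.Println(v, n, err) // 300 2 <nil>
}
```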
diff --git a/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto
new file mode 100644
index 0000000..644ce7b
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/raftpb/raft.proto
@@ -0,0 +1,95 @@
+syntax = "proto2";
+package raftpb;
+
+import "gogoproto/gogo.proto";
+
+option (gogoproto.marshaler_all) = true;
+option (gogoproto.sizer_all) = true;
+option (gogoproto.unmarshaler_all) = true;
+option (gogoproto.goproto_getters_all) = false;
+option (gogoproto.goproto_enum_prefix_all) = false;
+
+enum EntryType {
+	EntryNormal     = 0;
+	EntryConfChange = 1;
+}
+
+message Entry {
+	optional uint64     Term  = 2 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
+	optional uint64     Index = 3 [(gogoproto.nullable) = false]; // must be 64-bit aligned for atomic operations
+	optional EntryType  Type  = 1 [(gogoproto.nullable) = false];
+	optional bytes      Data  = 4;
+}
+
+message SnapshotMetadata {
+	optional ConfState conf_state = 1 [(gogoproto.nullable) = false];
+	optional uint64    index      = 2 [(gogoproto.nullable) = false];
+	optional uint64    term       = 3 [(gogoproto.nullable) = false];
+}
+
+message Snapshot {
+	optional bytes            data     = 1;
+	optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false];
+}
+
+enum MessageType {
+	MsgHup             = 0;
+	MsgBeat            = 1;
+	MsgProp            = 2;
+	MsgApp             = 3;
+	MsgAppResp         = 4;
+	MsgVote            = 5;
+	MsgVoteResp        = 6;
+	MsgSnap            = 7;
+	MsgHeartbeat       = 8;
+	MsgHeartbeatResp   = 9;
+	MsgUnreachable     = 10;
+	MsgSnapStatus      = 11;
+	MsgCheckQuorum     = 12;
+	MsgTransferLeader  = 13;
+	MsgTimeoutNow      = 14;
+	MsgReadIndex       = 15;
+	MsgReadIndexResp   = 16;
+	MsgPreVote         = 17;
+	MsgPreVoteResp     = 18;
+}
+
+message Message {
+	optional MessageType type        = 1  [(gogoproto.nullable) = false];
+	optional uint64      to          = 2  [(gogoproto.nullable) = false];
+	optional uint64      from        = 3  [(gogoproto.nullable) = false];
+	optional uint64      term        = 4  [(gogoproto.nullable) = false];
+	optional uint64      logTerm     = 5  [(gogoproto.nullable) = false];
+	optional uint64      index       = 6  [(gogoproto.nullable) = false];
+	repeated Entry       entries     = 7  [(gogoproto.nullable) = false];
+	optional uint64      commit      = 8  [(gogoproto.nullable) = false];
+	optional Snapshot    snapshot    = 9  [(gogoproto.nullable) = false];
+	optional bool        reject      = 10 [(gogoproto.nullable) = false];
+	optional uint64      rejectHint  = 11 [(gogoproto.nullable) = false];
+	optional bytes       context     = 12;
+}
+
+message HardState {
+	optional uint64 term   = 1 [(gogoproto.nullable) = false];
+	optional uint64 vote   = 2 [(gogoproto.nullable) = false];
+	optional uint64 commit = 3 [(gogoproto.nullable) = false];
+}
+
+message ConfState {
+	repeated uint64 nodes    = 1;
+	repeated uint64 learners = 2;
+}
+
+enum ConfChangeType {
+	ConfChangeAddNode        = 0;
+	ConfChangeRemoveNode     = 1;
+	ConfChangeUpdateNode     = 2;
+	ConfChangeAddLearnerNode = 3;
+}
+
+message ConfChange {
+	optional uint64          ID      = 1 [(gogoproto.nullable) = false];
+	optional ConfChangeType  Type    = 2 [(gogoproto.nullable) = false];
+	optional uint64          NodeID  = 3 [(gogoproto.nullable) = false];
+	optional bytes           Context = 4;
+}
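
Because this file enables gogoproto's marshaler, sizer, and unmarshaler options, each message gets hand-written Marshal/Unmarshal methods like the ones in raft.pb.go above. A small round-trip sketch using the generated HardState type:

```go
package main

import (
	"fmt"

	pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
	hs := pb.HardState{Term: 7, Vote: 2, Commit: 42}

	// Marshal uses the generated (non-reflective) encoder.
	data, err := hs.Marshal()
	if err != nil {
		panic(err)
	}

	// Unmarshal is the generated decoder shown earlier in raft.pb.go.
	var decoded pb.HardState
	if err := decoded.Unmarshal(data); err != nil {
		panic(err)
	}
	fmt.Printf("term=%d vote=%d commit=%d\n", decoded.Term, decoded.Vote, decoded.Commit)
}
```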
diff --git a/vendor/github.com/coreos/etcd/raft/rawnode.go b/vendor/github.com/coreos/etcd/raft/rawnode.go
new file mode 100644
index 0000000..925cb85
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/rawnode.go
@@ -0,0 +1,266 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// ErrStepLocalMsg is returned when trying to step a local raft message.
+var ErrStepLocalMsg = errors.New("raft: cannot step raft local message")
+
+// ErrStepPeerNotFound is returned when trying to step a response message
+// but no peer can be found in raft.prs for that node.
+var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found")
+
+// RawNode is a thread-unsafe Node.
+// The methods of this struct correspond to the methods of Node and are described
+// more fully there.
+type RawNode struct {
+	raft       *raft
+	prevSoftSt *SoftState
+	prevHardSt pb.HardState
+}
+
+func (rn *RawNode) newReady() Ready {
+	return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt)
+}
+
+func (rn *RawNode) commitReady(rd Ready) {
+	if rd.SoftState != nil {
+		rn.prevSoftSt = rd.SoftState
+	}
+	if !IsEmptyHardState(rd.HardState) {
+		rn.prevHardSt = rd.HardState
+	}
+	if rn.prevHardSt.Commit != 0 {
+		// In most cases, prevHardSt and rd.HardState will be the same
+		// because when there are new entries to apply we just sent a
+		// HardState with an updated Commit value. However, on initial
+		// startup the two are different because we don't send a HardState
+		// until something changes, but we do send any un-applied but
+		// committed entries (and previously-committed entries may be
+		// incorporated into the snapshot, even if rd.CommittedEntries is
+		// empty). Therefore we mark all committed entries as applied
+		// whether they were included in rd.HardState or not.
+		rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit)
+	}
+	if len(rd.Entries) > 0 {
+		e := rd.Entries[len(rd.Entries)-1]
+		rn.raft.raftLog.stableTo(e.Index, e.Term)
+	}
+	if !IsEmptySnap(rd.Snapshot) {
+		rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index)
+	}
+	if len(rd.ReadStates) != 0 {
+		rn.raft.readStates = nil
+	}
+}
+
+// NewRawNode returns a new RawNode given configuration and a list of raft peers.
+func NewRawNode(config *Config, peers []Peer) (*RawNode, error) {
+	if config.ID == 0 {
+		panic("config.ID must not be zero")
+	}
+	r := newRaft(config)
+	rn := &RawNode{
+		raft: r,
+	}
+	lastIndex, err := config.Storage.LastIndex()
+	if err != nil {
+		panic(err) // TODO(bdarnell)
+	}
+	// If the log is empty, this is a new RawNode (like StartNode); otherwise it's
+	// restoring an existing RawNode (like RestartNode).
+	// TODO(bdarnell): rethink RawNode initialization and whether the application needs
+	// to be able to tell us when it expects the RawNode to exist.
+	if lastIndex == 0 {
+		r.becomeFollower(1, None)
+		ents := make([]pb.Entry, len(peers))
+		for i, peer := range peers {
+			cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
+			data, err := cc.Marshal()
+			if err != nil {
+				panic("unexpected marshal error")
+			}
+
+			ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data}
+		}
+		r.raftLog.append(ents...)
+		r.raftLog.committed = uint64(len(ents))
+		for _, peer := range peers {
+			r.addNode(peer.ID)
+		}
+	}
+
+	// Set the initial hard and soft states after performing all initialization.
+	rn.prevSoftSt = r.softState()
+	if lastIndex == 0 {
+		rn.prevHardSt = emptyState
+	} else {
+		rn.prevHardSt = r.hardState()
+	}
+
+	return rn, nil
+}
+
+// Tick advances the internal logical clock by a single tick.
+func (rn *RawNode) Tick() {
+	rn.raft.tick()
+}
+
+// TickQuiesced advances the internal logical clock by a single tick without
+// performing any other state machine processing. It allows the caller to avoid
+// periodic heartbeats and elections when all of the peers in a Raft group are
+// known to be at the same state. Expected usage is to periodically invoke Tick
+// or TickQuiesced depending on whether the group is "active" or "quiesced".
+//
+// WARNING: Be very careful about using this method as it subverts the Raft
+// state machine. You should probably be using Tick instead.
+func (rn *RawNode) TickQuiesced() {
+	rn.raft.electionElapsed++
+}
+
+// Campaign causes this RawNode to transition to candidate state.
+func (rn *RawNode) Campaign() error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgHup,
+	})
+}
+
+// Propose proposes data be appended to the raft log.
+func (rn *RawNode) Propose(data []byte) error {
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgProp,
+		From: rn.raft.id,
+		Entries: []pb.Entry{
+			{Data: data},
+		}})
+}
+
+// ProposeConfChange proposes a config change.
+func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error {
+	data, err := cc.Marshal()
+	if err != nil {
+		return err
+	}
+	return rn.raft.Step(pb.Message{
+		Type: pb.MsgProp,
+		Entries: []pb.Entry{
+			{Type: pb.EntryConfChange, Data: data},
+		},
+	})
+}
+
+// ApplyConfChange applies a config change to the local node.
+func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState {
+	if cc.NodeID == None {
+		rn.raft.resetPendingConf()
+		return &pb.ConfState{Nodes: rn.raft.nodes()}
+	}
+	switch cc.Type {
+	case pb.ConfChangeAddNode:
+		rn.raft.addNode(cc.NodeID)
+	case pb.ConfChangeAddLearnerNode:
+		rn.raft.addLearner(cc.NodeID)
+	case pb.ConfChangeRemoveNode:
+		rn.raft.removeNode(cc.NodeID)
+	case pb.ConfChangeUpdateNode:
+		rn.raft.resetPendingConf()
+	default:
+		panic("unexpected conf type")
+	}
+	return &pb.ConfState{Nodes: rn.raft.nodes()}
+}
+
+// Step advances the state machine using the given message.
+func (rn *RawNode) Step(m pb.Message) error {
+	// ignore unexpected local messages received over the network
+	if IsLocalMsg(m.Type) {
+		return ErrStepLocalMsg
+	}
+	if pr := rn.raft.getProgress(m.From); pr != nil || !IsResponseMsg(m.Type) {
+		return rn.raft.Step(m)
+	}
+	return ErrStepPeerNotFound
+}
+
+// Ready returns the current point-in-time state of this RawNode.
+func (rn *RawNode) Ready() Ready {
+	rd := rn.newReady()
+	rn.raft.msgs = nil
+	return rd
+}
+
+// HasReady reports whether there is any pending Ready that the RawNode user needs to process.
+// The checking logic in this method should be consistent with Ready.containsUpdates().
+func (rn *RawNode) HasReady() bool {
+	r := rn.raft
+	if !r.softState().equal(rn.prevSoftSt) {
+		return true
+	}
+	if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) {
+		return true
+	}
+	if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) {
+		return true
+	}
+	if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() {
+		return true
+	}
+	if len(r.readStates) != 0 {
+		return true
+	}
+	return false
+}
+
+// Advance notifies the RawNode that the application has applied and saved progress in the
+// last Ready results.
+func (rn *RawNode) Advance(rd Ready) {
+	rn.commitReady(rd)
+}
+
+// Status returns the current status of the given group.
+func (rn *RawNode) Status() *Status {
+	status := getStatus(rn.raft)
+	return &status
+}
+
+// ReportUnreachable reports the given node is not reachable for the last send.
+func (rn *RawNode) ReportUnreachable(id uint64) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id})
+}
+
+// ReportSnapshot reports the status of the sent snapshot.
+func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) {
+	rej := status == SnapshotFailure
+
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej})
+}
+
+// TransferLeader tries to transfer leadership to the given transferee.
+func (rn *RawNode) TransferLeader(transferee uint64) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgTransferLeader, From: transferee})
+}
+
+// ReadIndex requests a read state. The read state will be set in Ready.
+// A read state carries a read index; once the application has advanced past
+// that index, any linearizable read request issued before the ReadIndex call
+// can be processed safely. The read state carries the same rctx attached.
+func (rn *RawNode) ReadIndex(rctx []byte) {
+	_ = rn.raft.Step(pb.Message{Type: pb.MsgReadIndex, Entries: []pb.Entry{{Data: rctx}}})
+}
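
A minimal single-node sketch of how an application is expected to drive a RawNode: tick the logical clock, drain Ready, persist what it reports, then acknowledge with Advance. The Config field values below are illustrative:

```go
package main

import "github.com/coreos/etcd/raft"

func main() {
	storage := raft.NewMemoryStorage()
	cfg := &raft.Config{
		ID:              1,
		ElectionTick:    10,
		HeartbeatTick:   1,
		Storage:         storage,
		MaxSizePerMsg:   1 << 20,
		MaxInflightMsgs: 256,
	}
	rn, err := raft.NewRawNode(cfg, []raft.Peer{{ID: 1}})
	if err != nil {
		panic(err)
	}

	for i := 0; i < 20; i++ {
		rn.Tick()
		if !rn.HasReady() {
			continue
		}
		rd := rn.Ready()
		// Persist unstable entries and hard state before acting on the Ready;
		// a real application would also send rd.Messages to its peers here.
		if err := storage.Append(rd.Entries); err != nil {
			panic(err)
		}
		if !raft.IsEmptyHardState(rd.HardState) {
			_ = storage.SetHardState(rd.HardState)
		}
		rn.Advance(rd)
	}
}
```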
diff --git a/vendor/github.com/coreos/etcd/raft/read_only.go b/vendor/github.com/coreos/etcd/raft/read_only.go
new file mode 100644
index 0000000..ae746fa
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/read_only.go
@@ -0,0 +1,118 @@
+// Copyright 2016 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import pb "github.com/coreos/etcd/raft/raftpb"
+
+// ReadState provides state for a read-only query.
+// It is the caller's responsibility to call ReadIndex first before getting
+// this state from Ready; it is also the caller's duty to check, via RequestCtx
+// (e.g. a unique id), whether this state corresponds to its own request.
+type ReadState struct {
+	Index      uint64
+	RequestCtx []byte
+}
+
+type readIndexStatus struct {
+	req   pb.Message
+	index uint64
+	acks  map[uint64]struct{}
+}
+
+type readOnly struct {
+	option           ReadOnlyOption
+	pendingReadIndex map[string]*readIndexStatus
+	readIndexQueue   []string
+}
+
+func newReadOnly(option ReadOnlyOption) *readOnly {
+	return &readOnly{
+		option:           option,
+		pendingReadIndex: make(map[string]*readIndexStatus),
+	}
+}
+
+// addRequest adds a read-only request to the readonly struct.
+// `index` is the commit index of the raft state machine when it received
+// the read only request.
+// `m` is the original read only request message from the local or remote node.
+func (ro *readOnly) addRequest(index uint64, m pb.Message) {
+	ctx := string(m.Entries[0].Data)
+	if _, ok := ro.pendingReadIndex[ctx]; ok {
+		return
+	}
+	ro.pendingReadIndex[ctx] = &readIndexStatus{index: index, req: m, acks: make(map[uint64]struct{})}
+	ro.readIndexQueue = append(ro.readIndexQueue, ctx)
+}
+
+// recvAck notifies the readonly struct that the raft state machine received
+// an acknowledgment of the heartbeat attached to the read-only request
+// context.
+func (ro *readOnly) recvAck(m pb.Message) int {
+	rs, ok := ro.pendingReadIndex[string(m.Context)]
+	if !ok {
+		return 0
+	}
+
+	rs.acks[m.From] = struct{}{}
+	// add one to include an ack from local node
+	return len(rs.acks) + 1
+}
+
+// advance advances the read only request queue kept by the readonly struct.
+// It dequeues the requests until it finds the read only request that has
+// the same context as the given `m`.
+func (ro *readOnly) advance(m pb.Message) []*readIndexStatus {
+	var (
+		i     int
+		found bool
+	)
+
+	ctx := string(m.Context)
+	rss := []*readIndexStatus{}
+
+	for _, okctx := range ro.readIndexQueue {
+		i++
+		rs, ok := ro.pendingReadIndex[okctx]
+		if !ok {
+			panic("cannot find corresponding read state from pending map")
+		}
+		rss = append(rss, rs)
+		if okctx == ctx {
+			found = true
+			break
+		}
+	}
+
+	if found {
+		ro.readIndexQueue = ro.readIndexQueue[i:]
+		for _, rs := range rss {
+			delete(ro.pendingReadIndex, string(rs.req.Entries[0].Data))
+		}
+		return rss
+	}
+
+	return nil
+}
+
+// lastPendingRequestCtx returns the context of the last pending read only
+// request in readonly struct.
+func (ro *readOnly) lastPendingRequestCtx() string {
+	if len(ro.readIndexQueue) == 0 {
+		return ""
+	}
+	return ro.readIndexQueue[len(ro.readIndexQueue)-1]
+}
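
On the application side, this read-only machinery surfaces as ReadStates in a Ready: issue ReadIndex with a unique request context, then serve the read only once the applied index has caught up to the returned index. A sketch of that check (helper names are illustrative):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/coreos/etcd/raft"
)

// canServeRead reports whether the read identified by reqCtx may be served,
// given the ReadStates from the latest Ready and the currently applied index.
func canServeRead(rd raft.Ready, reqCtx []byte, applied uint64) bool {
	for _, rs := range rd.ReadStates {
		if bytes.Equal(rs.RequestCtx, reqCtx) && applied >= rs.Index {
			return true
		}
	}
	return false
}

func main() {
	rd := raft.Ready{ReadStates: []raft.ReadState{{Index: 5, RequestCtx: []byte("q1")}}}
	fmt.Println(canServeRead(rd, []byte("q1"), 4)) // false: not yet applied up to 5
	fmt.Println(canServeRead(rd, []byte("q1"), 5)) // true
}
```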
diff --git a/vendor/github.com/coreos/etcd/raft/status.go b/vendor/github.com/coreos/etcd/raft/status.go
new file mode 100644
index 0000000..f4d3d86
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/status.go
@@ -0,0 +1,88 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"fmt"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+type Status struct {
+	ID uint64
+
+	pb.HardState
+	SoftState
+
+	Applied  uint64
+	Progress map[uint64]Progress
+
+	LeadTransferee uint64
+}
+
+// getStatus gets a copy of the current raft status.
+func getStatus(r *raft) Status {
+	s := Status{
+		ID:             r.id,
+		LeadTransferee: r.leadTransferee,
+	}
+
+	s.HardState = r.hardState()
+	s.SoftState = *r.softState()
+
+	s.Applied = r.raftLog.applied
+
+	if s.RaftState == StateLeader {
+		s.Progress = make(map[uint64]Progress)
+		for id, p := range r.prs {
+			s.Progress[id] = *p
+		}
+
+		for id, p := range r.learnerPrs {
+			s.Progress[id] = *p
+		}
+	}
+
+	return s
+}
+
+// MarshalJSON translates the raft status into JSON.
+// TODO: try to simplify this by introducing ID type into raft
+func (s Status) MarshalJSON() ([]byte, error) {
+	j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"applied":%d,"progress":{`,
+		s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState, s.Applied)
+
+	if len(s.Progress) == 0 {
+		j += "},"
+	} else {
+		for k, v := range s.Progress {
+			subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State)
+			j += subj
+		}
+		// remove the trailing ","
+		j = j[:len(j)-1] + "},"
+	}
+
+	j += fmt.Sprintf(`"leadtransferee":"%x"}`, s.LeadTransferee)
+	return []byte(j), nil
+}
+
+func (s Status) String() string {
+	b, err := s.MarshalJSON()
+	if err != nil {
+		raftLogger.Panicf("unexpected error: %v", err)
+	}
+	return string(b)
+}
diff --git a/vendor/github.com/coreos/etcd/raft/storage.go b/vendor/github.com/coreos/etcd/raft/storage.go
new file mode 100644
index 0000000..69c3a7d
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/storage.go
@@ -0,0 +1,271 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"errors"
+	"sync"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+// ErrCompacted is returned by Storage.Entries/Compact when a requested
+// index is unavailable because it predates the last snapshot.
+var ErrCompacted = errors.New("requested index is unavailable due to compaction")
+
+// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested
+// index is older than the existing snapshot.
+var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot")
+
+// ErrUnavailable is returned by Storage interface when the requested log entries
+// are unavailable.
+var ErrUnavailable = errors.New("requested entry at index is unavailable")
+
+// ErrSnapshotTemporarilyUnavailable is returned by the Storage interface when the required
+// snapshot is temporarily unavailable.
+var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
+
+// Storage is an interface that may be implemented by the application
+// to retrieve log entries from storage.
+//
+// If any Storage method returns an error, the raft instance will
+// become inoperable and refuse to participate in elections; the
+// application is responsible for cleanup and recovery in this case.
+type Storage interface {
+	// InitialState returns the saved HardState and ConfState information.
+	InitialState() (pb.HardState, pb.ConfState, error)
+	// Entries returns a slice of log entries in the range [lo,hi).
+	// MaxSize limits the total size of the log entries returned, but
+	// Entries returns at least one entry if any.
+	Entries(lo, hi, maxSize uint64) ([]pb.Entry, error)
+	// Term returns the term of entry i, which must be in the range
+	// [FirstIndex()-1, LastIndex()]. The term of the entry before
+	// FirstIndex is retained for matching purposes even though the
+	// rest of that entry may not be available.
+	Term(i uint64) (uint64, error)
+	// LastIndex returns the index of the last entry in the log.
+	LastIndex() (uint64, error)
+	// FirstIndex returns the index of the first log entry that is
+	// possibly available via Entries (older entries have been incorporated
+	// into the latest Snapshot; if storage only contains the dummy entry the
+	// first log entry is not available).
+	FirstIndex() (uint64, error)
+	// Snapshot returns the most recent snapshot.
+	// If snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable,
+	// so the raft state machine knows that Storage needs some time to prepare the
+	// snapshot and will call Snapshot later.
+	Snapshot() (pb.Snapshot, error)
+}
+
+// MemoryStorage implements the Storage interface backed by an
+// in-memory array.
+type MemoryStorage struct {
+	// Protects access to all fields. Most methods of MemoryStorage are
+	// run on the raft goroutine, but Append() is run on an application
+	// goroutine.
+	sync.Mutex
+
+	hardState pb.HardState
+	snapshot  pb.Snapshot
+	// ents[i] has raft log position i+snapshot.Metadata.Index
+	ents []pb.Entry
+}
+
+// NewMemoryStorage creates an empty MemoryStorage.
+func NewMemoryStorage() *MemoryStorage {
+	return &MemoryStorage{
+		// When starting from scratch populate the list with a dummy entry at term zero.
+		ents: make([]pb.Entry, 1),
+	}
+}
+
+// InitialState implements the Storage interface.
+func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) {
+	return ms.hardState, ms.snapshot.Metadata.ConfState, nil
+}
+
+// SetHardState saves the current HardState.
+func (ms *MemoryStorage) SetHardState(st pb.HardState) error {
+	ms.Lock()
+	defer ms.Unlock()
+	ms.hardState = st
+	return nil
+}
+
+// Entries implements the Storage interface.
+func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	offset := ms.ents[0].Index
+	if lo <= offset {
+		return nil, ErrCompacted
+	}
+	if hi > ms.lastIndex()+1 {
+		raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex())
+	}
+	// only contains dummy entries.
+	if len(ms.ents) == 1 {
+		return nil, ErrUnavailable
+	}
+
+	ents := ms.ents[lo-offset : hi-offset]
+	return limitSize(ents, maxSize), nil
+}
+
+// Term implements the Storage interface.
+func (ms *MemoryStorage) Term(i uint64) (uint64, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	offset := ms.ents[0].Index
+	if i < offset {
+		return 0, ErrCompacted
+	}
+	if int(i-offset) >= len(ms.ents) {
+		return 0, ErrUnavailable
+	}
+	return ms.ents[i-offset].Term, nil
+}
+
+// LastIndex implements the Storage interface.
+func (ms *MemoryStorage) LastIndex() (uint64, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	return ms.lastIndex(), nil
+}
+
+func (ms *MemoryStorage) lastIndex() uint64 {
+	return ms.ents[0].Index + uint64(len(ms.ents)) - 1
+}
+
+// FirstIndex implements the Storage interface.
+func (ms *MemoryStorage) FirstIndex() (uint64, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	return ms.firstIndex(), nil
+}
+
+func (ms *MemoryStorage) firstIndex() uint64 {
+	return ms.ents[0].Index + 1
+}
+
+// Snapshot implements the Storage interface.
+func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	return ms.snapshot, nil
+}
+
+// ApplySnapshot overwrites the contents of this Storage object with
+// those of the given snapshot.
+func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error {
+	ms.Lock()
+	defer ms.Unlock()
+
+	// reject the snapshot if it is older than the one currently held
+	msIndex := ms.snapshot.Metadata.Index
+	snapIndex := snap.Metadata.Index
+	if msIndex >= snapIndex {
+		return ErrSnapOutOfDate
+	}
+
+	ms.snapshot = snap
+	ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}}
+	return nil
+}
+
+// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and
+// can be used to reconstruct the state at that point.
+// If any configuration changes have been made since the last compaction,
+// the result of the last ApplyConfChange must be passed in.
+func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) {
+	ms.Lock()
+	defer ms.Unlock()
+	if i <= ms.snapshot.Metadata.Index {
+		return pb.Snapshot{}, ErrSnapOutOfDate
+	}
+
+	offset := ms.ents[0].Index
+	if i > ms.lastIndex() {
+		raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex())
+	}
+
+	ms.snapshot.Metadata.Index = i
+	ms.snapshot.Metadata.Term = ms.ents[i-offset].Term
+	if cs != nil {
+		ms.snapshot.Metadata.ConfState = *cs
+	}
+	ms.snapshot.Data = data
+	return ms.snapshot, nil
+}
+
+// Compact discards all log entries prior to compactIndex.
+// It is the application's responsibility to not attempt to compact an index
+// greater than raftLog.applied.
+func (ms *MemoryStorage) Compact(compactIndex uint64) error {
+	ms.Lock()
+	defer ms.Unlock()
+	offset := ms.ents[0].Index
+	if compactIndex <= offset {
+		return ErrCompacted
+	}
+	if compactIndex > ms.lastIndex() {
+		raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex())
+	}
+
+	i := compactIndex - offset
+	ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i)
+	ents[0].Index = ms.ents[i].Index
+	ents[0].Term = ms.ents[i].Term
+	ents = append(ents, ms.ents[i+1:]...)
+	ms.ents = ents
+	return nil
+}
+
+// Append the new entries to storage.
+// TODO (xiangli): ensure the entries are continuous and
+// entries[0].Index > ms.entries[0].Index
+func (ms *MemoryStorage) Append(entries []pb.Entry) error {
+	if len(entries) == 0 {
+		return nil
+	}
+
+	ms.Lock()
+	defer ms.Unlock()
+
+	first := ms.firstIndex()
+	last := entries[0].Index + uint64(len(entries)) - 1
+
+	// shortcut if there is no new entry.
+	if last < first {
+		return nil
+	}
+	// truncate compacted entries
+	if first > entries[0].Index {
+		entries = entries[first-entries[0].Index:]
+	}
+
+	offset := entries[0].Index - ms.ents[0].Index
+	switch {
+	case uint64(len(ms.ents)) > offset:
+		ms.ents = append([]pb.Entry{}, ms.ents[:offset]...)
+		ms.ents = append(ms.ents, entries...)
+	case uint64(len(ms.ents)) == offset:
+		ms.ents = append(ms.ents, entries...)
+	default:
+		raftLogger.Panicf("missing log entry [last: %d, append at: %d]",
+			ms.lastIndex(), entries[0].Index)
+	}
+	return nil
+}
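
A short sketch of the MemoryStorage lifecycle — append entries, compact up to an index, and observe that older indexes now report ErrCompacted:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/raft"
	pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
	ms := raft.NewMemoryStorage()

	// Append a few entries, then compact everything prior to index 2.
	ents := []pb.Entry{
		{Term: 1, Index: 1},
		{Term: 1, Index: 2},
		{Term: 2, Index: 3},
	}
	if err := ms.Append(ents); err != nil {
		panic(err)
	}
	if err := ms.Compact(2); err != nil {
		panic(err)
	}

	first, _ := ms.FirstIndex() // 3: entries 1 and 2 are gone
	last, _ := ms.LastIndex()   // 3
	fmt.Println(first, last)

	// Requests below FirstIndex now report ErrCompacted.
	if _, err := ms.Entries(1, 4, 1<<20); err == raft.ErrCompacted {
		fmt.Println("compacted:", err)
	}
}
```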
diff --git a/vendor/github.com/coreos/etcd/raft/util.go b/vendor/github.com/coreos/etcd/raft/util.go
new file mode 100644
index 0000000..f4141fe
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/raft/util.go
@@ -0,0 +1,129 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package raft
+
+import (
+	"bytes"
+	"fmt"
+
+	pb "github.com/coreos/etcd/raft/raftpb"
+)
+
+func (st StateType) MarshalJSON() ([]byte, error) {
+	return []byte(fmt.Sprintf("%q", st.String())), nil
+}
+
+// uint64Slice implements sort.Interface for a slice of uint64.
+type uint64Slice []uint64
+
+func (p uint64Slice) Len() int           { return len(p) }
+func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] }
+func (p uint64Slice) Swap(i, j int)      { p[i], p[j] = p[j], p[i] }
+
+func min(a, b uint64) uint64 {
+	if a > b {
+		return b
+	}
+	return a
+}
+
+func max(a, b uint64) uint64 {
+	if a > b {
+		return a
+	}
+	return b
+}
+
+func IsLocalMsg(msgt pb.MessageType) bool {
+	return msgt == pb.MsgHup || msgt == pb.MsgBeat || msgt == pb.MsgUnreachable ||
+		msgt == pb.MsgSnapStatus || msgt == pb.MsgCheckQuorum
+}
+
+func IsResponseMsg(msgt pb.MessageType) bool {
+	return msgt == pb.MsgAppResp || msgt == pb.MsgVoteResp || msgt == pb.MsgHeartbeatResp || msgt == pb.MsgUnreachable || msgt == pb.MsgPreVoteResp
+}
+
+// voteRespMsgType maps vote and prevote message types to their corresponding responses.
+func voteRespMsgType(msgt pb.MessageType) pb.MessageType {
+	switch msgt {
+	case pb.MsgVote:
+		return pb.MsgVoteResp
+	case pb.MsgPreVote:
+		return pb.MsgPreVoteResp
+	default:
+		panic(fmt.Sprintf("not a vote message: %s", msgt))
+	}
+}
+
+// EntryFormatter can be implemented by the application to provide human-readable formatting
+// of entry data. Nil is a valid EntryFormatter and will use a default format.
+type EntryFormatter func([]byte) string
+
+// DescribeMessage returns a concise human-readable description of a
+// Message for debugging.
+func DescribeMessage(m pb.Message, f EntryFormatter) string {
+	var buf bytes.Buffer
+	fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index)
+	if m.Reject {
+		fmt.Fprintf(&buf, " Rejected")
+		if m.RejectHint != 0 {
+			fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint)
+		}
+	}
+	if m.Commit != 0 {
+		fmt.Fprintf(&buf, " Commit:%d", m.Commit)
+	}
+	if len(m.Entries) > 0 {
+		fmt.Fprintf(&buf, " Entries:[")
+		for i, e := range m.Entries {
+			if i != 0 {
+				buf.WriteString(", ")
+			}
+			buf.WriteString(DescribeEntry(e, f))
+		}
+		fmt.Fprintf(&buf, "]")
+	}
+	if !IsEmptySnap(m.Snapshot) {
+		fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot)
+	}
+	return buf.String()
+}
+
+// DescribeEntry returns a concise human-readable description of an
+// Entry for debugging.
+func DescribeEntry(e pb.Entry, f EntryFormatter) string {
+	var formatted string
+	if e.Type == pb.EntryNormal && f != nil {
+		formatted = f(e.Data)
+	} else {
+		formatted = fmt.Sprintf("%q", e.Data)
+	}
+	return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted)
+}
+
+func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry {
+	if len(ents) == 0 {
+		return ents
+	}
+	size := ents[0].Size()
+	var limit int
+	for limit = 1; limit < len(ents); limit++ {
+		size += ents[limit].Size()
+		if uint64(size) > maxSize {
+			break
+		}
+	}
+	return ents[:limit]
+}
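
The describe helpers are useful when logging raft traffic; an EntryFormatter controls how normal-entry payloads are rendered. A small sketch:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/raft"
	pb "github.com/coreos/etcd/raft/raftpb"
)

func main() {
	// Render EntryNormal payloads as plain strings instead of quoted bytes.
	formatter := func(data []byte) string { return string(data) }

	e := pb.Entry{Term: 3, Index: 12, Type: pb.EntryNormal, Data: []byte("set x=1")}
	fmt.Println(raft.DescribeEntry(e, formatter)) // 3/12 EntryNormal set x=1

	m := pb.Message{
		Type: pb.MsgApp, From: 1, To: 2,
		Term: 3, LogTerm: 3, Index: 11, Commit: 10,
		Entries: []pb.Entry{e},
	}
	fmt.Println(raft.DescribeMessage(m, formatter))
}
```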
diff --git a/vendor/github.com/coreos/etcd/version/version.go b/vendor/github.com/coreos/etcd/version/version.go
new file mode 100644
index 0000000..aa96ffa
--- /dev/null
+++ b/vendor/github.com/coreos/etcd/version/version.go
@@ -0,0 +1,56 @@
+// Copyright 2015 The etcd Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package version implements etcd version parsing and contains latest version
+// information.
+package version
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/coreos/go-semver/semver"
+)
+
+var (
+	// MinClusterVersion is the minimum cluster version this etcd binary is compatible with.
+	MinClusterVersion = "3.0.0"
+	Version           = "3.3.25"
+	APIVersion        = "unknown"
+
+	// GitSHA is the git SHA value; it is set during the build.
+	GitSHA = "Not provided (use ./build instead of go build)"
+)
+
+func init() {
+	ver, err := semver.NewVersion(Version)
+	if err == nil {
+		APIVersion = fmt.Sprintf("%d.%d", ver.Major, ver.Minor)
+	}
+}
+
+type Versions struct {
+	Server  string `json:"etcdserver"`
+	Cluster string `json:"etcdcluster"`
+	// TODO: raft state machine version
+}
+
+// Cluster keeps only the major.minor part of the given version string.
+func Cluster(v string) string {
+	vs := strings.Split(v, ".")
+	if len(vs) <= 2 {
+		return v
+	}
+	return fmt.Sprintf("%s.%s", vs[0], vs[1])
+}
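
For reference, how the Cluster helper and the derived APIVersion behave, assuming the constants defined above:

```go
package main

import (
	"fmt"

	"github.com/coreos/etcd/version"
)

func main() {
	fmt.Println(version.Cluster("3.3.25")) // 3.3 — only major.minor is kept
	fmt.Println(version.Cluster("3.4"))    // 3.4 — already major.minor, returned as-is
	fmt.Println(version.APIVersion)        // 3.3 — derived from Version in init()
}
```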
diff --git a/vendor/github.com/coreos/go-semver/LICENSE b/vendor/github.com/coreos/go-semver/LICENSE
new file mode 100644
index 0000000..d645695
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/LICENSE
@@ -0,0 +1,202 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/coreos/go-semver/NOTICE b/vendor/github.com/coreos/go-semver/NOTICE
new file mode 100644
index 0000000..23a0ada
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2018 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/go-semver/semver/semver.go b/vendor/github.com/coreos/go-semver/semver/semver.go
new file mode 100644
index 0000000..76cf485
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/semver/semver.go
@@ -0,0 +1,296 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Semantic Versions http://semver.org
+package semver
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+type Version struct {
+	Major      int64
+	Minor      int64
+	Patch      int64
+	PreRelease PreRelease
+	Metadata   string
+}
+
+type PreRelease string
+
+func splitOff(input *string, delim string) (val string) {
+	parts := strings.SplitN(*input, delim, 2)
+
+	if len(parts) == 2 {
+		*input = parts[0]
+		val = parts[1]
+	}
+
+	return val
+}
+
+func New(version string) *Version {
+	return Must(NewVersion(version))
+}
+
+func NewVersion(version string) (*Version, error) {
+	v := Version{}
+
+	if err := v.Set(version); err != nil {
+		return nil, err
+	}
+
+	return &v, nil
+}
+
+// Must is a helper for wrapping NewVersion and will panic if err is not nil.
+func Must(v *Version, err error) *Version {
+	if err != nil {
+		panic(err)
+	}
+	return v
+}
+
+// Set parses and updates v from the given version string. Implements flag.Value
+func (v *Version) Set(version string) error {
+	metadata := splitOff(&version, "+")
+	preRelease := PreRelease(splitOff(&version, "-"))
+	dotParts := strings.SplitN(version, ".", 3)
+
+	if len(dotParts) != 3 {
+		return fmt.Errorf("%s is not in dotted-tri format", version)
+	}
+
+	if err := validateIdentifier(string(preRelease)); err != nil {
+		return fmt.Errorf("failed to validate pre-release: %v", err)
+	}
+
+	if err := validateIdentifier(metadata); err != nil {
+		return fmt.Errorf("failed to validate metadata: %v", err)
+	}
+
+	parsed := make([]int64, 3, 3)
+
+	for i, v := range dotParts[:3] {
+		val, err := strconv.ParseInt(v, 10, 64)
+		parsed[i] = val
+		if err != nil {
+			return err
+		}
+	}
+
+	v.Metadata = metadata
+	v.PreRelease = preRelease
+	v.Major = parsed[0]
+	v.Minor = parsed[1]
+	v.Patch = parsed[2]
+	return nil
+}
+
+func (v Version) String() string {
+	var buffer bytes.Buffer
+
+	fmt.Fprintf(&buffer, "%d.%d.%d", v.Major, v.Minor, v.Patch)
+
+	if v.PreRelease != "" {
+		fmt.Fprintf(&buffer, "-%s", v.PreRelease)
+	}
+
+	if v.Metadata != "" {
+		fmt.Fprintf(&buffer, "+%s", v.Metadata)
+	}
+
+	return buffer.String()
+}
+
+func (v *Version) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var data string
+	if err := unmarshal(&data); err != nil {
+		return err
+	}
+	return v.Set(data)
+}
+
+func (v Version) MarshalJSON() ([]byte, error) {
+	return []byte(`"` + v.String() + `"`), nil
+}
+
+func (v *Version) UnmarshalJSON(data []byte) error {
+	l := len(data)
+	if l == 0 || string(data) == `""` {
+		return nil
+	}
+	if l < 2 || data[0] != '"' || data[l-1] != '"' {
+		return errors.New("invalid semver string")
+	}
+	return v.Set(string(data[1 : l-1]))
+}
+
+// Compare tests if v is less than, equal to, or greater than versionB,
+// returning -1, 0, or +1 respectively.
+func (v Version) Compare(versionB Version) int {
+	if cmp := recursiveCompare(v.Slice(), versionB.Slice()); cmp != 0 {
+		return cmp
+	}
+	return preReleaseCompare(v, versionB)
+}
+
+// Equal tests if v is equal to versionB.
+func (v Version) Equal(versionB Version) bool {
+	return v.Compare(versionB) == 0
+}
+
+// LessThan tests if v is less than versionB.
+func (v Version) LessThan(versionB Version) bool {
+	return v.Compare(versionB) < 0
+}
+
+// Slice converts the comparable parts of the semver into a slice of integers.
+func (v Version) Slice() []int64 {
+	return []int64{v.Major, v.Minor, v.Patch}
+}
+
+func (p PreRelease) Slice() []string {
+	preRelease := string(p)
+	return strings.Split(preRelease, ".")
+}
+
+func preReleaseCompare(versionA Version, versionB Version) int {
+	a := versionA.PreRelease
+	b := versionB.PreRelease
+
+	/* Handle the case where two versions are otherwise equal: the one
+	 * without a PreRelease is the greater one. */
+	if len(a) == 0 && (len(b) > 0) {
+		return 1
+	} else if len(b) == 0 && (len(a) > 0) {
+		return -1
+	}
+
+	// If there is a prerelease, check and compare each part.
+	return recursivePreReleaseCompare(a.Slice(), b.Slice())
+}
+
+func recursiveCompare(versionA []int64, versionB []int64) int {
+	if len(versionA) == 0 {
+		return 0
+	}
+
+	a := versionA[0]
+	b := versionB[0]
+
+	if a > b {
+		return 1
+	} else if a < b {
+		return -1
+	}
+
+	return recursiveCompare(versionA[1:], versionB[1:])
+}
+
+func recursivePreReleaseCompare(versionA []string, versionB []string) int {
+	// A larger set of pre-release fields has a higher precedence than a smaller set,
+	// if all of the preceding identifiers are equal.
+	if len(versionA) == 0 {
+		if len(versionB) > 0 {
+			return -1
+		}
+		return 0
+	} else if len(versionB) == 0 {
+		// We're longer than versionB so return 1.
+		return 1
+	}
+
+	a := versionA[0]
+	b := versionB[0]
+
+	aInt := false
+	bInt := false
+
+	aI, err := strconv.Atoi(versionA[0])
+	if err == nil {
+		aInt = true
+	}
+
+	bI, err := strconv.Atoi(versionB[0])
+	if err == nil {
+		bInt = true
+	}
+
+	// Numeric identifiers always have lower precedence than non-numeric identifiers.
+	if aInt && !bInt {
+		return -1
+	} else if !aInt && bInt {
+		return 1
+	}
+
+	// Handle Integer Comparison
+	if aInt && bInt {
+		if aI > bI {
+			return 1
+		} else if aI < bI {
+			return -1
+		}
+	}
+
+	// Handle String Comparison
+	if a > b {
+		return 1
+	} else if a < b {
+		return -1
+	}
+
+	return recursivePreReleaseCompare(versionA[1:], versionB[1:])
+}
+
+// BumpMajor increments the Major field by 1 and resets all other fields to their default values
+func (v *Version) BumpMajor() {
+	v.Major += 1
+	v.Minor = 0
+	v.Patch = 0
+	v.PreRelease = PreRelease("")
+	v.Metadata = ""
+}
+
+// BumpMinor increments the Minor field by 1 and resets all other fields to their default values
+func (v *Version) BumpMinor() {
+	v.Minor += 1
+	v.Patch = 0
+	v.PreRelease = PreRelease("")
+	v.Metadata = ""
+}
+
+// BumpPatch increments the Patch field by 1 and resets all other fields to their default values
+func (v *Version) BumpPatch() {
+	v.Patch += 1
+	v.PreRelease = PreRelease("")
+	v.Metadata = ""
+}
+
+// validateIdentifier makes sure the provided identifier satisfies semver spec
+func validateIdentifier(id string) error {
+	if id != "" && !reIdentifier.MatchString(id) {
+		return fmt.Errorf("%s is not a valid semver identifier", id)
+	}
+	return nil
+}
+
+// reIdentifier is a regular expression used to check that pre-release and metadata
+// identifiers satisfy the spec requirements
+var reIdentifier = regexp.MustCompile(`^[0-9A-Za-z-]+(\.[0-9A-Za-z-]+)*$`)
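A minimal usage sketch for the package above (illustrative only, not part of the vendored file; it uses only the API shown in this hunk):

```
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	// New panics on malformed input; NewVersion returns an error instead.
	a := semver.New("1.2.3-rc.1+build.7")
	b, err := semver.NewVersion("1.3.0")
	if err != nil {
		panic(err)
	}

	fmt.Println(a.LessThan(*b)) // true: 1.2.3-rc.1 sorts before 1.3.0
	a.BumpMinor()               // becomes 1.3.0; pre-release and metadata are cleared
	fmt.Println(a.Compare(*b))  // 0
}
```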
diff --git a/vendor/github.com/coreos/go-semver/semver/sort.go b/vendor/github.com/coreos/go-semver/semver/sort.go
new file mode 100644
index 0000000..e256b41
--- /dev/null
+++ b/vendor/github.com/coreos/go-semver/semver/sort.go
@@ -0,0 +1,38 @@
+// Copyright 2013-2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semver
+
+import (
+	"sort"
+)
+
+type Versions []*Version
+
+func (s Versions) Len() int {
+	return len(s)
+}
+
+func (s Versions) Swap(i, j int) {
+	s[i], s[j] = s[j], s[i]
+}
+
+func (s Versions) Less(i, j int) bool {
+	return s[i].LessThan(*s[j])
+}
+
+// Sort sorts the given slice of Version
+func Sort(versions []*Version) {
+	sort.Sort(Versions(versions))
+}
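Likewise, a short illustrative sketch of sorting with the helper above (not part of the vendored file):

```
package main

import (
	"fmt"

	"github.com/coreos/go-semver/semver"
)

func main() {
	versions := []*semver.Version{
		semver.New("1.10.0"),
		semver.New("1.2.0"),
		semver.New("1.2.0-alpha"),
	}
	semver.Sort(versions)
	for _, v := range versions {
		fmt.Println(v) // in order: 1.2.0-alpha, 1.2.0, 1.10.0 (numeric fields compare numerically)
	}
}
```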
diff --git a/vendor/github.com/coreos/go-systemd/LICENSE b/vendor/github.com/coreos/go-systemd/LICENSE
new file mode 100644
index 0000000..37ec93a
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/LICENSE
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the copyright
+owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other entities
+that control, are controlled by, or are under common control with that entity.
+For the purposes of this definition, "control" means (i) the power, direct or
+indirect, to cause the direction or management of such entity, whether by
+contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
+outstanding shares, or (iii) beneficial ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications, including
+but not limited to software source code, documentation source, and configuration
+files.
+
+"Object" form shall mean any form resulting from mechanical transformation or
+translation of a Source form, including but not limited to compiled object code,
+generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form, made
+available under the License, as indicated by a copyright notice that is included
+in or attached to the work (an example is provided in the Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form, that
+is based on (or derived from) the Work and for which the editorial revisions,
+annotations, elaborations, or other modifications represent, as a whole, an
+original work of authorship. For the purposes of this License, Derivative Works
+shall not include works that remain separable from, or merely link (or bind by
+name) to the interfaces of, the Work and Derivative Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original version
+of the Work and any modifications or additions to that Work or Derivative Works
+thereof, that is intentionally submitted to Licensor for inclusion in the Work
+by the copyright owner or by an individual or Legal Entity authorized to submit
+on behalf of the copyright owner. For the purposes of this definition,
+"submitted" means any form of electronic, verbal, or written communication sent
+to the Licensor or its representatives, including but not limited to
+communication on electronic mailing lists, source code control systems, and
+issue tracking systems that are managed by, or on behalf of, the Licensor for
+the purpose of discussing and improving the Work, but excluding communication
+that is conspicuously marked or otherwise designated in writing by the copyright
+owner as "Not a Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
+of whom a Contribution has been received by Licensor and subsequently
+incorporated within the Work.
+
+2. Grant of Copyright License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable copyright license to reproduce, prepare Derivative Works of,
+publicly display, publicly perform, sublicense, and distribute the Work and such
+Derivative Works in Source or Object form.
+
+3. Grant of Patent License.
+
+Subject to the terms and conditions of this License, each Contributor hereby
+grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
+irrevocable (except as stated in this section) patent license to make, have
+made, use, offer to sell, sell, import, and otherwise transfer the Work, where
+such license applies only to those patent claims licensable by such Contributor
+that are necessarily infringed by their Contribution(s) alone or by combination
+of their Contribution(s) with the Work to which such Contribution(s) was
+submitted. If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or contributory
+patent infringement, then any patent licenses granted to You under this License
+for that Work shall terminate as of the date such litigation is filed.
+
+4. Redistribution.
+
+You may reproduce and distribute copies of the Work or Derivative Works thereof
+in any medium, with or without modifications, and in Source or Object form,
+provided that You meet the following conditions:
+
+You must give any other recipients of the Work or Derivative Works a copy of
+this License; and
+You must cause any modified files to carry prominent notices stating that You
+changed the files; and
+You must retain, in the Source form of any Derivative Works that You distribute,
+all copyright, patent, trademark, and attribution notices from the Source form
+of the Work, excluding those notices that do not pertain to any part of the
+Derivative Works; and
+If the Work includes a "NOTICE" text file as part of its distribution, then any
+Derivative Works that You distribute must include a readable copy of the
+attribution notices contained within such NOTICE file, excluding those notices
+that do not pertain to any part of the Derivative Works, in at least one of the
+following places: within a NOTICE text file distributed as part of the
+Derivative Works; within the Source form or documentation, if provided along
+with the Derivative Works; or, within a display generated by the Derivative
+Works, if and wherever such third-party notices normally appear. The contents of
+the NOTICE file are for informational purposes only and do not modify the
+License. You may add Your own attribution notices within Derivative Works that
+You distribute, alongside or as an addendum to the NOTICE text from the Work,
+provided that such additional attribution notices cannot be construed as
+modifying the License.
+You may add Your own copyright statement to Your modifications and may provide
+additional or different license terms and conditions for use, reproduction, or
+distribution of Your modifications, or for any such Derivative Works as a whole,
+provided Your use, reproduction, and distribution of the Work otherwise complies
+with the conditions stated in this License.
+
+5. Submission of Contributions.
+
+Unless You explicitly state otherwise, any Contribution intentionally submitted
+for inclusion in the Work by You to the Licensor shall be under the terms and
+conditions of this License, without any additional terms or conditions.
+Notwithstanding the above, nothing herein shall supersede or modify the terms of
+any separate license agreement you may have executed with Licensor regarding
+such Contributions.
+
+6. Trademarks.
+
+This License does not grant permission to use the trade names, trademarks,
+service marks, or product names of the Licensor, except as required for
+reasonable and customary use in describing the origin of the Work and
+reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty.
+
+Unless required by applicable law or agreed to in writing, Licensor provides the
+Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
+including, without limitation, any warranties or conditions of TITLE,
+NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
+solely responsible for determining the appropriateness of using or
+redistributing the Work and assume any risks associated with Your exercise of
+permissions under this License.
+
+8. Limitation of Liability.
+
+In no event and under no legal theory, whether in tort (including negligence),
+contract, or otherwise, unless required by applicable law (such as deliberate
+and grossly negligent acts) or agreed to in writing, shall any Contributor be
+liable to You for damages, including any direct, indirect, special, incidental,
+or consequential damages of any character arising as a result of this License or
+out of the use or inability to use the Work (including but not limited to
+damages for loss of goodwill, work stoppage, computer failure or malfunction, or
+any and all other commercial damages or losses), even if such Contributor has
+been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability.
+
+While redistributing the Work or Derivative Works thereof, You may choose to
+offer, and charge a fee for, acceptance of support, warranty, indemnity, or
+other liability obligations and/or rights consistent with this License. However,
+in accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if You
+agree to indemnify, defend, and hold each Contributor harmless for any liability
+incurred by, or claims asserted against, such Contributor by reason of your
+accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included on
+the same "printed page" as the copyright notice for easier identification within
+third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/coreos/go-systemd/NOTICE b/vendor/github.com/coreos/go-systemd/NOTICE
new file mode 100644
index 0000000..23a0ada
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2018 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/go-systemd/journal/journal.go b/vendor/github.com/coreos/go-systemd/journal/journal.go
new file mode 100644
index 0000000..a0f4837
--- /dev/null
+++ b/vendor/github.com/coreos/go-systemd/journal/journal.go
@@ -0,0 +1,225 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package journal provides write bindings to the local systemd journal.
+// It is implemented in pure Go and connects to the journal directly over its
+// unix socket.
+//
+// To read from the journal, see the "sdjournal" package, which wraps the
+// sd-journal C API.
+//
+// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html
+package journal
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"syscall"
+	"unsafe"
+)
+
+// Priority of a journal message
+type Priority int
+
+const (
+	PriEmerg Priority = iota
+	PriAlert
+	PriCrit
+	PriErr
+	PriWarning
+	PriNotice
+	PriInfo
+	PriDebug
+)
+
+var (
+	// This can be overridden at build-time:
+	// https://github.com/golang/go/wiki/GcToolchainTricks#including-build-information-in-the-executable
+	journalSocket = "/run/systemd/journal/socket"
+
+	// unixConnPtr atomically holds the local unconnected Unix-domain socket.
+	// Concrete safe pointer type: *net.UnixConn
+	unixConnPtr unsafe.Pointer
+	// onceConn ensures that unixConnPtr is initialized exactly once.
+	onceConn sync.Once
+)
+
+func init() {
+	onceConn.Do(initConn)
+}
+
+// Enabled checks whether the local systemd journal is available for logging.
+func Enabled() bool {
+	onceConn.Do(initConn)
+
+	if (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr)) == nil {
+		return false
+	}
+
+	conn, err := net.Dial("unixgram", journalSocket)
+	if err != nil {
+		return false
+	}
+	// Close the probe connection so Enabled does not leak a descriptor.
+	conn.Close()
+
+	return true
+}
+
+// Send a message to the local systemd journal. vars is a map of journald
+// fields to values.  Fields must be composed of uppercase letters, numbers,
+// and underscores, but must not start with an underscore. Within these
+// restrictions, any arbitrary field name may be used.  Some names have special
+// significance: see the journalctl documentation
+// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html)
+// for more details.  vars may be nil.
+func Send(message string, priority Priority, vars map[string]string) error {
+	conn := (*net.UnixConn)(atomic.LoadPointer(&unixConnPtr))
+	if conn == nil {
+		return errors.New("could not initialize socket to journald")
+	}
+
+	socketAddr := &net.UnixAddr{
+		Name: journalSocket,
+		Net:  "unixgram",
+	}
+
+	data := new(bytes.Buffer)
+	appendVariable(data, "PRIORITY", strconv.Itoa(int(priority)))
+	appendVariable(data, "MESSAGE", message)
+	for k, v := range vars {
+		appendVariable(data, k, v)
+	}
+
+	_, _, err := conn.WriteMsgUnix(data.Bytes(), nil, socketAddr)
+	if err == nil {
+		return nil
+	}
+	if !isSocketSpaceError(err) {
+		return err
+	}
+
+	// Large log entry, send it via tempfile and ancillary-fd.
+	file, err := tempFd()
+	if err != nil {
+		return err
+	}
+	defer file.Close()
+	_, err = io.Copy(file, data)
+	if err != nil {
+		return err
+	}
+	rights := syscall.UnixRights(int(file.Fd()))
+	_, _, err = conn.WriteMsgUnix([]byte{}, rights, socketAddr)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// Print prints a message to the local systemd journal using Send().
+func Print(priority Priority, format string, a ...interface{}) error {
+	return Send(fmt.Sprintf(format, a...), priority, nil)
+}
+
+func appendVariable(w io.Writer, name, value string) {
+	if err := validVarName(name); err != nil {
+		fmt.Fprintf(os.Stderr, "variable name %s contains invalid character, ignoring\n", name)
+	}
+	if strings.ContainsRune(value, '\n') {
+		/* When the value contains a newline, we write:
+		 * - the variable name, followed by a newline
+		 * - the size (in 64bit little endian format)
+		 * - the data, followed by a newline
+		 */
+		fmt.Fprintln(w, name)
+		binary.Write(w, binary.LittleEndian, uint64(len(value)))
+		fmt.Fprintln(w, value)
+	} else {
+		/* just write the variable and value all on one line */
+		fmt.Fprintf(w, "%s=%s\n", name, value)
+	}
+}
+
+// validVarName validates a variable name to make sure journald will accept it.
+// The variable name must consist only of uppercase letters, numbers,
+// and underscores, and may not begin with an underscore:
+// https://www.freedesktop.org/software/systemd/man/sd_journal_print.html
+func validVarName(name string) error {
+	if name == "" {
+		return errors.New("Empty variable name")
+	} else if name[0] == '_' {
+		return errors.New("Variable name begins with an underscore")
+	}
+
+	for _, c := range name {
+		if !(('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_') {
+			return errors.New("Variable name contains invalid characters")
+		}
+	}
+	return nil
+}
+
+// isSocketSpaceError checks whether the error is signaling
+// an "overlarge message" condition.
+func isSocketSpaceError(err error) bool {
+	opErr, ok := err.(*net.OpError)
+	if !ok || opErr == nil {
+		return false
+	}
+
+	sysErr, ok := opErr.Err.(*os.SyscallError)
+	if !ok || sysErr == nil {
+		return false
+	}
+
+	return sysErr.Err == syscall.EMSGSIZE || sysErr.Err == syscall.ENOBUFS
+}
+
+// tempFd creates a temporary, unlinked file under `/dev/shm`.
+func tempFd() (*os.File, error) {
+	file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX")
+	if err != nil {
+		return nil, err
+	}
+	err = syscall.Unlink(file.Name())
+	if err != nil {
+		return nil, err
+	}
+	return file, nil
+}
+
+// initConn initializes the global `unixConnPtr` socket.
+// It is meant to be called exactly once, at program startup.
+func initConn() {
+	autobind, err := net.ResolveUnixAddr("unixgram", "")
+	if err != nil {
+		return
+	}
+
+	sock, err := net.ListenUnixgram("unixgram", autobind)
+	if err != nil {
+		return
+	}
+
+	atomic.StorePointer(&unixConnPtr, unsafe.Pointer(sock))
+}
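A hedged usage sketch for the write-side bindings above (illustrative only, not part of the vendored file; the extra field name is an example):

```
package main

import (
	"fmt"

	"github.com/coreos/go-systemd/journal"
)

func main() {
	if !journal.Enabled() {
		fmt.Println("journald socket not available; falling back to stderr")
		return
	}

	// Extra journald fields must be uppercase and must not start with an underscore.
	fields := map[string]string{"COMPONENT": "example"}
	if err := journal.Send("hello from the journal bindings", journal.PriInfo, fields); err != nil {
		fmt.Println("journal send failed:", err)
	}

	// Print is a Printf-style convenience wrapper around Send.
	_ = journal.Print(journal.PriDebug, "answer=%d", 42)
}
```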
diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE
new file mode 100644
index 0000000..e06d208
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/LICENSE
@@ -0,0 +1,202 @@
+Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright {yyyy} {name of copyright owner}
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE
new file mode 100644
index 0000000..b39ddfa
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/NOTICE
@@ -0,0 +1,5 @@
+CoreOS Project
+Copyright 2014 CoreOS, Inc
+
+This product includes software developed at CoreOS, Inc.
+(http://www.coreos.com/).
diff --git a/vendor/github.com/coreos/pkg/capnslog/README.md b/vendor/github.com/coreos/pkg/capnslog/README.md
new file mode 100644
index 0000000..f79dbfc
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/README.md
@@ -0,0 +1,39 @@
+# capnslog, the CoreOS logging package
+
+There are far too many logging packages out there, with varying licenses, far too many features (colorization, all sorts of log frameworks), or that are just a pain to use (lack of `Fatalln()`?).
+capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
+
+### Design Principles
+
+##### `package main` is the place where logging gets turned on and routed
+
+A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
+
+##### All log options are runtime-configurable. 
+
+It is still the job of `main` to expose these configurations. `main` may delegate this to, say, a configuration webhook, but does so explicitly.
+
+##### There is one log object per package. It is registered under its repository and package name.
+
+`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
+
+##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
+
+Splitting streams is probably not the job of your program, but rather, your log aggregation framework. If you must split output streams, again, `main` configures this and you can write a very simple two-output struct that satisfies io.Writer.
+
+Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
+
+##### Log objects are an interface
+
+An object knows best how to print itself. Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do fancier tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
+
+##### Log levels have specific meanings:
+
+  * Critical: Unrecoverable. Must fail.
+  * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost
+  * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
+  * Notice: Normal, but important (uncommon) log information.
+  * Info: Normal, working log information, everything is fine, but helpful notices for auditing or common operations.
+  * Debug: Everything is still fine, but even common operations may be logged, along with notices that are less helpful but greater in quantity.
+  * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
+
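A small sketch consistent with the notes above (illustrative only, not part of the vendored files). It assumes capnslog's PackageLogger constructor and leveled methods (NewPackageLogger, Infof, Warningf) from pkg_logger.go and SetFormatter from logmap.go, which are vendored alongside these files; the repository path is a placeholder:

```
package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

// One logger per package, registered under its repository and package name.
var plog = capnslog.NewPackageLogger("github.com/example/project", "main")

func main() {
	// main decides where logs go and how verbose each package may be.
	capnslog.SetFormatter(capnslog.NewPrettyFormatter(os.Stderr, false))
	capnslog.SetGlobalLogLevel(capnslog.INFO)

	plog.Infof("starting up, pid=%d", os.Getpid())
	plog.Warningf("this is only a sketch")
}
```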
diff --git a/vendor/github.com/coreos/pkg/capnslog/formatters.go b/vendor/github.com/coreos/pkg/capnslog/formatters.go
new file mode 100644
index 0000000..b305a84
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/formatters.go
@@ -0,0 +1,157 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"log"
+	"runtime"
+	"strings"
+	"time"
+)
+
+type Formatter interface {
+	Format(pkg string, level LogLevel, depth int, entries ...interface{})
+	Flush()
+}
+
+func NewStringFormatter(w io.Writer) Formatter {
+	return &StringFormatter{
+		w: bufio.NewWriter(w),
+	}
+}
+
+type StringFormatter struct {
+	w *bufio.Writer
+}
+
+func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) {
+	now := time.Now().UTC()
+	s.w.WriteString(now.Format(time.RFC3339))
+	s.w.WriteByte(' ')
+	writeEntries(s.w, pkg, l, i, entries...)
+	s.Flush()
+}
+
+func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) {
+	if pkg != "" {
+		w.WriteString(pkg + ": ")
+	}
+	str := fmt.Sprint(entries...)
+	endsInNL := strings.HasSuffix(str, "\n")
+	w.WriteString(str)
+	if !endsInNL {
+		w.WriteString("\n")
+	}
+}
+
+func (s *StringFormatter) Flush() {
+	s.w.Flush()
+}
+
+func NewPrettyFormatter(w io.Writer, debug bool) Formatter {
+	return &PrettyFormatter{
+		w:     bufio.NewWriter(w),
+		debug: debug,
+	}
+}
+
+type PrettyFormatter struct {
+	w     *bufio.Writer
+	debug bool
+}
+
+func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) {
+	now := time.Now()
+	ts := now.Format("2006-01-02 15:04:05")
+	c.w.WriteString(ts)
+	us := now.Nanosecond() / 1000 // microseconds, not milliseconds
+	c.w.WriteString(fmt.Sprintf(".%06d", us))
+	if c.debug {
+		_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+		if !ok {
+			file = "???"
+			line = 1
+		} else {
+			slash := strings.LastIndex(file, "/")
+			if slash >= 0 {
+				file = file[slash+1:]
+			}
+		}
+		if line < 0 {
+			line = 0 // not a real line number
+		}
+		c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line))
+	}
+	c.w.WriteString(fmt.Sprint(" ", l.Char(), " | "))
+	writeEntries(c.w, pkg, l, depth, entries...)
+	c.Flush()
+}
+
+func (c *PrettyFormatter) Flush() {
+	c.w.Flush()
+}
+
+// LogFormatter emulates the form of the traditional built-in logger.
+type LogFormatter struct {
+	logger *log.Logger
+	prefix string
+}
+
+// NewLogFormatter is a helper to produce a new LogFormatter struct. It uses the
+// golang log package to actually do the logging work so that logs look similar.
+func NewLogFormatter(w io.Writer, prefix string, flag int) Formatter {
+	return &LogFormatter{
+		logger: log.New(w, "", flag), // don't use prefix here
+		prefix: prefix,               // save it instead
+	}
+}
+
+// Format builds a log message for the LogFormatter. The LogLevel is ignored.
+func (lf *LogFormatter) Format(pkg string, _ LogLevel, _ int, entries ...interface{}) {
+	str := fmt.Sprint(entries...)
+	prefix := lf.prefix
+	if pkg != "" {
+		prefix = fmt.Sprintf("%s%s: ", prefix, pkg)
+	}
+	lf.logger.Output(5, fmt.Sprintf("%s%v", prefix, str)) // call depth is 5
+}
+
+// Flush is included so that the interface is complete, but is a no-op.
+func (lf *LogFormatter) Flush() {
+	// noop
+}
+
+// NilFormatter is a no-op log formatter that does nothing.
+type NilFormatter struct {
+}
+
+// NewNilFormatter is a helper to produce a new NilFormatter struct. It logs no
+// messages so that you can cause part of your logging to be silent.
+func NewNilFormatter() Formatter {
+	return &NilFormatter{}
+}
+
+// Format does nothing.
+func (_ *NilFormatter) Format(_ string, _ LogLevel, _ int, _ ...interface{}) {
+	// noop
+}
+
+// Flush is included so that the interface is complete, but is a no-op.
+func (_ *NilFormatter) Flush() {
+	// noop
+}
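For completeness, an illustrative sketch of wiring one of the formatters above from main (not part of the vendored file; SetFormatter itself is declared in capnslog's logmap.go, outside this hunk):

```
package main

import (
	"log"
	"os"

	"github.com/coreos/pkg/capnslog"
)

func main() {
	// Route capnslog output through a stdlib-style logger on stderr.
	capnslog.SetFormatter(capnslog.NewLogFormatter(os.Stderr, "example: ", log.LstdFlags))

	// Alternatively, a NilFormatter silences all capnslog-managed output.
	// capnslog.SetFormatter(capnslog.NewNilFormatter())
}
```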
diff --git a/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
new file mode 100644
index 0000000..426603e
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/glog_formatter.go
@@ -0,0 +1,96 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"bufio"
+	"bytes"
+	"io"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var pid = os.Getpid()
+
+type GlogFormatter struct {
+	StringFormatter
+}
+
+func NewGlogFormatter(w io.Writer) *GlogFormatter {
+	g := &GlogFormatter{}
+	g.w = bufio.NewWriter(w)
+	return g
+}
+
+func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) {
+	g.w.Write(GlogHeader(level, depth+1))
+	g.StringFormatter.Format(pkg, level, depth+1, entries...)
+}
+
+func GlogHeader(level LogLevel, depth int) []byte {
+	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
+	now := time.Now().UTC()
+	_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
+	if !ok {
+		file = "???"
+		line = 1
+	} else {
+		slash := strings.LastIndex(file, "/")
+		if slash >= 0 {
+			file = file[slash+1:]
+		}
+	}
+	if line < 0 {
+		line = 0 // not a real line number
+	}
+	buf := &bytes.Buffer{}
+	buf.Grow(30)
+	_, month, day := now.Date()
+	hour, minute, second := now.Clock()
+	buf.WriteString(level.Char())
+	twoDigits(buf, int(month))
+	twoDigits(buf, day)
+	buf.WriteByte(' ')
+	twoDigits(buf, hour)
+	buf.WriteByte(':')
+	twoDigits(buf, minute)
+	buf.WriteByte(':')
+	twoDigits(buf, second)
+	buf.WriteByte('.')
+	buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
+	buf.WriteByte('Z')
+	buf.WriteByte(' ')
+	buf.WriteString(strconv.Itoa(pid))
+	buf.WriteByte(' ')
+	buf.WriteString(file)
+	buf.WriteByte(':')
+	buf.WriteString(strconv.Itoa(line))
+	buf.WriteByte(']')
+	buf.WriteByte(' ')
+	return buf.Bytes()
+}
+
+const digits = "0123456789"
+
+func twoDigits(b *bytes.Buffer, d int) {
+	c2 := digits[d%10]
+	d /= 10
+	c1 := digits[d%10]
+	b.WriteByte(c1)
+	b.WriteByte(c2)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init.go b/vendor/github.com/coreos/pkg/capnslog/init.go
new file mode 100644
index 0000000..38ce6d2
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/init.go
@@ -0,0 +1,49 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+	"io"
+	"os"
+	"syscall"
+)
+
+// Here's where the opinionation comes in. We need some sensible defaults,
+// especially after taking over the log package. Your project (whatever it may
+// be) may see things differently. That's okay; there should be no defaults in
+// the main package that cannot be controlled or overridden programmatically;
+// otherwise it's a bug. The way to do that is to create your own init_log.go
+// file, much like this one.
+
+func init() {
+	initHijack()
+
+	// Go `log` package uses os.Stderr.
+	SetFormatter(NewDefaultFormatter(os.Stderr))
+	SetGlobalLogLevel(INFO)
+}
+
+func NewDefaultFormatter(out io.Writer) Formatter {
+	if syscall.Getppid() == 1 {
+		// We're running under init, which may be systemd.
+		f, err := NewJournaldFormatter()
+		if err == nil {
+			return f
+		}
+	}
+	return NewPrettyFormatter(out, false)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/github.com/coreos/pkg/capnslog/init_windows.go
new file mode 100644
index 0000000..4553050
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/init_windows.go
@@ -0,0 +1,25 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import "os"
+
+func init() {
+	initHijack()
+
+	// Go `log` package uses os.Stderr.
+	SetFormatter(NewPrettyFormatter(os.Stderr, false))
+	SetGlobalLogLevel(INFO)
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
new file mode 100644
index 0000000..72e0520
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/journald_formatter.go
@@ -0,0 +1,68 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+	"errors"
+	"fmt"
+	"os"
+	"path/filepath"
+
+	"github.com/coreos/go-systemd/journal"
+)
+
+func NewJournaldFormatter() (Formatter, error) {
+	if !journal.Enabled() {
+		return nil, errors.New("No systemd detected")
+	}
+	return &journaldFormatter{}, nil
+}
+
+type journaldFormatter struct{}
+
+func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
+	var pri journal.Priority
+	switch l {
+	case CRITICAL:
+		pri = journal.PriCrit
+	case ERROR:
+		pri = journal.PriErr
+	case WARNING:
+		pri = journal.PriWarning
+	case NOTICE:
+		pri = journal.PriNotice
+	case INFO:
+		pri = journal.PriInfo
+	case DEBUG:
+		pri = journal.PriDebug
+	case TRACE:
+		pri = journal.PriDebug
+	default:
+		panic("Unhandled loglevel")
+	}
+	msg := fmt.Sprint(entries...)
+	tags := map[string]string{
+		"PACKAGE":           pkg,
+		"SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]),
+	}
+	err := journal.Send(msg, pri, tags)
+	if err != nil {
+		fmt.Fprintln(os.Stderr, err)
+	}
+}
+
+func (j *journaldFormatter) Flush() {}
diff --git a/vendor/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
new file mode 100644
index 0000000..970086b
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/log_hijack.go
@@ -0,0 +1,39 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"log"
+)
+
+func initHijack() {
+	pkg := NewPackageLogger("log", "")
+	w := packageWriter{pkg}
+	log.SetFlags(0)
+	log.SetPrefix("")
+	log.SetOutput(w)
+}
+
+type packageWriter struct {
+	pl *PackageLogger
+}
+
+func (p packageWriter) Write(b []byte) (int, error) {
+	if p.pl.level < INFO {
+		return 0, nil
+	}
+	p.pl.internalLog(calldepth+2, INFO, string(b))
+	return len(b), nil
+}
diff --git a/vendor/github.com/coreos/pkg/capnslog/logmap.go b/vendor/github.com/coreos/pkg/capnslog/logmap.go
new file mode 100644
index 0000000..226b60c
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/logmap.go
@@ -0,0 +1,245 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"errors"
+	"strings"
+	"sync"
+)
+
+// LogLevel is the set of all log levels.
+type LogLevel int8
+
+const (
+	// CRITICAL is the lowest log level; only errors which will end the program will be propagated.
+	CRITICAL LogLevel = iota - 1
+	// ERROR is for errors that are not fatal but lead to troubling behavior.
+	ERROR
+	// WARNING is for conditions which are not fatal and not errors, but are unusual. Often sourced from misconfigurations.
+	WARNING
+	// NOTICE is for normal but significant conditions.
+	NOTICE
+	// INFO is a log level for common, everyday log updates.
+	INFO
+	// DEBUG is the default hidden level for more verbose updates about internal processes.
+	DEBUG
+	// TRACE is for (potentially) call by call tracing of programs.
+	TRACE
+)
+
+// Char returns a single-character representation of the log level.
+func (l LogLevel) Char() string {
+	switch l {
+	case CRITICAL:
+		return "C"
+	case ERROR:
+		return "E"
+	case WARNING:
+		return "W"
+	case NOTICE:
+		return "N"
+	case INFO:
+		return "I"
+	case DEBUG:
+		return "D"
+	case TRACE:
+		return "T"
+	default:
+		panic("Unhandled loglevel")
+	}
+}
+
+// String returns a multi-character representation of the log level.
+func (l LogLevel) String() string {
+	switch l {
+	case CRITICAL:
+		return "CRITICAL"
+	case ERROR:
+		return "ERROR"
+	case WARNING:
+		return "WARNING"
+	case NOTICE:
+		return "NOTICE"
+	case INFO:
+		return "INFO"
+	case DEBUG:
+		return "DEBUG"
+	case TRACE:
+		return "TRACE"
+	default:
+		panic("Unhandled loglevel")
+	}
+}
+
+// Update using the given string value. Fulfills the flag.Value interface.
+func (l *LogLevel) Set(s string) error {
+	value, err := ParseLevel(s)
+	if err != nil {
+		return err
+	}
+
+	*l = value
+	return nil
+}
+
+// Returns an empty string, only here to fulfill the pflag.Value interface.
+func (l *LogLevel) Type() string {
+	return ""
+}
+
+// ParseLevel translates some potential loglevel strings into their corresponding levels.
+func ParseLevel(s string) (LogLevel, error) {
+	switch s {
+	case "CRITICAL", "C":
+		return CRITICAL, nil
+	case "ERROR", "0", "E":
+		return ERROR, nil
+	case "WARNING", "1", "W":
+		return WARNING, nil
+	case "NOTICE", "2", "N":
+		return NOTICE, nil
+	case "INFO", "3", "I":
+		return INFO, nil
+	case "DEBUG", "4", "D":
+		return DEBUG, nil
+	case "TRACE", "5", "T":
+		return TRACE, nil
+	}
+	return CRITICAL, errors.New("couldn't parse log level " + s)
+}
+
+type RepoLogger map[string]*PackageLogger
+
+type loggerStruct struct {
+	sync.Mutex
+	repoMap   map[string]RepoLogger
+	formatter Formatter
+}
+
+// logger is the global logger
+var logger = new(loggerStruct)
+
+// SetGlobalLogLevel sets the log level for all packages in all repositories
+// registered with capnslog.
+func SetGlobalLogLevel(l LogLevel) {
+	logger.Lock()
+	defer logger.Unlock()
+	for _, r := range logger.repoMap {
+		r.setRepoLogLevelInternal(l)
+	}
+}
+
+// GetRepoLogger may return the handle to the repository's set of packages' loggers.
+func GetRepoLogger(repo string) (RepoLogger, error) {
+	logger.Lock()
+	defer logger.Unlock()
+	r, ok := logger.repoMap[repo]
+	if !ok {
+		return nil, errors.New("no packages registered for repo " + repo)
+	}
+	return r, nil
+}
+
+// MustRepoLogger returns the handle to the repository's packages' loggers.
+func MustRepoLogger(repo string) RepoLogger {
+	r, err := GetRepoLogger(repo)
+	if err != nil {
+		panic(err)
+	}
+	return r
+}
+
+// SetRepoLogLevel sets the log level for all packages in the repository.
+func (r RepoLogger) SetRepoLogLevel(l LogLevel) {
+	logger.Lock()
+	defer logger.Unlock()
+	r.setRepoLogLevelInternal(l)
+}
+
+func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) {
+	for _, v := range r {
+		v.level = l
+	}
+}
+
+// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in
+// order, and returns a map of the results, for use in SetLogLevel.
+func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) {
+	setlist := strings.Split(conf, ",")
+	out := make(map[string]LogLevel)
+	for _, setstring := range setlist {
+		setting := strings.Split(setstring, "=")
+		if len(setting) != 2 {
+			return nil, errors.New("oddly structured `pkg=level` option: " + setstring)
+		}
+		l, err := ParseLevel(setting[1])
+		if err != nil {
+			return nil, err
+		}
+		out[setting[0]] = l
+	}
+	return out, nil
+}
+
+// SetLogLevel takes a map of package names within a repository to their desired
+// loglevel, and sets the levels appropriately. Unknown packages are ignored.
+// "*" is a special package name that corresponds to all packages, and will be
+// processed first.
+func (r RepoLogger) SetLogLevel(m map[string]LogLevel) {
+	logger.Lock()
+	defer logger.Unlock()
+	if l, ok := m["*"]; ok {
+		r.setRepoLogLevelInternal(l)
+	}
+	for k, v := range m {
+		l, ok := r[k]
+		if !ok {
+			continue
+		}
+		l.level = v
+	}
+}
+
+// SetFormatter sets the formatting function for all logs.
+func SetFormatter(f Formatter) {
+	logger.Lock()
+	defer logger.Unlock()
+	logger.formatter = f
+}
+
+// NewPackageLogger creates a package logger object.
+// This should be defined as a global var in your package, referencing your repo.
+func NewPackageLogger(repo string, pkg string) (p *PackageLogger) {
+	logger.Lock()
+	defer logger.Unlock()
+	if logger.repoMap == nil {
+		logger.repoMap = make(map[string]RepoLogger)
+	}
+	r, rok := logger.repoMap[repo]
+	if !rok {
+		logger.repoMap[repo] = make(RepoLogger)
+		r = logger.repoMap[repo]
+	}
+	p, pok := r[pkg]
+	if !pok {
+		r[pkg] = &PackageLogger{
+			pkg:   pkg,
+			level: INFO,
+		}
+		p = r[pkg]
+	}
+	return
+}
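A minimal sketch of the level-management API above (`MustRepoLogger`, `ParseLogLevelConfig`, `SetLogLevel`, `SetGlobalLogLevel`); the repository and package names are hypothetical placeholders, and a Formatter still has to be installed for any output to appear (see the syslog sketch further below):

```go
package main

import (
	"github.com/coreos/pkg/capnslog"
)

// Each package normally declares its own logger as a package-level var;
// "github.com/example/app" and "server" are placeholder names.
var slog = capnslog.NewPackageLogger("github.com/example/app", "server")

func main() {
	// Baseline level for every package registered with capnslog.
	capnslog.SetGlobalLogLevel(capnslog.INFO)

	// Per-package overrides within one repository, e.g. from a --log-level flag.
	repo := capnslog.MustRepoLogger("github.com/example/app")
	levels, err := repo.ParseLogLevelConfig("*=NOTICE,server=DEBUG")
	if err != nil {
		panic(err)
	}
	repo.SetLogLevel(levels)

	slog.Noticef("log levels configured: %v", levels)
}
```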
diff --git a/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
new file mode 100644
index 0000000..00ff371
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/pkg_logger.go
@@ -0,0 +1,191 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package capnslog
+
+import (
+	"fmt"
+	"os"
+)
+
+type PackageLogger struct {
+	pkg   string
+	level LogLevel
+}
+
+const calldepth = 2
+
+func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) {
+	logger.Lock()
+	defer logger.Unlock()
+	if inLevel != CRITICAL && p.level < inLevel {
+		return
+	}
+	if logger.formatter != nil {
+		logger.formatter.Format(p.pkg, inLevel, depth+1, entries...)
+	}
+}
+
+// SetLevel allows users to change the current logging level.
+func (p *PackageLogger) SetLevel(l LogLevel) {
+	logger.Lock()
+	defer logger.Unlock()
+	p.level = l
+}
+
+// LevelAt checks if the given log level will be output under the current setting.
+func (p *PackageLogger) LevelAt(l LogLevel) bool {
+	logger.Lock()
+	defer logger.Unlock()
+	return p.level >= l
+}
+
+// Log a formatted string at any level between ERROR and TRACE
+func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) {
+	p.internalLog(calldepth, l, fmt.Sprintf(format, args...))
+}
+
+// Log a message at any level between ERROR and TRACE
+func (p *PackageLogger) Log(l LogLevel, args ...interface{}) {
+	p.internalLog(calldepth, l, fmt.Sprint(args...))
+}
+
+// log stdlib compatibility
+
+func (p *PackageLogger) Println(args ...interface{}) {
+	p.internalLog(calldepth, INFO, fmt.Sprintln(args...))
+}
+
+func (p *PackageLogger) Printf(format string, args ...interface{}) {
+	p.Logf(INFO, format, args...)
+}
+
+func (p *PackageLogger) Print(args ...interface{}) {
+	p.internalLog(calldepth, INFO, fmt.Sprint(args...))
+}
+
+// Panic and fatal
+
+func (p *PackageLogger) Panicf(format string, args ...interface{}) {
+	s := fmt.Sprintf(format, args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	panic(s)
+}
+
+func (p *PackageLogger) Panic(args ...interface{}) {
+	s := fmt.Sprint(args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	panic(s)
+}
+
+func (p *PackageLogger) Panicln(args ...interface{}) {
+	s := fmt.Sprintln(args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	panic(s)
+}
+
+func (p *PackageLogger) Fatalf(format string, args ...interface{}) {
+	p.Logf(CRITICAL, format, args...)
+	os.Exit(1)
+}
+
+func (p *PackageLogger) Fatal(args ...interface{}) {
+	s := fmt.Sprint(args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	os.Exit(1)
+}
+
+func (p *PackageLogger) Fatalln(args ...interface{}) {
+	s := fmt.Sprintln(args...)
+	p.internalLog(calldepth, CRITICAL, s)
+	os.Exit(1)
+}
+
+// Error Functions
+
+func (p *PackageLogger) Errorf(format string, args ...interface{}) {
+	p.Logf(ERROR, format, args...)
+}
+
+func (p *PackageLogger) Error(entries ...interface{}) {
+	p.internalLog(calldepth, ERROR, entries...)
+}
+
+// Warning Functions
+
+func (p *PackageLogger) Warningf(format string, args ...interface{}) {
+	p.Logf(WARNING, format, args...)
+}
+
+func (p *PackageLogger) Warning(entries ...interface{}) {
+	p.internalLog(calldepth, WARNING, entries...)
+}
+
+// Notice Functions
+
+func (p *PackageLogger) Noticef(format string, args ...interface{}) {
+	p.Logf(NOTICE, format, args...)
+}
+
+func (p *PackageLogger) Notice(entries ...interface{}) {
+	p.internalLog(calldepth, NOTICE, entries...)
+}
+
+// Info Functions
+
+func (p *PackageLogger) Infof(format string, args ...interface{}) {
+	p.Logf(INFO, format, args...)
+}
+
+func (p *PackageLogger) Info(entries ...interface{}) {
+	p.internalLog(calldepth, INFO, entries...)
+}
+
+// Debug Functions
+
+func (p *PackageLogger) Debugf(format string, args ...interface{}) {
+	if p.level < DEBUG {
+		return
+	}
+	p.Logf(DEBUG, format, args...)
+}
+
+func (p *PackageLogger) Debug(entries ...interface{}) {
+	if p.level < DEBUG {
+		return
+	}
+	p.internalLog(calldepth, DEBUG, entries...)
+}
+
+// Trace Functions
+
+func (p *PackageLogger) Tracef(format string, args ...interface{}) {
+	if p.level < TRACE {
+		return
+	}
+	p.Logf(TRACE, format, args...)
+}
+
+func (p *PackageLogger) Trace(entries ...interface{}) {
+	if p.level < TRACE {
+		return
+	}
+	p.internalLog(calldepth, TRACE, entries...)
+}
+
+func (p *PackageLogger) Flush() {
+	logger.Lock()
+	defer logger.Unlock()
+	logger.formatter.Flush()
+}
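The leveled helpers above map one-to-one onto the LogLevel constants, and `LevelAt` lets callers skip work when a level is filtered out. A short sketch of that guard pattern (the package logger and `expensiveDump` helper are hypothetical):

```go
package worker

import (
	"github.com/coreos/pkg/capnslog"
)

// plog is a placeholder package logger for this sketch.
var plog = capnslog.NewPackageLogger("github.com/example/app", "worker")

// expensiveDump stands in for any costly-to-build debug payload.
func expensiveDump() string { return "state dump" }

func reportFailure(attempt int, err error) {
	// Only build the dump when DEBUG output would actually be emitted.
	if plog.LevelAt(capnslog.DEBUG) {
		plog.Debugf("failure detail: %s", expensiveDump())
	}
	plog.Warningf("attempt %d failed: %v", attempt, err)
}
```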
diff --git a/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
new file mode 100644
index 0000000..4be5a1f
--- /dev/null
+++ b/vendor/github.com/coreos/pkg/capnslog/syslog_formatter.go
@@ -0,0 +1,65 @@
+// Copyright 2015 CoreOS, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// +build !windows
+
+package capnslog
+
+import (
+	"fmt"
+	"log/syslog"
+)
+
+func NewSyslogFormatter(w *syslog.Writer) Formatter {
+	return &syslogFormatter{w}
+}
+
+func NewDefaultSyslogFormatter(tag string) (Formatter, error) {
+	w, err := syslog.New(syslog.LOG_DEBUG, tag)
+	if err != nil {
+		return nil, err
+	}
+	return NewSyslogFormatter(w), nil
+}
+
+type syslogFormatter struct {
+	w *syslog.Writer
+}
+
+func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
+	for _, entry := range entries {
+		str := fmt.Sprint(entry)
+		switch l {
+		case CRITICAL:
+			s.w.Crit(str)
+		case ERROR:
+			s.w.Err(str)
+		case WARNING:
+			s.w.Warning(str)
+		case NOTICE:
+			s.w.Notice(str)
+		case INFO:
+			s.w.Info(str)
+		case DEBUG:
+			s.w.Debug(str)
+		case TRACE:
+			s.w.Debug(str)
+		default:
+			panic("Unhandled loglevel")
+		}
+	}
+}
+
+func (s *syslogFormatter) Flush() {
+}
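Putting the pieces together, a minimal sketch that wires the syslog formatter above into the package-logger API; the tag and package names are placeholders, and it assumes a non-Windows host with a local syslog daemon (per the build tag on syslog_formatter.go):

```go
package main

import (
	"os"

	"github.com/coreos/pkg/capnslog"
)

var plog = capnslog.NewPackageLogger("github.com/example/app", "main")

func main() {
	// Send all capnslog output to the local syslog daemon under the given tag.
	f, err := capnslog.NewDefaultSyslogFormatter("example-app")
	if err != nil {
		os.Exit(1)
	}
	capnslog.SetFormatter(f)
	capnslog.SetGlobalLogLevel(capnslog.DEBUG)

	plog.Infof("service starting, pid=%d", os.Getpid())
	plog.Debug("debug output is emitted because the global level is DEBUG")
}
```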
diff --git a/vendor/github.com/dgryski/go-rendezvous/LICENSE b/vendor/github.com/dgryski/go-rendezvous/LICENSE
new file mode 100644
index 0000000..22080f7
--- /dev/null
+++ b/vendor/github.com/dgryski/go-rendezvous/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2017-2020 Damian Gryski <damian@gryski.com>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/dgryski/go-rendezvous/rdv.go b/vendor/github.com/dgryski/go-rendezvous/rdv.go
new file mode 100644
index 0000000..7a6f820
--- /dev/null
+++ b/vendor/github.com/dgryski/go-rendezvous/rdv.go
@@ -0,0 +1,79 @@
+package rendezvous
+
+type Rendezvous struct {
+	nodes map[string]int
+	nstr  []string
+	nhash []uint64
+	hash  Hasher
+}
+
+type Hasher func(s string) uint64
+
+func New(nodes []string, hash Hasher) *Rendezvous {
+	r := &Rendezvous{
+		nodes: make(map[string]int, len(nodes)),
+		nstr:  make([]string, len(nodes)),
+		nhash: make([]uint64, len(nodes)),
+		hash:  hash,
+	}
+
+	for i, n := range nodes {
+		r.nodes[n] = i
+		r.nstr[i] = n
+		r.nhash[i] = hash(n)
+	}
+
+	return r
+}
+
+func (r *Rendezvous) Lookup(k string) string {
+	// short-circuit if we're empty
+	if len(r.nodes) == 0 {
+		return ""
+	}
+
+	khash := r.hash(k)
+
+	var midx int
+	var mhash = xorshiftMult64(khash ^ r.nhash[0])
+
+	for i, nhash := range r.nhash[1:] {
+		if h := xorshiftMult64(khash ^ nhash); h > mhash {
+			midx = i + 1
+			mhash = h
+		}
+	}
+
+	return r.nstr[midx]
+}
+
+func (r *Rendezvous) Add(node string) {
+	r.nodes[node] = len(r.nstr)
+	r.nstr = append(r.nstr, node)
+	r.nhash = append(r.nhash, r.hash(node))
+}
+
+func (r *Rendezvous) Remove(node string) {
+	// find index of node to remove
+	nidx := r.nodes[node]
+
+	// move the last node into the removed slot, then truncate the slices
+	l := len(r.nstr) - 1
+	r.nstr[nidx] = r.nstr[l]
+	r.nstr = r.nstr[:l]
+
+	r.nhash[nidx] = r.nhash[l]
+	r.nhash = r.nhash[:l]
+
+	// update the map
+	delete(r.nodes, node)
+	if nidx < l {
+		// the node that was moved now lives at nidx
+		moved := r.nstr[nidx]
+		r.nodes[moved] = nidx
+	}
+}
+
+func xorshiftMult64(x uint64) uint64 {
+	x ^= x >> 12 // a
+	x ^= x << 25 // b
+	x ^= x >> 27 // c
+	return x * 2685821657736338717
+}
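rdv.go implements rendezvous (highest-random-weight) hashing: `Lookup` mixes the key hash with every node hash through `xorshiftMult64` and picks the node with the highest score, so all callers that share the same node set agree on an owner for each key. A minimal usage sketch, with an FNV-1a hasher from the standard library and placeholder shard names:

```go
package main

import (
	"fmt"
	"hash/fnv"

	"github.com/dgryski/go-rendezvous"
)

// fnvHash adapts the stdlib FNV-1a hash to the rendezvous.Hasher signature.
func fnvHash(s string) uint64 {
	h := fnv.New64a()
	h.Write([]byte(s))
	return h.Sum64()
}

func main() {
	// Any stable identifiers work as node names; these are placeholders.
	r := rendezvous.New([]string{"shard-a", "shard-b", "shard-c"}, fnvHash)

	fmt.Println(r.Lookup("user:42")) // same answer for every caller with this node set

	// Adding or removing a node only remaps the keys owned by that node.
	r.Add("shard-d")
	r.Remove("shard-b")
	fmt.Println(r.Lookup("user:42"))
}
```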
diff --git a/vendor/github.com/go-redis/redis/v8/.gitignore b/vendor/github.com/go-redis/redis/v8/.gitignore
new file mode 100644
index 0000000..b975a7b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.gitignore
@@ -0,0 +1,3 @@
+*.rdb
+testdata/*/
+.idea/
diff --git a/vendor/github.com/go-redis/redis/v8/.golangci.yml b/vendor/github.com/go-redis/redis/v8/.golangci.yml
new file mode 100644
index 0000000..de51455
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.golangci.yml
@@ -0,0 +1,4 @@
+run:
+  concurrency: 8
+  deadline: 5m
+  tests: false
diff --git a/vendor/github.com/go-redis/redis/v8/.prettierrc.yml b/vendor/github.com/go-redis/redis/v8/.prettierrc.yml
new file mode 100644
index 0000000..8b7f044
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/.prettierrc.yml
@@ -0,0 +1,4 @@
+semi: false
+singleQuote: true
+proseWrap: always
+printWidth: 100
diff --git a/vendor/github.com/go-redis/redis/v8/CHANGELOG.md b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
new file mode 100644
index 0000000..195e519
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/CHANGELOG.md
@@ -0,0 +1,177 @@
+## [8.11.5](https://github.com/go-redis/redis/compare/v8.11.4...v8.11.5) (2022-03-17)
+
+
+### Bug Fixes
+
+* add missing Expire methods to Cmdable ([17e3b43](https://github.com/go-redis/redis/commit/17e3b43879d516437ada71cf9c0deac6a382ed9a))
+* add whitespace for avoid unlikely colisions ([7f7c181](https://github.com/go-redis/redis/commit/7f7c1817617cfec909efb13d14ad22ef05a6ad4c))
+* example/otel compile error ([#2028](https://github.com/go-redis/redis/issues/2028)) ([187c07c](https://github.com/go-redis/redis/commit/187c07c41bf68dc3ab280bc3a925e960bbef6475))
+* **extra/redisotel:** set span.kind attribute to client ([065b200](https://github.com/go-redis/redis/commit/065b200070b41e6e949710b4f9e01b50ccc60ab2))
+* format ([96f53a0](https://github.com/go-redis/redis/commit/96f53a0159a28affa94beec1543a62234e7f8b32))
+* invalid type assert in stringArg ([de6c131](https://github.com/go-redis/redis/commit/de6c131865b8263400c8491777b295035f2408e4))
+* rename Golang to Go ([#2030](https://github.com/go-redis/redis/issues/2030)) ([b82a2d9](https://github.com/go-redis/redis/commit/b82a2d9d4d2de7b7cbe8fcd4895be62dbcacacbc))
+* set timeout for WAIT command. Fixes [#1963](https://github.com/go-redis/redis/issues/1963) ([333fee1](https://github.com/go-redis/redis/commit/333fee1a8fd98a2fbff1ab187c1b03246a7eb01f))
+* update some argument counts in pre-allocs ([f6974eb](https://github.com/go-redis/redis/commit/f6974ebb5c40a8adf90d2cacab6dc297f4eba4c2))
+
+
+### Features
+
+* Add redis v7's NX, XX, GT, LT expire variants ([e19bbb2](https://github.com/go-redis/redis/commit/e19bbb26e2e395c6e077b48d80d79e99f729a8b8))
+* add support for acl sentinel auth in universal client ([ab0ccc4](https://github.com/go-redis/redis/commit/ab0ccc47413f9b2a6eabc852fed5005a3ee1af6e))
+* add support for COPY command ([#2016](https://github.com/go-redis/redis/issues/2016)) ([730afbc](https://github.com/go-redis/redis/commit/730afbcffb93760e8a36cc06cfe55ab102b693a7))
+* add support for passing extra attributes added to spans ([39faaa1](https://github.com/go-redis/redis/commit/39faaa171523834ba527c9789710c4fde87f5a2e))
+* add support for time.Duration write and scan ([2f1b74e](https://github.com/go-redis/redis/commit/2f1b74e20cdd7719b2aecf0768d3e3ae7c3e781b))
+* **redisotel:** ability to override TracerProvider ([#1998](https://github.com/go-redis/redis/issues/1998)) ([bf8d4aa](https://github.com/go-redis/redis/commit/bf8d4aa60c00366cda2e98c3ddddc8cf68507417))
+* set net.peer.name and net.peer.port in otel example ([69bf454](https://github.com/go-redis/redis/commit/69bf454f706204211cd34835f76b2e8192d3766d))
+
+
+
+## [8.11.4](https://github.com/go-redis/redis/compare/v8.11.3...v8.11.4) (2021-10-04)
+
+
+### Features
+
+* add acl auth support for sentinels ([f66582f](https://github.com/go-redis/redis/commit/f66582f44f3dc3a4705a5260f982043fde4aa634))
+* add Cmd.{String,Int,Float,Bool}Slice helpers and an example ([5d3d293](https://github.com/go-redis/redis/commit/5d3d293cc9c60b90871e2420602001463708ce24))
+* add SetVal method for each command ([168981d](https://github.com/go-redis/redis/commit/168981da2d84ee9e07d15d3e74d738c162e264c4))
+
+
+
+## v8.11
+
+- Remove OpenTelemetry metrics.
+- Support more Redis commands and options.
+
+## v8.10
+
+- Removed extra OpenTelemetry spans from go-redis core. Now go-redis instrumentation only adds a
+  single span with a Redis command (instead of 4 spans). There are multiple reasons behind this
+  decision:
+
+  - Traces become smaller and less noisy.
+  - It may be costly to process those 3 extra spans for each query.
+  - go-redis no longer depends on OpenTelemetry.
+
+  Eventually we hope to replace the information that we no longer collect with OpenTelemetry
+  Metrics.
+
+## v8.9
+
+- Changed `PubSub.Channel` to only rely on `Ping` result. You can now use `WithChannelSize`,
+  `WithChannelHealthCheckInterval`, and `WithChannelSendTimeout` to override default settings.
+
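+A minimal sketch of the channel options above, assuming `rdb` is a connected client and that the
+helpers take an int size and `time.Duration` values:
+
+```go
+msgCh := rdb.Subscribe(ctx, "events").Channel(
+    redis.WithChannelSize(1000),
+    redis.WithChannelHealthCheckInterval(30*time.Second),
+    redis.WithChannelSendTimeout(time.Minute),
+)
+for msg := range msgCh {
+    fmt.Println(msg.Channel, msg.Payload)
+}
+```
+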
+## v8.8
+
+- To make updating easier, extra modules now have the same version as go-redis does. That means that
+  you need to update your imports:
+
+```
+github.com/go-redis/redis/extra/redisotel -> github.com/go-redis/redis/extra/redisotel/v8
+github.com/go-redis/redis/extra/rediscensus -> github.com/go-redis/redis/extra/rediscensus/v8
+```
+
+## v8.5
+
+- [knadh](https://github.com/knadh) contributed long-awaited ability to scan Redis Hash into a
+  struct:
+
+```go
+err := rdb.HGetAll(ctx, "hash").Scan(&data)
+
+err := rdb.MGet(ctx, "key1", "key2").Scan(&data)
+```
+
+- Please check [redismock](https://github.com/go-redis/redismock) by
+  [monkey92t](https://github.com/monkey92t) if you are looking for mocking Redis Client.
+
+## v8
+
+- All commands require `context.Context` as a first argument, e.g. `rdb.Ping(ctx)`. If you are not
+  using `context.Context` yet, the simplest option is to define a global package variable
+  `var ctx = context.TODO()` and use it wherever `ctx` is required (see the short sketch at the end
+  of this section).
+
+- Full support for `context.Context` canceling.
+
+- Added `redis.NewFailoverClusterClient` that supports routing read-only commands to a slave node.
+
+- Added `redisext.OpenTelemetryHook` that adds
+  [Redis OpenTelemetry instrumentation](https://redis.uptrace.dev/tracing/).
+
+- Redis slow log support.
+
+- Ring uses Rendezvous Hashing by default which provides better distribution. You need to move
+  existing keys to a new location or keys will be inaccessible / lost. To use old hashing scheme:
+
+```go
+import "github.com/golang/groupcache/consistenthash"
+
+ring := redis.NewRing(&redis.RingOptions{
+    NewConsistentHash: func() {
+        return consistenthash.New(100, crc32.ChecksumIEEE)
+    },
+})
+```
+
+- `ClusterOptions.MaxRedirects` default value is changed from 8 to 3.
+- `Options.MaxRetries` default value is changed from 0 to 3.
+
+- `Cluster.ForEachNode` is renamed to `ForEachShard` for consistency with `Ring`.
+
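+A minimal sketch of the `context.Context` change above (assuming `rdb` is any client created with
+`redis.NewClient` or `redis.NewClusterClient`):
+
+```go
+import "context"
+
+var ctx = context.TODO() // or a real request context once it is threaded through
+
+// Every command now takes ctx first; cancelling ctx aborts the command.
+if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
+    panic(err)
+}
+```
+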
+## v7.3
+
+- New option `Options.Username` which causes the client to use `AuthACL`. Be aware of this if your
+  connection URL contains a username.
+
+## v7.2
+
+- Existing `HMSet` is renamed to `HSet` and old deprecated `HMSet` is restored for Redis 3 users.
+
+## v7.1
+
+- Existing `Cmd.String` is renamed to `Cmd.Text`. New `Cmd.String` implements `fmt.Stringer`
+  interface.
+
+## v7
+
+- _Important_. Tx.Pipeline now returns a non-transactional pipeline. Use Tx.TxPipeline for a
+  transactional pipeline.
+- WrapProcess is replaced with the more convenient AddHook, which has access to context.Context.
+- WithContext can no longer be used to create a shallow copy of the client.
+- New methods ProcessContext, DoContext, and ExecContext.
+- Client respects Context.Deadline when setting net.Conn deadline.
+- Client listens on Context.Done while waiting for a connection from the pool and returns an error
+  when the context is cancelled.
+- Add PubSub.ChannelWithSubscriptions that sends `*Subscription` in addition to `*Message` to allow
+  detecting reconnections.
+- `time.Time` is now marshalled in RFC3339 format. `rdb.Get("foo").Time()` helper is added to parse
+  the time.
+- `SetLimiter` is removed; use `Options.Limiter` instead.
+- `HMSet` is deprecated as of Redis v4.
+
+## v6.15
+
+- Cluster and Ring pipelines process commands for each node in its own goroutine.
+
+## 6.14
+
+- Added Options.MinIdleConns.
+- Added Options.MaxConnAge.
+- PoolStats.FreeConns is renamed to PoolStats.IdleConns.
+- Add Client.Do to simplify creating custom commands.
+- Add Cmd.String, Cmd.Int, Cmd.Int64, Cmd.Uint64, Cmd.Float64, and Cmd.Bool helpers.
+- Lower memory usage.
+
+## v6.13
+
+- Ring got new options called `HashReplicas` and `Hash`. It is recommended to set
+  `HashReplicas = 1000` for better key distribution between shards.
+- Cluster client was optimized to use much less memory when reloading cluster state.
+- PubSub.ReceiveMessage is re-worked to not use ReceiveTimeout so it does not lose data when a
+  timeout occurs. In most cases it is recommended to use PubSub.Channel instead.
+- Dialer.KeepAlive is set to 5 minutes by default.
+
+## v6.12
+
+- ClusterClient got a new option called `ClusterSlots` which allows building a cluster of normal
+  Redis servers that don't have cluster mode enabled. See
+  https://godoc.org/github.com/go-redis/redis#example-NewClusterClient--ManualSetup
diff --git a/vendor/github.com/go-redis/redis/v8/LICENSE b/vendor/github.com/go-redis/redis/v8/LICENSE
new file mode 100644
index 0000000..298bed9
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/LICENSE
@@ -0,0 +1,25 @@
+Copyright (c) 2013 The github.com/go-redis/redis Authors.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/go-redis/redis/v8/Makefile b/vendor/github.com/go-redis/redis/v8/Makefile
new file mode 100644
index 0000000..a4cfe05
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/Makefile
@@ -0,0 +1,35 @@
+PACKAGE_DIRS := $(shell find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+test: testdeps
+	go test ./...
+	go test ./... -short -race
+	go test ./... -run=NONE -bench=. -benchmem
+	env GOOS=linux GOARCH=386 go test ./...
+	go vet
+
+testdeps: testdata/redis/src/redis-server
+
+bench: testdeps
+	go test ./... -test.run=NONE -test.bench=. -test.benchmem
+
+.PHONY: all test testdeps bench
+
+testdata/redis:
+	mkdir -p $@
+	wget -qO- https://download.redis.io/releases/redis-6.2.5.tar.gz | tar xvz --strip-components=1 -C $@
+
+testdata/redis/src/redis-server: testdata/redis
+	cd $< && make all
+
+fmt:
+	gofmt -w -s ./
+	goimports -w  -local github.com/go-redis/redis ./
+
+go_mod_tidy:
+	go get -u && go mod tidy
+	set -e; for dir in $(PACKAGE_DIRS); do \
+	  echo "go mod tidy in $${dir}"; \
+	  (cd "$${dir}" && \
+	    go get -u && \
+	    go mod tidy); \
+	done
diff --git a/vendor/github.com/go-redis/redis/v8/README.md b/vendor/github.com/go-redis/redis/v8/README.md
new file mode 100644
index 0000000..f3b6a01
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/README.md
@@ -0,0 +1,175 @@
+# Redis client for Go
+
+![build workflow](https://github.com/go-redis/redis/actions/workflows/build.yml/badge.svg)
+[![PkgGoDev](https://pkg.go.dev/badge/github.com/go-redis/redis/v8)](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
+[![Documentation](https://img.shields.io/badge/redis-documentation-informational)](https://redis.uptrace.dev/)
+
+go-redis is brought to you by :star: [**uptrace/uptrace**](https://github.com/uptrace/uptrace).
+Uptrace is an open source and blazingly fast **distributed tracing** backend powered by
+OpenTelemetry and ClickHouse. Give it a star as well!
+
+## Resources
+
+- [Discussions](https://github.com/go-redis/redis/discussions)
+- [Documentation](https://redis.uptrace.dev)
+- [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc)
+- [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples)
+- [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app)
+
+Other projects you may like:
+
+- [Bun](https://bun.uptrace.dev) - fast and simple SQL client for PostgreSQL, MySQL, and SQLite.
+- [BunRouter](https://bunrouter.uptrace.dev/) - fast and flexible HTTP router for Go.
+
+## Ecosystem
+
+- [Redis Mock](https://github.com/go-redis/redismock)
+- [Distributed Locks](https://github.com/bsm/redislock)
+- [Redis Cache](https://github.com/go-redis/cache)
+- [Rate limiting](https://github.com/go-redis/redis_rate)
+
+## Features
+
+- Redis 3 commands except QUIT, MONITOR, and SYNC.
+- Automatic connection pooling with
+  [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support.
+- [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub).
+- [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline).
+- [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.Pipeline) and
+  [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client.TxPipeline).
+- [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script).
+- [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options).
+- [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient).
+- [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient).
+- [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient-ManualSetup)
+  without using cluster mode and Redis Sentinel.
+- [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing).
+- [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-package-Instrumentation).
+
+## Installation
+
+go-redis supports the two latest Go versions and requires a Go version with
+[modules](https://github.com/golang/go/wiki/Modules) support, so make sure to initialize a Go
+module:
+
+```shell
+go mod init github.com/my/repo
+```
+
+And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake):
+
+```shell
+go get github.com/go-redis/redis/v8
+```
+
+## Quickstart
+
+```go
+import (
+    "context"
+    "fmt"
+
+    "github.com/go-redis/redis/v8"
+)
+
+var ctx = context.Background()
+
+func ExampleClient() {
+    rdb := redis.NewClient(&redis.Options{
+        Addr:     "localhost:6379",
+        Password: "", // no password set
+        DB:       0,  // use default DB
+    })
+
+    err := rdb.Set(ctx, "key", "value", 0).Err()
+    if err != nil {
+        panic(err)
+    }
+
+    val, err := rdb.Get(ctx, "key").Result()
+    if err != nil {
+        panic(err)
+    }
+    fmt.Println("key", val)
+
+    val2, err := rdb.Get(ctx, "key2").Result()
+    if err == redis.Nil {
+        fmt.Println("key2 does not exist")
+    } else if err != nil {
+        panic(err)
+    } else {
+        fmt.Println("key2", val2)
+    }
+    // Output: key value
+    // key2 does not exist
+}
+```
+
+## Look and feel
+
+Some corner cases:
+
+```go
+// SET key value EX 10 NX
+set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result()
+
+// SET key value keepttl NX
+set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result()
+
+// SORT list LIMIT 0 2 ASC
+vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result()
+
+// ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2
+vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{
+    Min: "-inf",
+    Max: "+inf",
+    Offset: 0,
+    Count: 2,
+}).Result()
+
+// ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM
+vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{
+    Keys: []string{"zset1", "zset2"},
+    Weights: []int64{2, 3},
+}).Result()
+
+// EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello"
+vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result()
+
+// custom command
+res, err := rdb.Do(ctx, "set", "key", "value").Result()
+```
+
+## Run the test
+
+go-redis will start a redis-server and run the test cases.
+
+The paths of the redis-server binary and the redis config file are defined in `main_test.go`:
+
+```
+var (
+	redisServerBin, _  = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server"))
+	redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf"))
+)
+```
+
+For local testing, you can change the variables to refer to your local files, or create a soft link
+to the corresponding folder for redis-server and copy the config file to `testdata/redis/`:
+
+```
+ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src
+cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/
+```
+
+Lastly, run:
+
+```
+go test
+```
+
+## Contributors
+
+Thanks to all the people who already contributed!
+
+<a href="https://github.com/go-redis/redis/graphs/contributors">
+  <img src="https://contributors-img.web.app/image?repo=go-redis/redis" />
+</a>
diff --git a/vendor/github.com/go-redis/redis/v8/RELEASING.md b/vendor/github.com/go-redis/redis/v8/RELEASING.md
new file mode 100644
index 0000000..1115db4
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/RELEASING.md
@@ -0,0 +1,15 @@
+# Releasing
+
+1. Run `release.sh` script which updates versions in go.mod files and pushes a new branch to GitHub:
+
+```shell
+TAG=v1.0.0 ./scripts/release.sh
+```
+
+2. Open a pull request and wait for the build to finish.
+
+3. Merge the pull request and run `tag.sh` to create tags for packages:
+
+```shell
+TAG=v1.0.0 ./scripts/tag.sh
+```
diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go
new file mode 100644
index 0000000..a54f2f3
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/cluster.go
@@ -0,0 +1,1750 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"fmt"
+	"math"
+	"net"
+	"runtime"
+	"sort"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/hashtag"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
+	"github.com/go-redis/redis/v8/internal/rand"
+)
+
+var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
+
+// ClusterOptions are used to configure a cluster client and should be
+// passed to NewClusterClient.
+type ClusterOptions struct {
+	// A seed list of host:port addresses of cluster nodes.
+	Addrs []string
+
+	// NewClient creates a cluster node client with provided name and options.
+	NewClient func(opt *Options) *Client
+
+	// The maximum number of retries before giving up. Command is retried
+	// on network errors and MOVED/ASK redirects.
+	// Default is 3 retries.
+	MaxRedirects int
+
+	// Enables read-only commands on slave nodes.
+	ReadOnly bool
+	// Allows routing read-only commands to the closest master or slave node.
+	// It automatically enables ReadOnly.
+	RouteByLatency bool
+	// Allows routing read-only commands to a random master or slave node.
+	// It automatically enables ReadOnly.
+	RouteRandomly bool
+
+	// Optional function that returns cluster slots information.
+	// It is useful for manually creating a cluster of standalone Redis servers
+	// and load-balancing read/write operations between the master and slaves.
+	// It can use a service like ZooKeeper to maintain configuration information
+	// and Cluster.ReloadState to manually trigger state reloading.
+	ClusterSlots func(context.Context) ([]ClusterSlot, error)
+
+	// Following options are copied from Options struct.
+
+	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+	OnConnect func(ctx context.Context, cn *Conn) error
+
+	Username string
+	Password string
+
+	MaxRetries      int
+	MinRetryBackoff time.Duration
+	MaxRetryBackoff time.Duration
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
+	// PoolSize applies per cluster node and not for the whole cluster.
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+
+	TLSConfig *tls.Config
+}
+
+func (opt *ClusterOptions) init() {
+	if opt.MaxRedirects == -1 {
+		opt.MaxRedirects = 0
+	} else if opt.MaxRedirects == 0 {
+		opt.MaxRedirects = 3
+	}
+
+	if opt.RouteByLatency || opt.RouteRandomly {
+		opt.ReadOnly = true
+	}
+
+	if opt.PoolSize == 0 {
+		opt.PoolSize = 5 * runtime.GOMAXPROCS(0)
+	}
+
+	switch opt.ReadTimeout {
+	case -1:
+		opt.ReadTimeout = 0
+	case 0:
+		opt.ReadTimeout = 3 * time.Second
+	}
+	switch opt.WriteTimeout {
+	case -1:
+		opt.WriteTimeout = 0
+	case 0:
+		opt.WriteTimeout = opt.ReadTimeout
+	}
+
+	if opt.MaxRetries == 0 {
+		opt.MaxRetries = -1
+	}
+	switch opt.MinRetryBackoff {
+	case -1:
+		opt.MinRetryBackoff = 0
+	case 0:
+		opt.MinRetryBackoff = 8 * time.Millisecond
+	}
+	switch opt.MaxRetryBackoff {
+	case -1:
+		opt.MaxRetryBackoff = 0
+	case 0:
+		opt.MaxRetryBackoff = 512 * time.Millisecond
+	}
+
+	if opt.NewClient == nil {
+		opt.NewClient = NewClient
+	}
+}
+
+func (opt *ClusterOptions) clientOptions() *Options {
+	const disableIdleCheck = -1
+
+	return &Options{
+		Dialer:    opt.Dialer,
+		OnConnect: opt.OnConnect,
+
+		Username: opt.Username,
+		Password: opt.Password,
+
+		MaxRetries:      opt.MaxRetries,
+		MinRetryBackoff: opt.MinRetryBackoff,
+		MaxRetryBackoff: opt.MaxRetryBackoff,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: disableIdleCheck,
+
+		TLSConfig: opt.TLSConfig,
+		// If ClusterSlots is populated, then we probably have an artificial
+		// cluster whose nodes are not in clustering mode (otherwise there isn't
+		// much use for ClusterSlots config).  This means we cannot execute the
+		// READONLY command against that node -- setting readOnly to false in such
+		// situations in the options below will prevent that from happening.
+		readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNode struct {
+	Client *Client
+
+	latency    uint32 // atomic
+	generation uint32 // atomic
+	failing    uint32 // atomic
+}
+
+func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
+	opt := clOpt.clientOptions()
+	opt.Addr = addr
+	node := clusterNode{
+		Client: clOpt.NewClient(opt),
+	}
+
+	node.latency = math.MaxUint32
+	if clOpt.RouteByLatency {
+		go node.updateLatency()
+	}
+
+	return &node
+}
+
+func (n *clusterNode) String() string {
+	return n.Client.String()
+}
+
+func (n *clusterNode) Close() error {
+	return n.Client.Close()
+}
+
+func (n *clusterNode) updateLatency() {
+	const numProbe = 10
+	var dur uint64
+
+	for i := 0; i < numProbe; i++ {
+		time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
+
+		start := time.Now()
+		n.Client.Ping(context.TODO())
+		dur += uint64(time.Since(start) / time.Microsecond)
+	}
+
+	latency := float64(dur) / float64(numProbe)
+	atomic.StoreUint32(&n.latency, uint32(latency+0.5))
+}
+
+func (n *clusterNode) Latency() time.Duration {
+	latency := atomic.LoadUint32(&n.latency)
+	return time.Duration(latency) * time.Microsecond
+}
+
+func (n *clusterNode) MarkAsFailing() {
+	atomic.StoreUint32(&n.failing, uint32(time.Now().Unix()))
+}
+
+func (n *clusterNode) Failing() bool {
+	const timeout = 15 // 15 seconds
+
+	failing := atomic.LoadUint32(&n.failing)
+	if failing == 0 {
+		return false
+	}
+	if time.Now().Unix()-int64(failing) < timeout {
+		return true
+	}
+	atomic.StoreUint32(&n.failing, 0)
+	return false
+}
+
+func (n *clusterNode) Generation() uint32 {
+	return atomic.LoadUint32(&n.generation)
+}
+
+func (n *clusterNode) SetGeneration(gen uint32) {
+	for {
+		v := atomic.LoadUint32(&n.generation)
+		if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) {
+			break
+		}
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type clusterNodes struct {
+	opt *ClusterOptions
+
+	mu          sync.RWMutex
+	addrs       []string
+	nodes       map[string]*clusterNode
+	activeAddrs []string
+	closed      bool
+
+	_generation uint32 // atomic
+}
+
+func newClusterNodes(opt *ClusterOptions) *clusterNodes {
+	return &clusterNodes{
+		opt: opt,
+
+		addrs: opt.Addrs,
+		nodes: make(map[string]*clusterNode),
+	}
+}
+
+func (c *clusterNodes) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil
+	}
+	c.closed = true
+
+	var firstErr error
+	for _, node := range c.nodes {
+		if err := node.Client.Close(); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+
+	c.nodes = nil
+	c.activeAddrs = nil
+
+	return firstErr
+}
+
+func (c *clusterNodes) Addrs() ([]string, error) {
+	var addrs []string
+
+	c.mu.RLock()
+	closed := c.closed //nolint:ifshort
+	if !closed {
+		if len(c.activeAddrs) > 0 {
+			addrs = c.activeAddrs
+		} else {
+			addrs = c.addrs
+		}
+	}
+	c.mu.RUnlock()
+
+	if closed {
+		return nil, pool.ErrClosed
+	}
+	if len(addrs) == 0 {
+		return nil, errClusterNoNodes
+	}
+	return addrs, nil
+}
+
+func (c *clusterNodes) NextGeneration() uint32 {
+	return atomic.AddUint32(&c._generation, 1)
+}
+
+// GC removes unused nodes.
+func (c *clusterNodes) GC(generation uint32) {
+	//nolint:prealloc
+	var collected []*clusterNode
+
+	c.mu.Lock()
+
+	c.activeAddrs = c.activeAddrs[:0]
+	for addr, node := range c.nodes {
+		if node.Generation() >= generation {
+			c.activeAddrs = append(c.activeAddrs, addr)
+			if c.opt.RouteByLatency {
+				go node.updateLatency()
+			}
+			continue
+		}
+
+		delete(c.nodes, addr)
+		collected = append(collected, node)
+	}
+
+	c.mu.Unlock()
+
+	for _, node := range collected {
+		_ = node.Client.Close()
+	}
+}
+
+func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
+	node, err := c.get(addr)
+	if err != nil {
+		return nil, err
+	}
+	if node != nil {
+		return node, nil
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+
+	node, ok := c.nodes[addr]
+	if ok {
+		return node, nil
+	}
+
+	node = newClusterNode(c.opt, addr)
+
+	c.addrs = appendIfNotExists(c.addrs, addr)
+	c.nodes[addr] = node
+
+	return node, nil
+}
+
+func (c *clusterNodes) get(addr string) (*clusterNode, error) {
+	var node *clusterNode
+	var err error
+	c.mu.RLock()
+	if c.closed {
+		err = pool.ErrClosed
+	} else {
+		node = c.nodes[addr]
+	}
+	c.mu.RUnlock()
+	return node, err
+}
+
+func (c *clusterNodes) All() ([]*clusterNode, error) {
+	c.mu.RLock()
+	defer c.mu.RUnlock()
+
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+
+	cp := make([]*clusterNode, 0, len(c.nodes))
+	for _, node := range c.nodes {
+		cp = append(cp, node)
+	}
+	return cp, nil
+}
+
+func (c *clusterNodes) Random() (*clusterNode, error) {
+	addrs, err := c.Addrs()
+	if err != nil {
+		return nil, err
+	}
+
+	n := rand.Intn(len(addrs))
+	return c.GetOrCreate(addrs[n])
+}
+
+//------------------------------------------------------------------------------
+
+type clusterSlot struct {
+	start, end int
+	nodes      []*clusterNode
+}
+
+type clusterSlotSlice []*clusterSlot
+
+func (p clusterSlotSlice) Len() int {
+	return len(p)
+}
+
+func (p clusterSlotSlice) Less(i, j int) bool {
+	return p[i].start < p[j].start
+}
+
+func (p clusterSlotSlice) Swap(i, j int) {
+	p[i], p[j] = p[j], p[i]
+}
+
+type clusterState struct {
+	nodes   *clusterNodes
+	Masters []*clusterNode
+	Slaves  []*clusterNode
+
+	slots []*clusterSlot
+
+	generation uint32
+	createdAt  time.Time
+}
+
+func newClusterState(
+	nodes *clusterNodes, slots []ClusterSlot, origin string,
+) (*clusterState, error) {
+	c := clusterState{
+		nodes: nodes,
+
+		slots: make([]*clusterSlot, 0, len(slots)),
+
+		generation: nodes.NextGeneration(),
+		createdAt:  time.Now(),
+	}
+
+	originHost, _, _ := net.SplitHostPort(origin)
+	isLoopbackOrigin := isLoopback(originHost)
+
+	for _, slot := range slots {
+		var nodes []*clusterNode
+		for i, slotNode := range slot.Nodes {
+			addr := slotNode.Addr
+			if !isLoopbackOrigin {
+				addr = replaceLoopbackHost(addr, originHost)
+			}
+
+			node, err := c.nodes.GetOrCreate(addr)
+			if err != nil {
+				return nil, err
+			}
+
+			node.SetGeneration(c.generation)
+			nodes = append(nodes, node)
+
+			if i == 0 {
+				c.Masters = appendUniqueNode(c.Masters, node)
+			} else {
+				c.Slaves = appendUniqueNode(c.Slaves, node)
+			}
+		}
+
+		c.slots = append(c.slots, &clusterSlot{
+			start: slot.Start,
+			end:   slot.End,
+			nodes: nodes,
+		})
+	}
+
+	sort.Sort(clusterSlotSlice(c.slots))
+
+	time.AfterFunc(time.Minute, func() {
+		nodes.GC(c.generation)
+	})
+
+	return &c, nil
+}
+
+func replaceLoopbackHost(nodeAddr, originHost string) string {
+	nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
+	if err != nil {
+		return nodeAddr
+	}
+
+	nodeIP := net.ParseIP(nodeHost)
+	if nodeIP == nil {
+		return nodeAddr
+	}
+
+	if !nodeIP.IsLoopback() {
+		return nodeAddr
+	}
+
+	// Use origin host which is not loopback and node port.
+	return net.JoinHostPort(originHost, nodePort)
+}
+
+func isLoopback(host string) bool {
+	ip := net.ParseIP(host)
+	if ip == nil {
+		return true
+	}
+	return ip.IsLoopback()
+}
+
+func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
+	nodes := c.slotNodes(slot)
+	if len(nodes) > 0 {
+		return nodes[0], nil
+	}
+	return c.nodes.Random()
+}
+
+func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
+	nodes := c.slotNodes(slot)
+	switch len(nodes) {
+	case 0:
+		return c.nodes.Random()
+	case 1:
+		return nodes[0], nil
+	case 2:
+		if slave := nodes[1]; !slave.Failing() {
+			return slave, nil
+		}
+		return nodes[0], nil
+	default:
+		var slave *clusterNode
+		for i := 0; i < 10; i++ {
+			n := rand.Intn(len(nodes)-1) + 1
+			slave = nodes[n]
+			if !slave.Failing() {
+				return slave, nil
+			}
+		}
+
+		// All slaves are loading - use master.
+		return nodes[0], nil
+	}
+}
+
+func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
+	nodes := c.slotNodes(slot)
+	if len(nodes) == 0 {
+		return c.nodes.Random()
+	}
+
+	var node *clusterNode
+	for _, n := range nodes {
+		if n.Failing() {
+			continue
+		}
+		if node == nil || n.Latency() < node.Latency() {
+			node = n
+		}
+	}
+	if node != nil {
+		return node, nil
+	}
+
+	// If all nodes are failing - return random node
+	return c.nodes.Random()
+}
+
+func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
+	nodes := c.slotNodes(slot)
+	if len(nodes) == 0 {
+		return c.nodes.Random()
+	}
+	if len(nodes) == 1 {
+		return nodes[0], nil
+	}
+	randomNodes := rand.Perm(len(nodes))
+	for _, idx := range randomNodes {
+		if node := nodes[idx]; !node.Failing() {
+			return node, nil
+		}
+	}
+	return nodes[randomNodes[0]], nil
+}
+
+func (c *clusterState) slotNodes(slot int) []*clusterNode {
+	i := sort.Search(len(c.slots), func(i int) bool {
+		return c.slots[i].end >= slot
+	})
+	if i >= len(c.slots) {
+		return nil
+	}
+	x := c.slots[i]
+	if slot >= x.start && slot <= x.end {
+		return x.nodes
+	}
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type clusterStateHolder struct {
+	load func(ctx context.Context) (*clusterState, error)
+
+	state     atomic.Value
+	reloading uint32 // atomic
+}
+
+func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder {
+	return &clusterStateHolder{
+		load: fn,
+	}
+}
+
+func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) {
+	state, err := c.load(ctx)
+	if err != nil {
+		return nil, err
+	}
+	c.state.Store(state)
+	return state, nil
+}
+
+func (c *clusterStateHolder) LazyReload() {
+	if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
+		return
+	}
+	go func() {
+		defer atomic.StoreUint32(&c.reloading, 0)
+
+		_, err := c.Reload(context.Background())
+		if err != nil {
+			return
+		}
+		time.Sleep(200 * time.Millisecond)
+	}()
+}
+
+func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
+	v := c.state.Load()
+	if v == nil {
+		return c.Reload(ctx)
+	}
+
+	state := v.(*clusterState)
+	if time.Since(state.createdAt) > 10*time.Second {
+		c.LazyReload()
+	}
+	return state, nil
+}
+
+func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
+	state, err := c.Reload(ctx)
+	if err == nil {
+		return state, nil
+	}
+	return c.Get(ctx)
+}
+
+//------------------------------------------------------------------------------
+
+type clusterClient struct {
+	opt           *ClusterOptions
+	nodes         *clusterNodes
+	state         *clusterStateHolder //nolint:structcheck
+	cmdsInfoCache *cmdsInfoCache      //nolint:structcheck
+}
+
+// ClusterClient is a Redis Cluster client representing a pool of zero
+// or more underlying connections. It's safe for concurrent use by
+// multiple goroutines.
+type ClusterClient struct {
+	*clusterClient
+	cmdable
+	hooks
+	ctx context.Context
+}
+
+// NewClusterClient returns a Redis Cluster client as described in
+// http://redis.io/topics/cluster-spec.
+func NewClusterClient(opt *ClusterOptions) *ClusterClient {
+	opt.init()
+
+	c := &ClusterClient{
+		clusterClient: &clusterClient{
+			opt:   opt,
+			nodes: newClusterNodes(opt),
+		},
+		ctx: context.Background(),
+	}
+	c.state = newClusterStateHolder(c.loadState)
+	c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
+	c.cmdable = c.Process
+
+	if opt.IdleCheckFrequency > 0 {
+		go c.reaper(opt.IdleCheckFrequency)
+	}
+
+	return c
+}
+
+func (c *ClusterClient) Context() context.Context {
+	return c.ctx
+}
+
+func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := *c
+	clone.cmdable = clone.Process
+	clone.hooks.lock()
+	clone.ctx = ctx
+	return &clone
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *ClusterClient) Options() *ClusterOptions {
+	return c.opt
+}
+
+// ReloadState reloads cluster state. If available it calls ClusterSlots func
+// to get cluster slots information.
+func (c *ClusterClient) ReloadState(ctx context.Context) {
+	c.state.LazyReload()
+}
+
+// Close closes the cluster client, releasing any open resources.
+//
+// It is rare to Close a ClusterClient, as the ClusterClient is meant
+// to be long-lived and shared between many goroutines.
+func (c *ClusterClient) Close() error {
+	return c.nodes.Close()
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.process)
+}
+
+func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
+	cmdInfo := c.cmdInfo(cmd.Name())
+	slot := c.cmdSlot(cmd)
+
+	var node *clusterNode
+	var ask bool
+	var lastErr error
+	for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				return err
+			}
+		}
+
+		if node == nil {
+			var err error
+			node, err = c.cmdNode(ctx, cmdInfo, slot)
+			if err != nil {
+				return err
+			}
+		}
+
+		if ask {
+			pipe := node.Client.Pipeline()
+			_ = pipe.Process(ctx, NewCmd(ctx, "asking"))
+			_ = pipe.Process(ctx, cmd)
+			_, lastErr = pipe.Exec(ctx)
+			_ = pipe.Close()
+			ask = false
+		} else {
+			lastErr = node.Client.Process(ctx, cmd)
+		}
+
+		// If there is no error - we are done.
+		if lastErr == nil {
+			return nil
+		}
+		if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
+			if isReadOnly {
+				c.state.LazyReload()
+			}
+			node = nil
+			continue
+		}
+
+		// If slave is loading - pick another node.
+		if c.opt.ReadOnly && isLoadingError(lastErr) {
+			node.MarkAsFailing()
+			node = nil
+			continue
+		}
+
+		var moved bool
+		var addr string
+		moved, ask, addr = isMovedError(lastErr)
+		if moved || ask {
+			c.state.LazyReload()
+
+			var err error
+			node, err = c.nodes.GetOrCreate(addr)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		if shouldRetry(lastErr, cmd.readTimeout() == nil) {
+			// First retry the same node.
+			if attempt == 0 {
+				continue
+			}
+
+			// Second try another node.
+			node.MarkAsFailing()
+			node = nil
+			continue
+		}
+
+		return lastErr
+	}
+	return lastErr
+}
+
+// ForEachMaster concurrently calls the fn on each master node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachMaster(
+	ctx context.Context,
+	fn func(ctx context.Context, client *Client) error,
+) error {
+	state, err := c.state.ReloadOrGet(ctx)
+	if err != nil {
+		return err
+	}
+
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+
+	for _, master := range state.Masters {
+		wg.Add(1)
+		go func(node *clusterNode) {
+			defer wg.Done()
+			err := fn(ctx, node.Client)
+			if err != nil {
+				select {
+				case errCh <- err:
+				default:
+				}
+			}
+		}(master)
+	}
+
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
+
+// ForEachSlave concurrently calls the fn on each slave node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachSlave(
+	ctx context.Context,
+	fn func(ctx context.Context, client *Client) error,
+) error {
+	state, err := c.state.ReloadOrGet(ctx)
+	if err != nil {
+		return err
+	}
+
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+
+	for _, slave := range state.Slaves {
+		wg.Add(1)
+		go func(node *clusterNode) {
+			defer wg.Done()
+			err := fn(ctx, node.Client)
+			if err != nil {
+				select {
+				case errCh <- err:
+				default:
+				}
+			}
+		}(slave)
+	}
+
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
+
+// ForEachShard concurrently calls the fn on each known node in the cluster.
+// It returns the first error if any.
+func (c *ClusterClient) ForEachShard(
+	ctx context.Context,
+	fn func(ctx context.Context, client *Client) error,
+) error {
+	state, err := c.state.ReloadOrGet(ctx)
+	if err != nil {
+		return err
+	}
+
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+
+	worker := func(node *clusterNode) {
+		defer wg.Done()
+		err := fn(ctx, node.Client)
+		if err != nil {
+			select {
+			case errCh <- err:
+			default:
+			}
+		}
+	}
+
+	for _, node := range state.Masters {
+		wg.Add(1)
+		go worker(node)
+	}
+	for _, node := range state.Slaves {
+		wg.Add(1)
+		go worker(node)
+	}
+
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *ClusterClient) PoolStats() *PoolStats {
+	var acc PoolStats
+
+	state, _ := c.state.Get(context.TODO())
+	if state == nil {
+		return &acc
+	}
+
+	for _, node := range state.Masters {
+		s := node.Client.connPool.Stats()
+		acc.Hits += s.Hits
+		acc.Misses += s.Misses
+		acc.Timeouts += s.Timeouts
+
+		acc.TotalConns += s.TotalConns
+		acc.IdleConns += s.IdleConns
+		acc.StaleConns += s.StaleConns
+	}
+
+	for _, node := range state.Slaves {
+		s := node.Client.connPool.Stats()
+		acc.Hits += s.Hits
+		acc.Misses += s.Misses
+		acc.Timeouts += s.Timeouts
+
+		acc.TotalConns += s.TotalConns
+		acc.IdleConns += s.IdleConns
+		acc.StaleConns += s.StaleConns
+	}
+
+	return &acc
+}
+
+func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
+	if c.opt.ClusterSlots != nil {
+		slots, err := c.opt.ClusterSlots(ctx)
+		if err != nil {
+			return nil, err
+		}
+		return newClusterState(c.nodes, slots, "")
+	}
+
+	addrs, err := c.nodes.Addrs()
+	if err != nil {
+		return nil, err
+	}
+
+	var firstErr error
+
+	for _, idx := range rand.Perm(len(addrs)) {
+		addr := addrs[idx]
+
+		node, err := c.nodes.GetOrCreate(addr)
+		if err != nil {
+			if firstErr == nil {
+				firstErr = err
+			}
+			continue
+		}
+
+		slots, err := node.Client.ClusterSlots(ctx).Result()
+		if err != nil {
+			if firstErr == nil {
+				firstErr = err
+			}
+			continue
+		}
+
+		return newClusterState(c.nodes, slots, node.Client.opt.Addr)
+	}
+
+	/*
+	 * No node is connectable. It's possible that the IPs of all nodes have changed.
+	 * Clear activeAddrs so the client can re-connect using the initial
+	 * setting of the addresses (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]),
+	 * which might have a chance to resolve the domain names and get updated IP addresses.
+	 */
+	c.nodes.mu.Lock()
+	c.nodes.activeAddrs = nil
+	c.nodes.mu.Unlock()
+
+	return nil, firstErr
+}
+
+// reaper closes idle connections to the cluster.
+func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
+	ticker := time.NewTicker(idleCheckFrequency)
+	defer ticker.Stop()
+
+	for range ticker.C {
+		nodes, err := c.nodes.All()
+		if err != nil {
+			break
+		}
+
+		for _, node := range nodes {
+			_, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
+			if err != nil {
+				internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err)
+			}
+		}
+	}
+}
+
+func (c *ClusterClient) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
+}
+
+func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
+	cmdsMap := newCmdsMap()
+	err := c.mapCmdsByNode(ctx, cmdsMap, cmds)
+	if err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+
+	for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				setCmdsErr(cmds, err)
+				return err
+			}
+		}
+
+		failedCmds := newCmdsMap()
+		var wg sync.WaitGroup
+
+		for node, cmds := range cmdsMap.m {
+			wg.Add(1)
+			go func(node *clusterNode, cmds []Cmder) {
+				defer wg.Done()
+
+				err := c._processPipelineNode(ctx, node, cmds, failedCmds)
+				if err == nil {
+					return
+				}
+				if attempt < c.opt.MaxRedirects {
+					if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
+						setCmdsErr(cmds, err)
+					}
+				} else {
+					setCmdsErr(cmds, err)
+				}
+			}(node, cmds)
+		}
+
+		wg.Wait()
+		if len(failedCmds.m) == 0 {
+			break
+		}
+		cmdsMap = failedCmds
+	}
+
+	return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error {
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		return err
+	}
+
+	if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
+		for _, cmd := range cmds {
+			slot := c.cmdSlot(cmd)
+			node, err := c.slotReadOnlyNode(state, slot)
+			if err != nil {
+				return err
+			}
+			cmdsMap.Add(node, cmd)
+		}
+		return nil
+	}
+
+	for _, cmd := range cmds {
+		slot := c.cmdSlot(cmd)
+		node, err := state.slotMasterNode(slot)
+		if err != nil {
+			return err
+		}
+		cmdsMap.Add(node, cmd)
+	}
+	return nil
+}
+
+func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
+	for _, cmd := range cmds {
+		cmdInfo := c.cmdInfo(cmd.Name())
+		if cmdInfo == nil || !cmdInfo.ReadOnly {
+			return false
+		}
+	}
+	return true
+}
+
+func (c *ClusterClient) _processPipelineNode(
+	ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+	return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+			err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+				return writeCmds(wr, cmds)
+			})
+			if err != nil {
+				return err
+			}
+
+			return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+				return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
+			})
+		})
+	})
+}
+
+func (c *ClusterClient) pipelineReadCmds(
+	ctx context.Context,
+	node *clusterNode,
+	rd *proto.Reader,
+	cmds []Cmder,
+	failedCmds *cmdsMap,
+) error {
+	for _, cmd := range cmds {
+		err := cmd.readReply(rd)
+		cmd.SetErr(err)
+
+		if err == nil {
+			continue
+		}
+
+		if c.checkMovedErr(ctx, cmd, err, failedCmds) {
+			continue
+		}
+
+		if c.opt.ReadOnly && isLoadingError(err) {
+			node.MarkAsFailing()
+			return err
+		}
+		if isRedisError(err) {
+			continue
+		}
+		return err
+	}
+	return nil
+}
+
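+// checkMovedErr handles cluster redirects for a single command. A MOVED reply
+// means the slot has permanently migrated to another node, so the slot map is
+// lazily reloaded and the command is retried on the new owner. An ASK reply is
+// a one-off redirect used while a slot is being migrated, so the command is
+// re-queued behind an ASKING command on the target node without reloading the
+// cluster state.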
+func (c *ClusterClient) checkMovedErr(
+	ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap,
+) bool {
+	moved, ask, addr := isMovedError(err)
+	if !moved && !ask {
+		return false
+	}
+
+	node, err := c.nodes.GetOrCreate(addr)
+	if err != nil {
+		return false
+	}
+
+	if moved {
+		c.state.LazyReload()
+		failedCmds.Add(node, cmd)
+		return true
+	}
+
+	if ask {
+		failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+		return true
+	}
+
+	panic("not reached")
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *ClusterClient) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processTxPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
+}
+
+func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	// Trim multi .. exec.
+	cmds = cmds[1 : len(cmds)-1]
+
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+
+	cmdsMap := c.mapCmdsBySlot(cmds)
+	for slot, cmds := range cmdsMap {
+		node, err := state.slotMasterNode(slot)
+		if err != nil {
+			setCmdsErr(cmds, err)
+			continue
+		}
+
+		cmdsMap := map[*clusterNode][]Cmder{node: cmds}
+		for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+			if attempt > 0 {
+				if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+					setCmdsErr(cmds, err)
+					return err
+				}
+			}
+
+			failedCmds := newCmdsMap()
+			var wg sync.WaitGroup
+
+			for node, cmds := range cmdsMap {
+				wg.Add(1)
+				go func(node *clusterNode, cmds []Cmder) {
+					defer wg.Done()
+
+					err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
+					if err == nil {
+						return
+					}
+
+					if attempt < c.opt.MaxRedirects {
+						if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
+							setCmdsErr(cmds, err)
+						}
+					} else {
+						setCmdsErr(cmds, err)
+					}
+				}(node, cmds)
+			}
+
+			wg.Wait()
+			if len(failedCmds.m) == 0 {
+				break
+			}
+			cmdsMap = failedCmds.m
+		}
+	}
+
+	return cmdsFirstErr(cmds)
+}
+
+func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
+	cmdsMap := make(map[int][]Cmder)
+	for _, cmd := range cmds {
+		slot := c.cmdSlot(cmd)
+		cmdsMap[slot] = append(cmdsMap[slot], cmd)
+	}
+	return cmdsMap
+}
+
+func (c *ClusterClient) _processTxPipelineNode(
+	ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
+) error {
+	return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+			err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+				return writeCmds(wr, cmds)
+			})
+			if err != nil {
+				return err
+			}
+
+			return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+				statusCmd := cmds[0].(*StatusCmd)
+				// Trim multi and exec.
+				cmds = cmds[1 : len(cmds)-1]
+
+				err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds)
+				if err != nil {
+					moved, ask, addr := isMovedError(err)
+					if moved || ask {
+						return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds)
+					}
+					return err
+				}
+
+				return pipelineReadCmds(rd, cmds)
+			})
+		})
+	})
+}
+
+func (c *ClusterClient) txPipelineReadQueued(
+	ctx context.Context,
+	rd *proto.Reader,
+	statusCmd *StatusCmd,
+	cmds []Cmder,
+	failedCmds *cmdsMap,
+) error {
+	// Parse queued replies.
+	if err := statusCmd.readReply(rd); err != nil {
+		return err
+	}
+
+	for _, cmd := range cmds {
+		err := statusCmd.readReply(rd)
+		if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) {
+			continue
+		}
+		return err
+	}
+
+	// Parse number of replies.
+	line, err := rd.ReadLine()
+	if err != nil {
+		if err == Nil {
+			err = TxFailedErr
+		}
+		return err
+	}
+
+	switch line[0] {
+	case proto.ErrorReply:
+		return proto.ParseErrorReply(line)
+	case proto.ArrayReply:
+		// ok
+	default:
+		return fmt.Errorf("redis: expected '*', but got line %q", line)
+	}
+
+	return nil
+}
+
+func (c *ClusterClient) cmdsMoved(
+	ctx context.Context, cmds []Cmder,
+	moved, ask bool,
+	addr string,
+	failedCmds *cmdsMap,
+) error {
+	node, err := c.nodes.GetOrCreate(addr)
+	if err != nil {
+		return err
+	}
+
+	if moved {
+		c.state.LazyReload()
+		for _, cmd := range cmds {
+			failedCmds.Add(node, cmd)
+		}
+		return nil
+	}
+
+	if ask {
+		for _, cmd := range cmds {
+			failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
+		}
+		return nil
+	}
+
+	return nil
+}
+
+func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+	if len(keys) == 0 {
+		return fmt.Errorf("redis: Watch requires at least one key")
+	}
+
+	slot := hashtag.Slot(keys[0])
+	for _, key := range keys[1:] {
+		if hashtag.Slot(key) != slot {
+			err := fmt.Errorf("redis: Watch requires all keys to be in the same slot")
+			return err
+		}
+	}
+
+	node, err := c.slotMasterNode(ctx, slot)
+	if err != nil {
+		return err
+	}
+
+	for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				return err
+			}
+		}
+
+		err = node.Client.Watch(ctx, fn, keys...)
+		if err == nil {
+			break
+		}
+
+		moved, ask, addr := isMovedError(err)
+		if moved || ask {
+			node, err = c.nodes.GetOrCreate(addr)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
+			if isReadOnly {
+				c.state.LazyReload()
+			}
+			node, err = c.slotMasterNode(ctx, slot)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+
+		if shouldRetry(err, true) {
+			continue
+		}
+
+		return err
+	}
+
+	return err
+}
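+
+// Illustrative sketch (not part of the library): Watch provides optimistic
+// locking, and in cluster mode all watched keys must hash to the same slot,
+// e.g. by sharing a hash tag. Assuming rdb is a *redis.ClusterClient and the
+// key name is hypothetical:
+//
+//	key := "{user:1}:counter"
+//	err := rdb.Watch(ctx, func(tx *redis.Tx) error {
+//		n, err := tx.Get(ctx, key).Int()
+//		if err != nil && err != redis.Nil {
+//			return err
+//		}
+//		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
+//			pipe.Set(ctx, key, n+1, 0)
+//			return nil
+//		})
+//		return err
+//	}, key)
+//
+// The hash tag "{user:1}" keeps related keys in one slot, satisfying the
+// same-slot check above.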
+
+func (c *ClusterClient) pubSub() *PubSub {
+	var node *clusterNode
+	pubsub := &PubSub{
+		opt: c.opt.clientOptions(),
+
+		newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+			if node != nil {
+				panic("node != nil")
+			}
+
+			var err error
+			if len(channels) > 0 {
+				slot := hashtag.Slot(channels[0])
+				node, err = c.slotMasterNode(ctx, slot)
+			} else {
+				node, err = c.nodes.Random()
+			}
+			if err != nil {
+				return nil, err
+			}
+
+			cn, err := node.Client.newConn(context.TODO())
+			if err != nil {
+				node = nil
+
+				return nil, err
+			}
+
+			return cn, nil
+		},
+		closeConn: func(cn *pool.Conn) error {
+			err := node.Client.connPool.CloseConn(cn)
+			node = nil
+			return err
+		},
+	}
+	pubsub.init()
+
+	return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.Subscribe(ctx, channels...)
+	}
+	return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.PSubscribe(ctx, channels...)
+	}
+	return pubsub
+}
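+
+// Illustrative sketch (not part of the library): a typical cluster Pub/Sub
+// consumer, assuming rdb is a *redis.ClusterClient and "mychannel" is a
+// hypothetical channel name:
+//
+//	pubsub := rdb.Subscribe(ctx, "mychannel")
+//	defer pubsub.Close()
+//	for msg := range pubsub.Channel() {
+//		fmt.Println(msg.Channel, msg.Payload)
+//	}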
+
+func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
+	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+	// Try 3 random nodes.
+	const nodeLimit = 3
+
+	addrs, err := c.nodes.Addrs()
+	if err != nil {
+		return nil, err
+	}
+
+	var firstErr error
+
+	perm := rand.Perm(len(addrs))
+	if len(perm) > nodeLimit {
+		perm = perm[:nodeLimit]
+	}
+
+	for _, idx := range perm {
+		addr := addrs[idx]
+
+		node, err := c.nodes.GetOrCreate(addr)
+		if err != nil {
+			if firstErr == nil {
+				firstErr = err
+			}
+			continue
+		}
+
+		info, err := node.Client.Command(ctx).Result()
+		if err == nil {
+			return info, nil
+		}
+		if firstErr == nil {
+			firstErr = err
+		}
+	}
+
+	if firstErr == nil {
+		panic("not reached")
+	}
+	return nil, firstErr
+}
+
+func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
+	cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
+	if err != nil {
+		return nil
+	}
+
+	info := cmdsInfo[name]
+	if info == nil {
+		internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
+	}
+	return info
+}
+
+func (c *ClusterClient) cmdSlot(cmd Cmder) int {
+	args := cmd.Args()
+	if args[0] == "cluster" && args[1] == "getkeysinslot" {
+		return args[2].(int)
+	}
+
+	cmdInfo := c.cmdInfo(cmd.Name())
+	return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
+}
+
+func cmdSlot(cmd Cmder, pos int) int {
+	if pos == 0 {
+		return hashtag.RandomSlot()
+	}
+	firstKey := cmd.stringArg(pos)
+	return hashtag.Slot(firstKey)
+}
+
+func (c *ClusterClient) cmdNode(
+	ctx context.Context,
+	cmdInfo *CommandInfo,
+	slot int,
+) (*clusterNode, error) {
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
+		return c.slotReadOnlyNode(state, slot)
+	}
+	return state.slotMasterNode(slot)
+}
+
+func (c *ClusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
+	if c.opt.RouteByLatency {
+		return state.slotClosestNode(slot)
+	}
+	if c.opt.RouteRandomly {
+		return state.slotRandomNode(slot)
+	}
+	return state.slotSlaveNode(slot)
+}
+
+func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) {
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return state.slotMasterNode(slot)
+}
+
+// SlaveForKey gets a client for a replica node to run any command on it.
+// This is especially useful when we want to run a particular Lua script that
+// contains only read-only commands on a replica.
+// Other read-only Redis commands generally carry a flag marking them as such,
+// so they are run on replica nodes automatically
+// when ClusterOptions.ReadOnly is set to true.
+func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
+	state, err := c.state.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	slot := hashtag.Slot(key)
+	node, err := c.slotReadOnlyNode(state, slot)
+	if err != nil {
+		return nil, err
+	}
+	return node.Client, err
+}
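+
+// Illustrative sketch (not part of the library): running a read-only Lua
+// script on a replica via SlaveForKey; rdb is an assumed *redis.ClusterClient
+// and the key/script are hypothetical:
+//
+//	replica, err := rdb.SlaveForKey(ctx, "stats:daily")
+//	if err != nil {
+//		return err
+//	}
+//	val, err := replica.Eval(ctx,
+//		"return redis.call('GET', KEYS[1])", []string{"stats:daily"}).Result()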
+
+// MasterForKey returns a client to the master node for a particular key.
+func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) {
+	slot := hashtag.Slot(key)
+	node, err := c.slotMasterNode(ctx, slot)
+	if err != nil {
+		return nil, err
+	}
+	return node.Client, err
+}
+
+func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
+	for _, n := range nodes {
+		if n == node {
+			return nodes
+		}
+	}
+	return append(nodes, node)
+}
+
+func appendIfNotExists(ss []string, es ...string) []string {
+loop:
+	for _, e := range es {
+		for _, s := range ss {
+			if s == e {
+				continue loop
+			}
+		}
+		ss = append(ss, e)
+	}
+	return ss
+}
+
+//------------------------------------------------------------------------------
+
+type cmdsMap struct {
+	mu sync.Mutex
+	m  map[*clusterNode][]Cmder
+}
+
+func newCmdsMap() *cmdsMap {
+	return &cmdsMap{
+		m: make(map[*clusterNode][]Cmder),
+	}
+}
+
+func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
+	m.mu.Lock()
+	m.m[node] = append(m.m[node], cmds...)
+	m.mu.Unlock()
+}
diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
new file mode 100644
index 0000000..085bce8
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
@@ -0,0 +1,109 @@
+package redis
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+)
+
+func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
+	cmd := NewIntCmd(ctx, "dbsize")
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		var size int64
+		err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
+			n, err := master.DBSize(ctx).Result()
+			if err != nil {
+				return err
+			}
+			atomic.AddInt64(&size, n)
+			return nil
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		} else {
+			cmd.val = size
+		}
+		return nil
+	})
+	return cmd
+}
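+
+// Illustrative usage (not part of the library), assuming rdb is a
+// *redis.ClusterClient: the aggregated key count across all masters is
+//
+//	total, err := rdb.DBSize(ctx).Result()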
+
+func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
+	cmd := NewStringCmd(ctx, "script", "load", script)
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		mu := &sync.Mutex{}
+		err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+			val, err := shard.ScriptLoad(ctx, script).Result()
+			if err != nil {
+				return err
+			}
+
+			mu.Lock()
+			if cmd.Val() == "" {
+				cmd.val = val
+			}
+			mu.Unlock()
+
+			return nil
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		}
+		return nil
+	})
+	return cmd
+}
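+
+// Illustrative usage (not part of the library), assuming rdb is a
+// *redis.ClusterClient: load a script on every shard, then verify it.
+// ScriptExists reports true for a hash only if it is present on all shards:
+//
+//	sha, err := rdb.ScriptLoad(ctx, "return 1").Result()
+//	if err != nil {
+//		return err
+//	}
+//	exists, err := rdb.ScriptExists(ctx, sha).Result() // []bool with one entry per hash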
+
+func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "script", "flush")
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+			return shard.ScriptFlush(ctx).Err()
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		}
+		return nil
+	})
+	return cmd
+}
+
+func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+	args := make([]interface{}, 2+len(hashes))
+	args[0] = "script"
+	args[1] = "exists"
+	for i, hash := range hashes {
+		args[2+i] = hash
+	}
+	cmd := NewBoolSliceCmd(ctx, args...)
+
+	result := make([]bool, len(hashes))
+	for i := range result {
+		result[i] = true
+	}
+
+	_ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
+		mu := &sync.Mutex{}
+		err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+			val, err := shard.ScriptExists(ctx, hashes...).Result()
+			if err != nil {
+				return err
+			}
+
+			mu.Lock()
+			for i, v := range val {
+				result[i] = result[i] && v
+			}
+			mu.Unlock()
+
+			return nil
+		})
+		if err != nil {
+			cmd.SetErr(err)
+		} else {
+			cmd.val = result
+		}
+		return nil
+	})
+	return cmd
+}
diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go
new file mode 100644
index 0000000..4bb12a8
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/command.go
@@ -0,0 +1,3478 @@
+package redis
+
+import (
+	"context"
+	"fmt"
+	"net"
+	"strconv"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/hscan"
+	"github.com/go-redis/redis/v8/internal/proto"
+	"github.com/go-redis/redis/v8/internal/util"
+)
+
+type Cmder interface {
+	Name() string
+	FullName() string
+	Args() []interface{}
+	String() string
+	stringArg(int) string
+	firstKeyPos() int8
+	SetFirstKeyPos(int8)
+
+	readTimeout() *time.Duration
+	readReply(rd *proto.Reader) error
+
+	SetErr(error)
+	Err() error
+}
+
+func setCmdsErr(cmds []Cmder, e error) {
+	for _, cmd := range cmds {
+		if cmd.Err() == nil {
+			cmd.SetErr(e)
+		}
+	}
+}
+
+func cmdsFirstErr(cmds []Cmder) error {
+	for _, cmd := range cmds {
+		if err := cmd.Err(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeCmds(wr *proto.Writer, cmds []Cmder) error {
+	for _, cmd := range cmds {
+		if err := writeCmd(wr, cmd); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeCmd(wr *proto.Writer, cmd Cmder) error {
+	return wr.WriteArgs(cmd.Args())
+}
+
+func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
+	if pos := cmd.firstKeyPos(); pos != 0 {
+		return int(pos)
+	}
+
+	switch cmd.Name() {
+	case "eval", "evalsha":
+		if cmd.stringArg(2) != "0" {
+			return 3
+		}
+
+		return 0
+	case "publish":
+		return 1
+	case "memory":
+		// https://github.com/redis/redis/issues/7493
+		if cmd.stringArg(1) == "usage" {
+			return 2
+		}
+	}
+
+	if info != nil {
+		return int(info.FirstKeyPos)
+	}
+	return 0
+}
+
+func cmdString(cmd Cmder, val interface{}) string {
+	b := make([]byte, 0, 64)
+
+	for i, arg := range cmd.Args() {
+		if i > 0 {
+			b = append(b, ' ')
+		}
+		b = internal.AppendArg(b, arg)
+	}
+
+	if err := cmd.Err(); err != nil {
+		b = append(b, ": "...)
+		b = append(b, err.Error()...)
+	} else if val != nil {
+		b = append(b, ": "...)
+		b = internal.AppendArg(b, val)
+	}
+
+	return internal.String(b)
+}
+
+//------------------------------------------------------------------------------
+
+type baseCmd struct {
+	ctx    context.Context
+	args   []interface{}
+	err    error
+	keyPos int8
+
+	_readTimeout *time.Duration
+}
+
+var _ Cmder = (*Cmd)(nil)
+
+func (cmd *baseCmd) Name() string {
+	if len(cmd.args) == 0 {
+		return ""
+	}
+	// Cmd name must be lower cased.
+	return internal.ToLower(cmd.stringArg(0))
+}
+
+func (cmd *baseCmd) FullName() string {
+	switch name := cmd.Name(); name {
+	case "cluster", "command":
+		if len(cmd.args) == 1 {
+			return name
+		}
+		if s2, ok := cmd.args[1].(string); ok {
+			return name + " " + s2
+		}
+		return name
+	default:
+		return name
+	}
+}
+
+func (cmd *baseCmd) Args() []interface{} {
+	return cmd.args
+}
+
+func (cmd *baseCmd) stringArg(pos int) string {
+	if pos < 0 || pos >= len(cmd.args) {
+		return ""
+	}
+	arg := cmd.args[pos]
+	switch v := arg.(type) {
+	case string:
+		return v
+	default:
+		// TODO: consider using appendArg
+		return fmt.Sprint(v)
+	}
+}
+
+func (cmd *baseCmd) firstKeyPos() int8 {
+	return cmd.keyPos
+}
+
+func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
+	cmd.keyPos = keyPos
+}
+
+func (cmd *baseCmd) SetErr(e error) {
+	cmd.err = e
+}
+
+func (cmd *baseCmd) Err() error {
+	return cmd.err
+}
+
+func (cmd *baseCmd) readTimeout() *time.Duration {
+	return cmd._readTimeout
+}
+
+func (cmd *baseCmd) setReadTimeout(d time.Duration) {
+	cmd._readTimeout = &d
+}
+
+//------------------------------------------------------------------------------
+
+type Cmd struct {
+	baseCmd
+
+	val interface{}
+}
+
+func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
+	return &Cmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *Cmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *Cmd) SetVal(val interface{}) {
+	cmd.val = val
+}
+
+func (cmd *Cmd) Val() interface{} {
+	return cmd.val
+}
+
+func (cmd *Cmd) Result() (interface{}, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *Cmd) Text() (string, error) {
+	if cmd.err != nil {
+		return "", cmd.err
+	}
+	return toString(cmd.val)
+}
+
+func toString(val interface{}) (string, error) {
+	switch val := val.(type) {
+	case string:
+		return val, nil
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for String", val)
+		return "", err
+	}
+}
+
+func (cmd *Cmd) Int() (int, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case int64:
+		return int(val), nil
+	case string:
+		return strconv.Atoi(val)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Int", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Int64() (int64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return toInt64(cmd.val)
+}
+
+func toInt64(val interface{}) (int64, error) {
+	switch val := val.(type) {
+	case int64:
+		return val, nil
+	case string:
+		return strconv.ParseInt(val, 10, 64)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Uint64() (uint64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return toUint64(cmd.val)
+}
+
+func toUint64(val interface{}) (uint64, error) {
+	switch val := val.(type) {
+	case int64:
+		return uint64(val), nil
+	case string:
+		return strconv.ParseUint(val, 10, 64)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Float32() (float32, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return toFloat32(cmd.val)
+}
+
+func toFloat32(val interface{}) (float32, error) {
+	switch val := val.(type) {
+	case int64:
+		return float32(val), nil
+	case string:
+		f, err := strconv.ParseFloat(val, 32)
+		if err != nil {
+			return 0, err
+		}
+		return float32(f), nil
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Float64() (float64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return toFloat64(cmd.val)
+}
+
+func toFloat64(val interface{}) (float64, error) {
+	switch val := val.(type) {
+	case int64:
+		return float64(val), nil
+	case string:
+		return strconv.ParseFloat(val, 64)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
+		return 0, err
+	}
+}
+
+func (cmd *Cmd) Bool() (bool, error) {
+	if cmd.err != nil {
+		return false, cmd.err
+	}
+	return toBool(cmd.val)
+}
+
+func toBool(val interface{}) (bool, error) {
+	switch val := val.(type) {
+	case int64:
+		return val != 0, nil
+	case string:
+		return strconv.ParseBool(val)
+	default:
+		err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
+		return false, err
+	}
+}
+
+func (cmd *Cmd) Slice() ([]interface{}, error) {
+	if cmd.err != nil {
+		return nil, cmd.err
+	}
+	switch val := cmd.val.(type) {
+	case []interface{}:
+		return val, nil
+	default:
+		return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
+	}
+}
+
+func (cmd *Cmd) StringSlice() ([]string, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	ss := make([]string, len(slice))
+	for i, iface := range slice {
+		val, err := toString(iface)
+		if err != nil {
+			return nil, err
+		}
+		ss[i] = val
+	}
+	return ss, nil
+}
+
+func (cmd *Cmd) Int64Slice() ([]int64, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	nums := make([]int64, len(slice))
+	for i, iface := range slice {
+		val, err := toInt64(iface)
+		if err != nil {
+			return nil, err
+		}
+		nums[i] = val
+	}
+	return nums, nil
+}
+
+func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	nums := make([]uint64, len(slice))
+	for i, iface := range slice {
+		val, err := toUint64(iface)
+		if err != nil {
+			return nil, err
+		}
+		nums[i] = val
+	}
+	return nums, nil
+}
+
+func (cmd *Cmd) Float32Slice() ([]float32, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	floats := make([]float32, len(slice))
+	for i, iface := range slice {
+		val, err := toFloat32(iface)
+		if err != nil {
+			return nil, err
+		}
+		floats[i] = val
+	}
+	return floats, nil
+}
+
+func (cmd *Cmd) Float64Slice() ([]float64, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	floats := make([]float64, len(slice))
+	for i, iface := range slice {
+		val, err := toFloat64(iface)
+		if err != nil {
+			return nil, err
+		}
+		floats[i] = val
+	}
+	return floats, nil
+}
+
+func (cmd *Cmd) BoolSlice() ([]bool, error) {
+	slice, err := cmd.Slice()
+	if err != nil {
+		return nil, err
+	}
+
+	bools := make([]bool, len(slice))
+	for i, iface := range slice {
+		val, err := toBool(iface)
+		if err != nil {
+			return nil, err
+		}
+		bools[i] = val
+	}
+	return bools, nil
+}
+
+func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
+	cmd.val, err = rd.ReadReply(sliceParser)
+	return err
+}
+
+// sliceParser implements proto.MultiBulkParse.
+func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
+	vals := make([]interface{}, n)
+	for i := 0; i < len(vals); i++ {
+		v, err := rd.ReadReply(sliceParser)
+		if err != nil {
+			if err == Nil {
+				vals[i] = nil
+				continue
+			}
+			if err, ok := err.(proto.RedisError); ok {
+				vals[i] = err
+				continue
+			}
+			return nil, err
+		}
+		vals[i] = v
+	}
+	return vals, nil
+}
+
+//------------------------------------------------------------------------------
+
+type SliceCmd struct {
+	baseCmd
+
+	val []interface{}
+}
+
+var _ Cmder = (*SliceCmd)(nil)
+
+func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
+	return &SliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *SliceCmd) SetVal(val []interface{}) {
+	cmd.val = val
+}
+
+func (cmd *SliceCmd) Val() []interface{} {
+	return cmd.val
+}
+
+func (cmd *SliceCmd) Result() ([]interface{}, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *SliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the reply values into a destination struct. The values are
+// matched to the struct fields by the `redis:"field"` tag.
+func (cmd *SliceCmd) Scan(dst interface{}) error {
+	if cmd.err != nil {
+		return cmd.err
+	}
+
+	// Pass the list of keys and values.
+	// Skip the first two args for: HMGET key
+	var args []interface{}
+	if cmd.args[0] == "hmget" {
+		args = cmd.args[2:]
+	} else {
+		// Otherwise, it's: MGET field field ...
+		args = cmd.args[1:]
+	}
+
+	return hscan.Scan(dst, args, cmd.val)
+}
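+
+// Illustrative sketch (not part of the library): scanning an HMGET reply into
+// a struct; the type, field names and key are hypothetical and rdb is an
+// assumed client:
+//
+//	type Profile struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}
+//	var p Profile
+//	err := rdb.HMGet(ctx, "user:1", "name", "age").Scan(&p)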
+
+func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
+	v, err := rd.ReadArrayReply(sliceParser)
+	if err != nil {
+		return err
+	}
+	cmd.val = v.([]interface{})
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type StatusCmd struct {
+	baseCmd
+
+	val string
+}
+
+var _ Cmder = (*StatusCmd)(nil)
+
+func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
+	return &StatusCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *StatusCmd) SetVal(val string) {
+	cmd.val = val
+}
+
+func (cmd *StatusCmd) Val() string {
+	return cmd.val
+}
+
+func (cmd *StatusCmd) Result() (string, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StatusCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
+	cmd.val, err = rd.ReadString()
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntCmd struct {
+	baseCmd
+
+	val int64
+}
+
+var _ Cmder = (*IntCmd)(nil)
+
+func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
+	return &IntCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *IntCmd) SetVal(val int64) {
+	cmd.val = val
+}
+
+func (cmd *IntCmd) Val() int64 {
+	return cmd.val
+}
+
+func (cmd *IntCmd) Result() (int64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *IntCmd) Uint64() (uint64, error) {
+	return uint64(cmd.val), cmd.err
+}
+
+func (cmd *IntCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
+	cmd.val, err = rd.ReadIntReply()
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type IntSliceCmd struct {
+	baseCmd
+
+	val []int64
+}
+
+var _ Cmder = (*IntSliceCmd)(nil)
+
+func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
+	return &IntSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *IntSliceCmd) SetVal(val []int64) {
+	cmd.val = val
+}
+
+func (cmd *IntSliceCmd) Val() []int64 {
+	return cmd.val
+}
+
+func (cmd *IntSliceCmd) Result() ([]int64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *IntSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]int64, n)
+		for i := 0; i < len(cmd.val); i++ {
+			num, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+			cmd.val[i] = num
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type DurationCmd struct {
+	baseCmd
+
+	val       time.Duration
+	precision time.Duration
+}
+
+var _ Cmder = (*DurationCmd)(nil)
+
+func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
+	return &DurationCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+		precision: precision,
+	}
+}
+
+func (cmd *DurationCmd) SetVal(val time.Duration) {
+	cmd.val = val
+}
+
+func (cmd *DurationCmd) Val() time.Duration {
+	return cmd.val
+}
+
+func (cmd *DurationCmd) Result() (time.Duration, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *DurationCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadIntReply()
+	if err != nil {
+		return err
+	}
+	switch n {
+	// -2 if the key does not exist
+	// -1 if the key exists but has no associated expire
+	case -2, -1:
+		cmd.val = time.Duration(n)
+	default:
+		cmd.val = time.Duration(n) * cmd.precision
+	}
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type TimeCmd struct {
+	baseCmd
+
+	val time.Time
+}
+
+var _ Cmder = (*TimeCmd)(nil)
+
+func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
+	return &TimeCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *TimeCmd) SetVal(val time.Time) {
+	cmd.val = val
+}
+
+func (cmd *TimeCmd) Val() time.Time {
+	return cmd.val
+}
+
+func (cmd *TimeCmd) Result() (time.Time, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *TimeCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 2 {
+			return nil, fmt.Errorf("got %d elements, expected 2", n)
+		}
+
+		sec, err := rd.ReadInt()
+		if err != nil {
+			return nil, err
+		}
+
+		microsec, err := rd.ReadInt()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val = time.Unix(sec, microsec*1000)
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolCmd struct {
+	baseCmd
+
+	val bool
+}
+
+var _ Cmder = (*BoolCmd)(nil)
+
+func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
+	return &BoolCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *BoolCmd) SetVal(val bool) {
+	cmd.val = val
+}
+
+func (cmd *BoolCmd) Val() bool {
+	return cmd.val
+}
+
+func (cmd *BoolCmd) Result() (bool, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *BoolCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
+	v, err := rd.ReadReply(nil)
+	// `SET key value NX` returns nil when key already exists. But
+	// `SETNX key value` returns bool (0/1). So convert nil to bool.
+	if err == Nil {
+		cmd.val = false
+		return nil
+	}
+	if err != nil {
+		return err
+	}
+	switch v := v.(type) {
+	case int64:
+		cmd.val = v == 1
+		return nil
+	case string:
+		cmd.val = v == "OK"
+		return nil
+	default:
+		return fmt.Errorf("got %T, wanted int64 or string", v)
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type StringCmd struct {
+	baseCmd
+
+	val string
+}
+
+var _ Cmder = (*StringCmd)(nil)
+
+func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
+	return &StringCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *StringCmd) SetVal(val string) {
+	cmd.val = val
+}
+
+func (cmd *StringCmd) Val() string {
+	return cmd.val
+}
+
+func (cmd *StringCmd) Result() (string, error) {
+	return cmd.Val(), cmd.err
+}
+
+func (cmd *StringCmd) Bytes() ([]byte, error) {
+	return util.StringToBytes(cmd.val), cmd.err
+}
+
+func (cmd *StringCmd) Bool() (bool, error) {
+	if cmd.err != nil {
+		return false, cmd.err
+	}
+	return strconv.ParseBool(cmd.val)
+}
+
+func (cmd *StringCmd) Int() (int, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.Atoi(cmd.Val())
+}
+
+func (cmd *StringCmd) Int64() (int64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.ParseInt(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Uint64() (uint64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.ParseUint(cmd.Val(), 10, 64)
+}
+
+func (cmd *StringCmd) Float32() (float32, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	f, err := strconv.ParseFloat(cmd.Val(), 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+func (cmd *StringCmd) Float64() (float64, error) {
+	if cmd.err != nil {
+		return 0, cmd.err
+	}
+	return strconv.ParseFloat(cmd.Val(), 64)
+}
+
+func (cmd *StringCmd) Time() (time.Time, error) {
+	if cmd.err != nil {
+		return time.Time{}, cmd.err
+	}
+	return time.Parse(time.RFC3339Nano, cmd.Val())
+}
+
+func (cmd *StringCmd) Scan(val interface{}) error {
+	if cmd.err != nil {
+		return cmd.err
+	}
+	return proto.Scan([]byte(cmd.val), val)
+}
+
+func (cmd *StringCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
+	cmd.val, err = rd.ReadString()
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatCmd struct {
+	baseCmd
+
+	val float64
+}
+
+var _ Cmder = (*FloatCmd)(nil)
+
+func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
+	return &FloatCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *FloatCmd) SetVal(val float64) {
+	cmd.val = val
+}
+
+func (cmd *FloatCmd) Val() float64 {
+	return cmd.val
+}
+
+func (cmd *FloatCmd) Result() (float64, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *FloatCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
+	cmd.val, err = rd.ReadFloatReply()
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type FloatSliceCmd struct {
+	baseCmd
+
+	val []float64
+}
+
+var _ Cmder = (*FloatSliceCmd)(nil)
+
+func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
+	return &FloatSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *FloatSliceCmd) SetVal(val []float64) {
+	cmd.val = val
+}
+
+func (cmd *FloatSliceCmd) Val() []float64 {
+	return cmd.val
+}
+
+func (cmd *FloatSliceCmd) Result() ([]float64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *FloatSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]float64, n)
+		for i := 0; i < len(cmd.val); i++ {
+			switch num, err := rd.ReadFloatReply(); {
+			case err == Nil:
+				cmd.val[i] = 0
+			case err != nil:
+				return nil, err
+			default:
+				cmd.val[i] = num
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringSliceCmd struct {
+	baseCmd
+
+	val []string
+}
+
+var _ Cmder = (*StringSliceCmd)(nil)
+
+func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
+	return &StringSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *StringSliceCmd) SetVal(val []string) {
+	cmd.val = val
+}
+
+func (cmd *StringSliceCmd) Val() []string {
+	return cmd.val
+}
+
+func (cmd *StringSliceCmd) Result() ([]string, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *StringSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
+	return proto.ScanSlice(cmd.Val(), container)
+}
+
+func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]string, n)
+		for i := 0; i < len(cmd.val); i++ {
+			switch s, err := rd.ReadString(); {
+			case err == Nil:
+				cmd.val[i] = ""
+			case err != nil:
+				return nil, err
+			default:
+				cmd.val[i] = s
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type BoolSliceCmd struct {
+	baseCmd
+
+	val []bool
+}
+
+var _ Cmder = (*BoolSliceCmd)(nil)
+
+func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
+	return &BoolSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *BoolSliceCmd) SetVal(val []bool) {
+	cmd.val = val
+}
+
+func (cmd *BoolSliceCmd) Val() []bool {
+	return cmd.val
+}
+
+func (cmd *BoolSliceCmd) Result() ([]bool, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *BoolSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]bool, n)
+		for i := 0; i < len(cmd.val); i++ {
+			n, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+			cmd.val[i] = n == 1
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStringMapCmd struct {
+	baseCmd
+
+	val map[string]string
+}
+
+var _ Cmder = (*StringStringMapCmd)(nil)
+
+func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd {
+	return &StringStringMapCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *StringStringMapCmd) SetVal(val map[string]string) {
+	cmd.val = val
+}
+
+func (cmd *StringStringMapCmd) Val() map[string]string {
+	return cmd.val
+}
+
+func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StringStringMapCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+// Scan scans the map reply into a destination struct. The map keys are
+// matched to the struct fields by the `redis:"field"` tag.
+func (cmd *StringStringMapCmd) Scan(dest interface{}) error {
+	if cmd.err != nil {
+		return cmd.err
+	}
+
+	strct, err := hscan.Struct(dest)
+	if err != nil {
+		return err
+	}
+
+	for k, v := range cmd.val {
+		if err := strct.Scan(k, v); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
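+
+// Illustrative sketch (not part of the library): scanning an HGETALL reply
+// into a struct via its `redis:"..."` field tags; the type, field names and
+// key are hypothetical and rdb is an assumed client:
+//
+//	type Session struct {
+//		Token string `redis:"token"`
+//		TTL   int    `redis:"ttl"`
+//	}
+//	var s Session
+//	err := rdb.HGetAll(ctx, "session:42").Scan(&s)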
+
+func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make(map[string]string, n/2)
+		for i := int64(0); i < n; i += 2 {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			value, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			cmd.val[key] = value
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringIntMapCmd struct {
+	baseCmd
+
+	val map[string]int64
+}
+
+var _ Cmder = (*StringIntMapCmd)(nil)
+
+func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd {
+	return &StringIntMapCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *StringIntMapCmd) SetVal(val map[string]int64) {
+	cmd.val = val
+}
+
+func (cmd *StringIntMapCmd) Val() map[string]int64 {
+	return cmd.val
+}
+
+func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StringIntMapCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make(map[string]int64, n/2)
+		for i := int64(0); i < n; i += 2 {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			n, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+
+			cmd.val[key] = n
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type StringStructMapCmd struct {
+	baseCmd
+
+	val map[string]struct{}
+}
+
+var _ Cmder = (*StringStructMapCmd)(nil)
+
+func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
+	return &StringStructMapCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
+	cmd.val = val
+}
+
+func (cmd *StringStructMapCmd) Val() map[string]struct{} {
+	return cmd.val
+}
+
+func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *StringStructMapCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make(map[string]struct{}, n)
+		for i := int64(0); i < n; i++ {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+			cmd.val[key] = struct{}{}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XMessage struct {
+	ID     string
+	Values map[string]interface{}
+}
+
+type XMessageSliceCmd struct {
+	baseCmd
+
+	val []XMessage
+}
+
+var _ Cmder = (*XMessageSliceCmd)(nil)
+
+func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
+	return &XMessageSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
+	cmd.val = val
+}
+
+func (cmd *XMessageSliceCmd) Val() []XMessage {
+	return cmd.val
+}
+
+func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XMessageSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
+	var err error
+	cmd.val, err = readXMessageSlice(rd)
+	return err
+}
+
+func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+
+	msgs := make([]XMessage, n)
+	for i := 0; i < n; i++ {
+		var err error
+		msgs[i], err = readXMessage(rd)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return msgs, nil
+}
+
+func readXMessage(rd *proto.Reader) (XMessage, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return XMessage{}, err
+	}
+	if n != 2 {
+		return XMessage{}, fmt.Errorf("got %d, wanted 2", n)
+	}
+
+	id, err := rd.ReadString()
+	if err != nil {
+		return XMessage{}, err
+	}
+
+	var values map[string]interface{}
+
+	v, err := rd.ReadArrayReply(stringInterfaceMapParser)
+	if err != nil {
+		if err != proto.Nil {
+			return XMessage{}, err
+		}
+	} else {
+		values = v.(map[string]interface{})
+	}
+
+	return XMessage{
+		ID:     id,
+		Values: values,
+	}, nil
+}
+
+// stringInterfaceMapParser implements proto.MultiBulkParse.
+func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
+	m := make(map[string]interface{}, n/2)
+	for i := int64(0); i < n; i += 2 {
+		key, err := rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		value, err := rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		m[key] = value
+	}
+	return m, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XStream struct {
+	Stream   string
+	Messages []XMessage
+}
+
+type XStreamSliceCmd struct {
+	baseCmd
+
+	val []XStream
+}
+
+var _ Cmder = (*XStreamSliceCmd)(nil)
+
+func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
+	return &XStreamSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
+	cmd.val = val
+}
+
+func (cmd *XStreamSliceCmd) Val() []XStream {
+	return cmd.val
+}
+
+func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XStreamSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]XStream, n)
+		for i := 0; i < len(cmd.val); i++ {
+			i := i
+			_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+				if n != 2 {
+					return nil, fmt.Errorf("got %d, wanted 2", n)
+				}
+
+				stream, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+
+				msgs, err := readXMessageSlice(rd)
+				if err != nil {
+					return nil, err
+				}
+
+				cmd.val[i] = XStream{
+					Stream:   stream,
+					Messages: msgs,
+				}
+				return nil, nil
+			})
+			if err != nil {
+				return nil, err
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XPending struct {
+	Count     int64
+	Lower     string
+	Higher    string
+	Consumers map[string]int64
+}
+
+type XPendingCmd struct {
+	baseCmd
+	val *XPending
+}
+
+var _ Cmder = (*XPendingCmd)(nil)
+
+func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
+	return &XPendingCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XPendingCmd) SetVal(val *XPending) {
+	cmd.val = val
+}
+
+func (cmd *XPendingCmd) Val() *XPending {
+	return cmd.val
+}
+
+func (cmd *XPendingCmd) Result() (*XPending, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XPendingCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 4 {
+			return nil, fmt.Errorf("got %d, wanted 4", n)
+		}
+
+		count, err := rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+
+		lower, err := rd.ReadString()
+		if err != nil && err != Nil {
+			return nil, err
+		}
+
+		higher, err := rd.ReadString()
+		if err != nil && err != Nil {
+			return nil, err
+		}
+
+		cmd.val = &XPending{
+			Count:  count,
+			Lower:  lower,
+			Higher: higher,
+		}
+		_, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+			for i := int64(0); i < n; i++ {
+				_, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+					if n != 2 {
+						return nil, fmt.Errorf("got %d, wanted 2", n)
+					}
+
+					consumerName, err := rd.ReadString()
+					if err != nil {
+						return nil, err
+					}
+
+					consumerPending, err := rd.ReadInt()
+					if err != nil {
+						return nil, err
+					}
+
+					if cmd.val.Consumers == nil {
+						cmd.val.Consumers = make(map[string]int64)
+					}
+					cmd.val.Consumers[consumerName] = consumerPending
+
+					return nil, nil
+				})
+				if err != nil {
+					return nil, err
+				}
+			}
+			return nil, nil
+		})
+		if err != nil && err != Nil {
+			return nil, err
+		}
+
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XPendingExt struct {
+	ID         string
+	Consumer   string
+	Idle       time.Duration
+	RetryCount int64
+}
+
+type XPendingExtCmd struct {
+	baseCmd
+	val []XPendingExt
+}
+
+var _ Cmder = (*XPendingExtCmd)(nil)
+
+func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
+	return &XPendingExtCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
+	cmd.val = val
+}
+
+func (cmd *XPendingExtCmd) Val() []XPendingExt {
+	return cmd.val
+}
+
+func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XPendingExtCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]XPendingExt, 0, n)
+		for i := int64(0); i < n; i++ {
+			_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+				if n != 4 {
+					return nil, fmt.Errorf("got %d, wanted 4", n)
+				}
+
+				id, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+
+				consumer, err := rd.ReadString()
+				if err != nil && err != Nil {
+					return nil, err
+				}
+
+				idle, err := rd.ReadIntReply()
+				if err != nil && err != Nil {
+					return nil, err
+				}
+
+				retryCount, err := rd.ReadIntReply()
+				if err != nil && err != Nil {
+					return nil, err
+				}
+
+				cmd.val = append(cmd.val, XPendingExt{
+					ID:         id,
+					Consumer:   consumer,
+					Idle:       time.Duration(idle) * time.Millisecond,
+					RetryCount: retryCount,
+				})
+				return nil, nil
+			})
+			if err != nil {
+				return nil, err
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimCmd struct {
+	baseCmd
+
+	start string
+	val   []XMessage
+}
+
+var _ Cmder = (*XAutoClaimCmd)(nil)
+
+func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
+	return &XAutoClaimCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
+	cmd.val = val
+	cmd.start = start
+}
+
+func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
+	return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
+	return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 2 {
+			return nil, fmt.Errorf("got %d, wanted 2", n)
+		}
+		var err error
+
+		cmd.start, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val, err = readXMessageSlice(rd)
+		if err != nil {
+			return nil, err
+		}
+
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XAutoClaimJustIDCmd struct {
+	baseCmd
+
+	start string
+	val   []string
+}
+
+var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
+
+func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
+	return &XAutoClaimJustIDCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
+	cmd.val = val
+	cmd.start = start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
+	return cmd.val, cmd.start
+}
+
+func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
+	return cmd.val, cmd.start, cmd.err
+}
+
+func (cmd *XAutoClaimJustIDCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 2 {
+			return nil, fmt.Errorf("got %d, wanted 2", n)
+		}
+		var err error
+
+		cmd.start, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val = make([]string, nn)
+		for i := 0; i < nn; i++ {
+			cmd.val[i], err = rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoConsumersCmd struct {
+	baseCmd
+	val []XInfoConsumer
+}
+
+type XInfoConsumer struct {
+	Name    string
+	Pending int64
+	Idle    int64
+}
+
+var _ Cmder = (*XInfoConsumersCmd)(nil)
+
+func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
+	return &XInfoConsumersCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: []interface{}{"xinfo", "consumers", stream, group},
+		},
+	}
+}
+
+func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
+	cmd.val = val
+}
+
+func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
+	return cmd.val
+}
+
+func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoConsumersCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make([]XInfoConsumer, n)
+
+	for i := 0; i < n; i++ {
+		cmd.val[i], err = readXConsumerInfo(rd)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) {
+	var consumer XInfoConsumer
+
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return consumer, err
+	}
+	if n != 6 {
+		return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n)
+	}
+
+	for i := 0; i < 3; i++ {
+		key, err := rd.ReadString()
+		if err != nil {
+			return consumer, err
+		}
+
+		val, err := rd.ReadString()
+		if err != nil {
+			return consumer, err
+		}
+
+		switch key {
+		case "name":
+			consumer.Name = val
+		case "pending":
+			consumer.Pending, err = strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return consumer, err
+			}
+		case "idle":
+			consumer.Idle, err = strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return consumer, err
+			}
+		default:
+			return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
+		}
+	}
+
+	return consumer, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoGroupsCmd struct {
+	baseCmd
+	val []XInfoGroup
+}
+
+type XInfoGroup struct {
+	Name            string
+	Consumers       int64
+	Pending         int64
+	LastDeliveredID string
+}
+
+var _ Cmder = (*XInfoGroupsCmd)(nil)
+
+func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
+	return &XInfoGroupsCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: []interface{}{"xinfo", "groups", stream},
+		},
+	}
+}
+
+func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
+	cmd.val = val
+}
+
+func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
+	return cmd.val
+}
+
+func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoGroupsCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make([]XInfoGroup, n)
+
+	for i := 0; i < n; i++ {
+		cmd.val[i], err = readXGroupInfo(rd)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) {
+	var group XInfoGroup
+
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return group, err
+	}
+	if n != 8 {
+		return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n)
+	}
+
+	for i := 0; i < 4; i++ {
+		key, err := rd.ReadString()
+		if err != nil {
+			return group, err
+		}
+
+		val, err := rd.ReadString()
+		if err != nil {
+			return group, err
+		}
+
+		switch key {
+		case "name":
+			group.Name = val
+		case "consumers":
+			group.Consumers, err = strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return group, err
+			}
+		case "pending":
+			group.Pending, err = strconv.ParseInt(val, 0, 64)
+			if err != nil {
+				return group, err
+			}
+		case "last-delivered-id":
+			group.LastDeliveredID = val
+		default:
+			return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key)
+		}
+	}
+
+	return group, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamCmd struct {
+	baseCmd
+	val *XInfoStream
+}
+
+type XInfoStream struct {
+	Length          int64
+	RadixTreeKeys   int64
+	RadixTreeNodes  int64
+	Groups          int64
+	LastGeneratedID string
+	FirstEntry      XMessage
+	LastEntry       XMessage
+}
+
+var _ Cmder = (*XInfoStreamCmd)(nil)
+
+func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
+	return &XInfoStreamCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: []interface{}{"xinfo", "stream", stream},
+		},
+	}
+}
+
+func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
+	cmd.val = val
+}
+
+func (cmd *XInfoStreamCmd) Val() *XInfoStream {
+	return cmd.val
+}
+
+func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
+	v, err := rd.ReadReply(xStreamInfoParser)
+	if err != nil {
+		return err
+	}
+	cmd.val = v.(*XInfoStream)
+	return nil
+}
+
+func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+	if n != 14 {
+		return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply, "+
+			"wanted 14", n)
+	}
+	var info XInfoStream
+	for i := 0; i < 7; i++ {
+		key, err := rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+		switch key {
+		case "length":
+			info.Length, err = rd.ReadIntReply()
+		case "radix-tree-keys":
+			info.RadixTreeKeys, err = rd.ReadIntReply()
+		case "radix-tree-nodes":
+			info.RadixTreeNodes, err = rd.ReadIntReply()
+		case "groups":
+			info.Groups, err = rd.ReadIntReply()
+		case "last-generated-id":
+			info.LastGeneratedID, err = rd.ReadString()
+		case "first-entry":
+			info.FirstEntry, err = readXMessage(rd)
+			if err == Nil {
+				err = nil
+			}
+		case "last-entry":
+			info.LastEntry, err = readXMessage(rd)
+			if err == Nil {
+				err = nil
+			}
+		default:
+			return nil, fmt.Errorf("redis: unexpected content %s "+
+				"in XINFO STREAM reply", key)
+		}
+		if err != nil {
+			return nil, err
+		}
+	}
+	return &info, nil
+}
+
+//------------------------------------------------------------------------------
+
+type XInfoStreamFullCmd struct {
+	baseCmd
+	val *XInfoStreamFull
+}
+
+type XInfoStreamFull struct {
+	Length          int64
+	RadixTreeKeys   int64
+	RadixTreeNodes  int64
+	LastGeneratedID string
+	Entries         []XMessage
+	Groups          []XInfoStreamGroup
+}
+
+type XInfoStreamGroup struct {
+	Name            string
+	LastDeliveredID string
+	PelCount        int64
+	Pending         []XInfoStreamGroupPending
+	Consumers       []XInfoStreamConsumer
+}
+
+type XInfoStreamGroupPending struct {
+	ID            string
+	Consumer      string
+	DeliveryTime  time.Time
+	DeliveryCount int64
+}
+
+type XInfoStreamConsumer struct {
+	Name     string
+	SeenTime time.Time
+	PelCount int64
+	Pending  []XInfoStreamConsumerPending
+}
+
+type XInfoStreamConsumerPending struct {
+	ID            string
+	DeliveryTime  time.Time
+	DeliveryCount int64
+}
+
+var _ Cmder = (*XInfoStreamFullCmd)(nil)
+
+func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
+	return &XInfoStreamFullCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
+	cmd.val = val
+}
+
+func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
+	return cmd.val
+}
+
+func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *XInfoStreamFullCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+	if n != 12 {
+		return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+			"wanted 12", n)
+	}
+
+	cmd.val = &XInfoStreamFull{}
+
+	for i := 0; i < 6; i++ {
+		key, err := rd.ReadString()
+		if err != nil {
+			return err
+		}
+
+		switch key {
+		case "length":
+			cmd.val.Length, err = rd.ReadIntReply()
+		case "radix-tree-keys":
+			cmd.val.RadixTreeKeys, err = rd.ReadIntReply()
+		case "radix-tree-nodes":
+			cmd.val.RadixTreeNodes, err = rd.ReadIntReply()
+		case "last-generated-id":
+			cmd.val.LastGeneratedID, err = rd.ReadString()
+		case "entries":
+			cmd.val.Entries, err = readXMessageSlice(rd)
+		case "groups":
+			cmd.val.Groups, err = readStreamGroups(rd)
+		default:
+			return fmt.Errorf("redis: unexpected content %s "+
+				"in XINFO STREAM reply", key)
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+	groups := make([]XInfoStreamGroup, 0, n)
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+		if nn != 10 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 10", nn)
+		}
+
+		group := XInfoStreamGroup{}
+
+		for f := 0; f < 5; f++ {
+			key, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			switch key {
+			case "name":
+				group.Name, err = rd.ReadString()
+			case "last-delivered-id":
+				group.LastDeliveredID, err = rd.ReadString()
+			case "pel-count":
+				group.PelCount, err = rd.ReadIntReply()
+			case "pending":
+				group.Pending, err = readXInfoStreamGroupPending(rd)
+			case "consumers":
+				group.Consumers, err = readXInfoStreamConsumers(rd)
+			default:
+				return nil, fmt.Errorf("redis: unexpected content %s "+
+					"in XINFO STREAM reply", key)
+			}
+
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		groups = append(groups, group)
+	}
+
+	return groups, nil
+}
+
+func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+
+	pending := make([]XInfoStreamGroupPending, 0, n)
+
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+		if nn != 4 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 4", nn)
+		}
+
+		p := XInfoStreamGroupPending{}
+
+		p.ID, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		p.Consumer, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
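+		// Redis reports the delivery time as a Unix timestamp in milliseconds.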
+		delivery, err := rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+		p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+		p.DeliveryCount, err = rd.ReadIntReply()
+		if err != nil {
+			return nil, err
+		}
+
+		pending = append(pending, p)
+	}
+
+	return pending, nil
+}
+
+func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return nil, err
+	}
+
+	consumers := make([]XInfoStreamConsumer, 0, n)
+
+	for i := 0; i < n; i++ {
+		nn, err := rd.ReadArrayLen()
+		if err != nil {
+			return nil, err
+		}
+		if nn != 8 {
+			return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply, "+
+				"wanted 8", nn)
+		}
+
+		c := XInfoStreamConsumer{}
+
+		for f := 0; f < 4; f++ {
+			cKey, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			switch cKey {
+			case "name":
+				c.Name, err = rd.ReadString()
+			case "seen-time":
+				seen, err := rd.ReadIntReply()
+				if err != nil {
+					return nil, err
+				}
+				c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond))
+			case "pel-count":
+				c.PelCount, err = rd.ReadIntReply()
+			case "pending":
+				pendingNumber, err := rd.ReadArrayLen()
+				if err != nil {
+					return nil, err
+				}
+
+				c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
+
+				for pn := 0; pn < pendingNumber; pn++ {
+					nn, err := rd.ReadArrayLen()
+					if err != nil {
+						return nil, err
+					}
+					if nn != 3 {
+						return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply, "+
+							"wanted 3", nn)
+					}
+
+					p := XInfoStreamConsumerPending{}
+
+					p.ID, err = rd.ReadString()
+					if err != nil {
+						return nil, err
+					}
+
+					delivery, err := rd.ReadIntReply()
+					if err != nil {
+						return nil, err
+					}
+					p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
+
+					p.DeliveryCount, err = rd.ReadIntReply()
+					if err != nil {
+						return nil, err
+					}
+
+					c.Pending = append(c.Pending, p)
+				}
+			default:
+				return nil, fmt.Errorf("redis: unexpected content %s "+
+					"in XINFO STREAM reply", cKey)
+			}
+			if err != nil {
+				return nil, err
+			}
+		}
+		consumers = append(consumers, c)
+	}
+
+	return consumers, nil
+}
+
+//------------------------------------------------------------------------------
+
+type ZSliceCmd struct {
+	baseCmd
+
+	val []Z
+}
+
+var _ Cmder = (*ZSliceCmd)(nil)
+
+func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
+	return &ZSliceCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *ZSliceCmd) SetVal(val []Z) {
+	cmd.val = val
+}
+
+func (cmd *ZSliceCmd) Val() []Z {
+	return cmd.val
+}
+
+func (cmd *ZSliceCmd) Result() ([]Z, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *ZSliceCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]Z, n/2)
+		for i := 0; i < len(cmd.val); i++ {
+			member, err := rd.ReadString()
+			if err != nil {
+				return nil, err
+			}
+
+			score, err := rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+
+			cmd.val[i] = Z{
+				Member: member,
+				Score:  score,
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type ZWithKeyCmd struct {
+	baseCmd
+
+	val *ZWithKey
+}
+
+var _ Cmder = (*ZWithKeyCmd)(nil)
+
+func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
+	return &ZWithKeyCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
+	cmd.val = val
+}
+
+func (cmd *ZWithKeyCmd) Val() *ZWithKey {
+	return cmd.val
+}
+
+func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ZWithKeyCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		if n != 3 {
+			return nil, fmt.Errorf("got %d elements, expected 3", n)
+		}
+
+		cmd.val = &ZWithKey{}
+		var err error
+
+		cmd.val.Key, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val.Member, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+
+		cmd.val.Score, err = rd.ReadFloatReply()
+		if err != nil {
+			return nil, err
+		}
+
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type ScanCmd struct {
+	baseCmd
+
+	page   []string
+	cursor uint64
+
+	process cmdable
+}
+
+var _ Cmder = (*ScanCmd)(nil)
+
+func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
+	return &ScanCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+		process: process,
+	}
+}
+
+func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
+	cmd.page = page
+	cmd.cursor = cursor
+}
+
+func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
+	return cmd.page, cmd.cursor
+}
+
+func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
+	return cmd.page, cmd.cursor, cmd.err
+}
+
+func (cmd *ScanCmd) String() string {
+	return cmdString(cmd, cmd.page)
+}
+
+func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) {
+	cmd.page, cmd.cursor, err = rd.ReadScanReply()
+	return err
+}
+
+// Iterator creates a new ScanIterator.
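+//
+// A usage sketch (assuming ctx is a context.Context and rdb is a configured client):
+//
+//    iter := rdb.Scan(ctx, 0, "prefix:*", 0).Iterator()
+//    for iter.Next(ctx) {
+//        fmt.Println(iter.Val())
+//    }
+//    if err := iter.Err(); err != nil {
+//        panic(err)
+//    }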
+func (cmd *ScanCmd) Iterator() *ScanIterator {
+	return &ScanIterator{
+		cmd: cmd,
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type ClusterNode struct {
+	ID   string
+	Addr string
+}
+
+type ClusterSlot struct {
+	Start int
+	End   int
+	Nodes []ClusterNode
+}
+
+type ClusterSlotsCmd struct {
+	baseCmd
+
+	val []ClusterSlot
+}
+
+var _ Cmder = (*ClusterSlotsCmd)(nil)
+
+func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
+	return &ClusterSlotsCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
+	cmd.val = val
+}
+
+func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
+	return cmd.val
+}
+
+func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *ClusterSlotsCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]ClusterSlot, n)
+		for i := 0; i < len(cmd.val); i++ {
+			n, err := rd.ReadArrayLen()
+			if err != nil {
+				return nil, err
+			}
+			if n < 2 {
+				err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
+				return nil, err
+			}
+
+			start, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+
+			end, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+
+			nodes := make([]ClusterNode, n-2)
+			for j := 0; j < len(nodes); j++ {
+				n, err := rd.ReadArrayLen()
+				if err != nil {
+					return nil, err
+				}
+				if n != 2 && n != 3 {
+					err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
+					return nil, err
+				}
+
+				ip, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+
+				port, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+
+				nodes[j].Addr = net.JoinHostPort(ip, port)
+
+				if n == 3 {
+					id, err := rd.ReadString()
+					if err != nil {
+						return nil, err
+					}
+					nodes[j].ID = id
+				}
+			}
+
+			cmd.val[i] = ClusterSlot{
+				Start: int(start),
+				End:   int(end),
+				Nodes: nodes,
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+// GeoLocation is used with GeoAdd to add a geospatial location.
+type GeoLocation struct {
+	Name                      string
+	Longitude, Latitude, Dist float64
+	GeoHash                   int64
+}
+
+// GeoRadiusQuery is used with GeoRadius to query a geospatial index.
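+//
+// A usage sketch (assuming rdb is a configured client and "Sicily" is a hypothetical key):
+//
+//    locs, err := rdb.GeoRadius(ctx, "Sicily", 15.0, 37.0, &GeoRadiusQuery{
+//        Radius:    200,
+//        Unit:      "km",
+//        WithCoord: true,
+//        WithDist:  true,
+//    }).Result()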
+type GeoRadiusQuery struct {
+	Radius float64
+	// Can be m, km, ft, or mi. Default is km.
+	Unit        string
+	WithCoord   bool
+	WithDist    bool
+	WithGeoHash bool
+	Count       int
+	// Can be ASC or DESC. Default is no sort order.
+	Sort      string
+	Store     string
+	StoreDist string
+}
+
+type GeoLocationCmd struct {
+	baseCmd
+
+	q         *GeoRadiusQuery
+	locations []GeoLocation
+}
+
+var _ Cmder = (*GeoLocationCmd)(nil)
+
+func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
+	return &GeoLocationCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: geoLocationArgs(q, args...),
+		},
+		q: q,
+	}
+}
+
+func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
+	args = append(args, q.Radius)
+	if q.Unit != "" {
+		args = append(args, q.Unit)
+	} else {
+		args = append(args, "km")
+	}
+	if q.WithCoord {
+		args = append(args, "withcoord")
+	}
+	if q.WithDist {
+		args = append(args, "withdist")
+	}
+	if q.WithGeoHash {
+		args = append(args, "withhash")
+	}
+	if q.Count > 0 {
+		args = append(args, "count", q.Count)
+	}
+	if q.Sort != "" {
+		args = append(args, q.Sort)
+	}
+	if q.Store != "" {
+		args = append(args, "store")
+		args = append(args, q.Store)
+	}
+	if q.StoreDist != "" {
+		args = append(args, "storedist")
+		args = append(args, q.StoreDist)
+	}
+	return args
+}
+
+func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
+	cmd.locations = locations
+}
+
+func (cmd *GeoLocationCmd) Val() []GeoLocation {
+	return cmd.locations
+}
+
+func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
+	return cmd.locations, cmd.err
+}
+
+func (cmd *GeoLocationCmd) String() string {
+	return cmdString(cmd, cmd.locations)
+}
+
+func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
+	v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
+	if err != nil {
+		return err
+	}
+	cmd.locations = v.([]GeoLocation)
+	return nil
+}
+
+func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+	return func(rd *proto.Reader, n int64) (interface{}, error) {
+		locs := make([]GeoLocation, 0, n)
+		for i := int64(0); i < n; i++ {
+			v, err := rd.ReadReply(newGeoLocationParser(q))
+			if err != nil {
+				return nil, err
+			}
+			switch vv := v.(type) {
+			case string:
+				locs = append(locs, GeoLocation{
+					Name: vv,
+				})
+			case *GeoLocation:
+				// TODO: avoid copying
+				locs = append(locs, *vv)
+			default:
+				return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
+			}
+		}
+		return locs, nil
+	}
+}
+
+func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
+	return func(rd *proto.Reader, n int64) (interface{}, error) {
+		var loc GeoLocation
+		var err error
+
+		loc.Name, err = rd.ReadString()
+		if err != nil {
+			return nil, err
+		}
+		if q.WithDist {
+			loc.Dist, err = rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+		}
+		if q.WithGeoHash {
+			loc.GeoHash, err = rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+		}
+		if q.WithCoord {
+			n, err := rd.ReadArrayLen()
+			if err != nil {
+				return nil, err
+			}
+			if n != 2 {
+				return nil, fmt.Errorf("got %d coordinates, expected 2", n)
+			}
+
+			loc.Longitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+			loc.Latitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		return &loc, nil
+	}
+}
+
+//------------------------------------------------------------------------------
+
+// GeoSearchQuery is the query used with the GeoSearch/GeoSearchStore commands.
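+//
+// A usage sketch (assuming rdb is a configured client and "Sicily" is a hypothetical key):
+//
+//    members, err := rdb.GeoSearch(ctx, "Sicily", &GeoSearchQuery{
+//        Longitude:  15.0,
+//        Latitude:   37.0,
+//        Radius:     200,
+//        RadiusUnit: "km",
+//    }).Result()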
+type GeoSearchQuery struct {
+	Member string
+
+	// Longitude and Latitude, used with the FromLonLat option.
+	Longitude float64
+	Latitude  float64
+
+	// Distance and unit, used with the ByRadius option.
+	// Can be m, km, ft, or mi. Default is km.
+	Radius     float64
+	RadiusUnit string
+
+	// Height, width and unit when using ByBox option.
+	// Can be m, km, ft, or mi. Default is km.
+	BoxWidth  float64
+	BoxHeight float64
+	BoxUnit   string
+
+	// Can be ASC or DESC. Default is no sort order.
+	Sort     string
+	Count    int
+	CountAny bool
+}
+
+type GeoSearchLocationQuery struct {
+	GeoSearchQuery
+
+	WithCoord bool
+	WithDist  bool
+	WithHash  bool
+}
+
+type GeoSearchStoreQuery struct {
+	GeoSearchQuery
+
+	// When using the StoreDist option, the command stores the items in a
+	// sorted set populated with their distance from the center of the circle or box,
+	// as a floating-point number, in the same unit specified for that shape.
+	StoreDist bool
+}
+
+func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
+	args = geoSearchArgs(&q.GeoSearchQuery, args)
+
+	if q.WithCoord {
+		args = append(args, "withcoord")
+	}
+	if q.WithDist {
+		args = append(args, "withdist")
+	}
+	if q.WithHash {
+		args = append(args, "withhash")
+	}
+
+	return args
+}
+
+func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
+	if q.Member != "" {
+		args = append(args, "frommember", q.Member)
+	} else {
+		args = append(args, "fromlonlat", q.Longitude, q.Latitude)
+	}
+
+	if q.Radius > 0 {
+		if q.RadiusUnit == "" {
+			q.RadiusUnit = "km"
+		}
+		args = append(args, "byradius", q.Radius, q.RadiusUnit)
+	} else {
+		if q.BoxUnit == "" {
+			q.BoxUnit = "km"
+		}
+		args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
+	}
+
+	if q.Sort != "" {
+		args = append(args, q.Sort)
+	}
+
+	if q.Count > 0 {
+		args = append(args, "count", q.Count)
+		if q.CountAny {
+			args = append(args, "any")
+		}
+	}
+
+	return args
+}
+
+type GeoSearchLocationCmd struct {
+	baseCmd
+
+	opt *GeoSearchLocationQuery
+	val []GeoLocation
+}
+
+var _ Cmder = (*GeoSearchLocationCmd)(nil)
+
+func NewGeoSearchLocationCmd(
+	ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
+) *GeoSearchLocationCmd {
+	return &GeoSearchLocationCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+		opt: opt,
+	}
+}
+
+func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
+	cmd.val = val
+}
+
+func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
+	return cmd.val
+}
+
+func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
+	return cmd.val, cmd.err
+}
+
+func (cmd *GeoSearchLocationCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
+	n, err := rd.ReadArrayLen()
+	if err != nil {
+		return err
+	}
+
+	cmd.val = make([]GeoLocation, n)
+	for i := 0; i < n; i++ {
+		_, err = rd.ReadArrayLen()
+		if err != nil {
+			return err
+		}
+
+		var loc GeoLocation
+
+		loc.Name, err = rd.ReadString()
+		if err != nil {
+			return err
+		}
+		if cmd.opt.WithDist {
+			loc.Dist, err = rd.ReadFloatReply()
+			if err != nil {
+				return err
+			}
+		}
+		if cmd.opt.WithHash {
+			loc.GeoHash, err = rd.ReadIntReply()
+			if err != nil {
+				return err
+			}
+		}
+		if cmd.opt.WithCoord {
+			nn, err := rd.ReadArrayLen()
+			if err != nil {
+				return err
+			}
+			if nn != 2 {
+				return fmt.Errorf("got %d coordinates, expected 2", nn)
+			}
+
+			loc.Longitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return err
+			}
+			loc.Latitude, err = rd.ReadFloatReply()
+			if err != nil {
+				return err
+			}
+		}
+
+		cmd.val[i] = loc
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+type GeoPos struct {
+	Longitude, Latitude float64
+}
+
+type GeoPosCmd struct {
+	baseCmd
+
+	val []*GeoPos
+}
+
+var _ Cmder = (*GeoPosCmd)(nil)
+
+func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
+	return &GeoPosCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
+	cmd.val = val
+}
+
+func (cmd *GeoPosCmd) Val() []*GeoPos {
+	return cmd.val
+}
+
+func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *GeoPosCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]*GeoPos, n)
+		for i := 0; i < len(cmd.val); i++ {
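+			// Copy the loop variable so the closure below indexes this iteration's slot.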
+			i := i
+			_, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+				longitude, err := rd.ReadFloatReply()
+				if err != nil {
+					return nil, err
+				}
+
+				latitude, err := rd.ReadFloatReply()
+				if err != nil {
+					return nil, err
+				}
+
+				cmd.val[i] = &GeoPos{
+					Longitude: longitude,
+					Latitude:  latitude,
+				}
+				return nil, nil
+			})
+			if err != nil {
+				if err == Nil {
+					cmd.val[i] = nil
+					continue
+				}
+				return nil, err
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
+
+//------------------------------------------------------------------------------
+
+type CommandInfo struct {
+	Name        string
+	Arity       int8
+	Flags       []string
+	ACLFlags    []string
+	FirstKeyPos int8
+	LastKeyPos  int8
+	StepCount   int8
+	ReadOnly    bool
+}
+
+type CommandsInfoCmd struct {
+	baseCmd
+
+	val map[string]*CommandInfo
+}
+
+var _ Cmder = (*CommandsInfoCmd)(nil)
+
+func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
+	return &CommandsInfoCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
+	cmd.val = val
+}
+
+func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
+	return cmd.val
+}
+
+func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *CommandsInfoCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make(map[string]*CommandInfo, n)
+		for i := int64(0); i < n; i++ {
+			v, err := rd.ReadReply(commandInfoParser)
+			if err != nil {
+				return nil, err
+			}
+			vv := v.(*CommandInfo)
+			cmd.val[vv.Name] = vv
+		}
+		return nil, nil
+	})
+	return err
+}
+
+func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
+	const numArgRedis5 = 6
+	const numArgRedis6 = 7
+
+	switch n {
+	case numArgRedis5, numArgRedis6:
+		// continue
+	default:
+		return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 6 or 7", n)
+	}
+
+	var cmd CommandInfo
+	var err error
+
+	cmd.Name, err = rd.ReadString()
+	if err != nil {
+		return nil, err
+	}
+
+	arity, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.Arity = int8(arity)
+
+	_, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.Flags = make([]string, n)
+		for i := 0; i < len(cmd.Flags); i++ {
+			switch s, err := rd.ReadString(); {
+			case err == Nil:
+				cmd.Flags[i] = ""
+			case err != nil:
+				return nil, err
+			default:
+				cmd.Flags[i] = s
+			}
+		}
+		return nil, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	firstKeyPos, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.FirstKeyPos = int8(firstKeyPos)
+
+	lastKeyPos, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.LastKeyPos = int8(lastKeyPos)
+
+	stepCount, err := rd.ReadIntReply()
+	if err != nil {
+		return nil, err
+	}
+	cmd.StepCount = int8(stepCount)
+
+	for _, flag := range cmd.Flags {
+		if flag == "readonly" {
+			cmd.ReadOnly = true
+			break
+		}
+	}
+
+	if n == numArgRedis5 {
+		return &cmd, nil
+	}
+
+	_, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.ACLFlags = make([]string, n)
+		for i := 0; i < len(cmd.ACLFlags); i++ {
+			switch s, err := rd.ReadString(); {
+			case err == Nil:
+				cmd.ACLFlags[i] = ""
+			case err != nil:
+				return nil, err
+			default:
+				cmd.ACLFlags[i] = s
+			}
+		}
+		return nil, nil
+	})
+	if err != nil {
+		return nil, err
+	}
+
+	return &cmd, nil
+}
+
+//------------------------------------------------------------------------------
+
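+// cmdsInfoCache lazily loads and caches the COMMAND reply: Get invokes fn at
+// most once and reuses the resulting map; upper-case extension command names
+// are additionally indexed under their lower-case form.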
+type cmdsInfoCache struct {
+	fn func(ctx context.Context) (map[string]*CommandInfo, error)
+
+	once internal.Once
+	cmds map[string]*CommandInfo
+}
+
+func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
+	return &cmdsInfoCache{
+		fn: fn,
+	}
+}
+
+func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
+	err := c.once.Do(func() error {
+		cmds, err := c.fn(ctx)
+		if err != nil {
+			return err
+		}
+
+		// Extensions have cmd names in upper case. Convert them to lower case.
+		for k, v := range cmds {
+			lower := internal.ToLower(k)
+			if lower != k {
+				cmds[lower] = v
+			}
+		}
+
+		c.cmds = cmds
+		return nil
+	})
+	return c.cmds, err
+}
+
+//------------------------------------------------------------------------------
+
+type SlowLog struct {
+	ID       int64
+	Time     time.Time
+	Duration time.Duration
+	Args     []string
+	// These are also optional fields emitted only by Redis 4.0 or greater:
+	// https://redis.io/commands/slowlog#output-format
+	ClientAddr string
+	ClientName string
+}
+
+type SlowLogCmd struct {
+	baseCmd
+
+	val []SlowLog
+}
+
+var _ Cmder = (*SlowLogCmd)(nil)
+
+func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
+	return &SlowLogCmd{
+		baseCmd: baseCmd{
+			ctx:  ctx,
+			args: args,
+		},
+	}
+}
+
+func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
+	cmd.val = val
+}
+
+func (cmd *SlowLogCmd) Val() []SlowLog {
+	return cmd.val
+}
+
+func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
+	return cmd.Val(), cmd.Err()
+}
+
+func (cmd *SlowLogCmd) String() string {
+	return cmdString(cmd, cmd.val)
+}
+
+func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
+	_, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
+		cmd.val = make([]SlowLog, n)
+		for i := 0; i < len(cmd.val); i++ {
+			n, err := rd.ReadArrayLen()
+			if err != nil {
+				return nil, err
+			}
+			if n < 4 {
+				err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n)
+				return nil, err
+			}
+
+			id, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+
+			createdAt, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+			createdAtTime := time.Unix(createdAt, 0)
+
+			costs, err := rd.ReadIntReply()
+			if err != nil {
+				return nil, err
+			}
+			costsDuration := time.Duration(costs) * time.Microsecond
+
+			cmdLen, err := rd.ReadArrayLen()
+			if err != nil {
+				return nil, err
+			}
+			if cmdLen < 1 {
+				err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
+				return nil, err
+			}
+
+			cmdString := make([]string, cmdLen)
+			for i := 0; i < cmdLen; i++ {
+				cmdString[i], err = rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+			}
+
+			var address, name string
+			for i := 4; i < n; i++ {
+				str, err := rd.ReadString()
+				if err != nil {
+					return nil, err
+				}
+				if i == 4 {
+					address = str
+				} else if i == 5 {
+					name = str
+				}
+			}
+
+			cmd.val[i] = SlowLog{
+				ID:         id,
+				Time:       createdAtTime,
+				Duration:   costsDuration,
+				Args:       cmdString,
+				ClientAddr: address,
+				ClientName: name,
+			}
+		}
+		return nil, nil
+	})
+	return err
+}
diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go
new file mode 100644
index 0000000..bbfe089
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/commands.go
@@ -0,0 +1,3475 @@
+package redis
+
+import (
+	"context"
+	"errors"
+	"io"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+)
+
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires a redis-server version >= 6.0;
+// otherwise you will receive an error: (error) ERR syntax error.
+// For example:
+//
+//    rdb.Set(ctx, key, value, redis.KeepTTL)
+const KeepTTL = -1
+
+func usePrecise(dur time.Duration) bool {
+	return dur < time.Second || dur%time.Second != 0
+}
+
+func formatMs(ctx context.Context, dur time.Duration) int64 {
+	if dur > 0 && dur < time.Millisecond {
+		internal.Logger.Printf(
+			ctx,
+			"specified duration is %s, but minimal supported value is %s - truncating to 1ms",
+			dur, time.Millisecond,
+		)
+		return 1
+	}
+	return int64(dur / time.Millisecond)
+}
+
+func formatSec(ctx context.Context, dur time.Duration) int64 {
+	if dur > 0 && dur < time.Second {
+		internal.Logger.Printf(
+			ctx,
+			"specified duration is %s, but minimal supported value is %s - truncating to 1s",
+			dur, time.Second,
+		)
+		return 1
+	}
+	return int64(dur / time.Second)
+}
+
+func appendArgs(dst, src []interface{}) []interface{} {
+	if len(src) == 1 {
+		return appendArg(dst, src[0])
+	}
+
+	dst = append(dst, src...)
+	return dst
+}
+
+func appendArg(dst []interface{}, arg interface{}) []interface{} {
+	switch arg := arg.(type) {
+	case []string:
+		for _, s := range arg {
+			dst = append(dst, s)
+		}
+		return dst
+	case []interface{}:
+		dst = append(dst, arg...)
+		return dst
+	case map[string]interface{}:
+		for k, v := range arg {
+			dst = append(dst, k, v)
+		}
+		return dst
+	case map[string]string:
+		for k, v := range arg {
+			dst = append(dst, k, v)
+		}
+		return dst
+	default:
+		return append(dst, arg)
+	}
+}
+
+type Cmdable interface {
+	Pipeline() Pipeliner
+	Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+
+	TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
+	TxPipeline() Pipeliner
+
+	Command(ctx context.Context) *CommandsInfoCmd
+	ClientGetName(ctx context.Context) *StringCmd
+	Echo(ctx context.Context, message interface{}) *StringCmd
+	Ping(ctx context.Context) *StatusCmd
+	Quit(ctx context.Context) *StatusCmd
+	Del(ctx context.Context, keys ...string) *IntCmd
+	Unlink(ctx context.Context, keys ...string) *IntCmd
+	Dump(ctx context.Context, key string) *StringCmd
+	Exists(ctx context.Context, keys ...string) *IntCmd
+	Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+	ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	Keys(ctx context.Context, pattern string) *StringSliceCmd
+	Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
+	Move(ctx context.Context, key string, db int) *BoolCmd
+	ObjectRefCount(ctx context.Context, key string) *IntCmd
+	ObjectEncoding(ctx context.Context, key string) *StringCmd
+	ObjectIdleTime(ctx context.Context, key string) *DurationCmd
+	Persist(ctx context.Context, key string) *BoolCmd
+	PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
+	PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
+	PTTL(ctx context.Context, key string) *DurationCmd
+	RandomKey(ctx context.Context) *StringCmd
+	Rename(ctx context.Context, key, newkey string) *StatusCmd
+	RenameNX(ctx context.Context, key, newkey string) *BoolCmd
+	Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+	RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
+	Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
+	SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
+	SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
+	Touch(ctx context.Context, keys ...string) *IntCmd
+	TTL(ctx context.Context, key string) *DurationCmd
+	Type(ctx context.Context, key string) *StatusCmd
+	Append(ctx context.Context, key, value string) *IntCmd
+	Decr(ctx context.Context, key string) *IntCmd
+	DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
+	Get(ctx context.Context, key string) *StringCmd
+	GetRange(ctx context.Context, key string, start, end int64) *StringCmd
+	GetSet(ctx context.Context, key string, value interface{}) *StringCmd
+	GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
+	GetDel(ctx context.Context, key string) *StringCmd
+	Incr(ctx context.Context, key string) *IntCmd
+	IncrBy(ctx context.Context, key string, value int64) *IntCmd
+	IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
+	MGet(ctx context.Context, keys ...string) *SliceCmd
+	MSet(ctx context.Context, values ...interface{}) *StatusCmd
+	MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
+	Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+	SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
+	// TODO: rename to SetEx
+	SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
+	SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+	SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
+	SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
+	StrLen(ctx context.Context, key string) *IntCmd
+	Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd
+
+	GetBit(ctx context.Context, key string, offset int64) *IntCmd
+	SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
+	BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
+	BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
+	BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
+	BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
+	BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
+	BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
+	BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
+
+	Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
+	ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
+	SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+	HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+	ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
+
+	HDel(ctx context.Context, key string, fields ...string) *IntCmd
+	HExists(ctx context.Context, key, field string) *BoolCmd
+	HGet(ctx context.Context, key, field string) *StringCmd
+	HGetAll(ctx context.Context, key string) *StringStringMapCmd
+	HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
+	HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
+	HKeys(ctx context.Context, key string) *StringSliceCmd
+	HLen(ctx context.Context, key string) *IntCmd
+	HMGet(ctx context.Context, key string, fields ...string) *SliceCmd
+	HSet(ctx context.Context, key string, values ...interface{}) *IntCmd
+	HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
+	HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
+	HVals(ctx context.Context, key string) *StringSliceCmd
+	HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd
+
+	BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+	BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
+	BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
+	LIndex(ctx context.Context, key string, index int64) *StringCmd
+	LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
+	LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+	LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
+	LLen(ctx context.Context, key string) *IntCmd
+	LPop(ctx context.Context, key string) *StringCmd
+	LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
+	LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
+	LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
+	LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+	LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+	LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+	LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd
+	LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
+	LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
+	RPop(ctx context.Context, key string) *StringCmd
+	RPopCount(ctx context.Context, key string, count int) *StringSliceCmd
+	RPopLPush(ctx context.Context, source, destination string) *StringCmd
+	RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
+	RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
+	LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd
+	BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd
+
+	SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
+	SCard(ctx context.Context, key string) *IntCmd
+	SDiff(ctx context.Context, keys ...string) *StringSliceCmd
+	SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+	SInter(ctx context.Context, keys ...string) *StringSliceCmd
+	SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
+	SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
+	SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
+	SMembers(ctx context.Context, key string) *StringSliceCmd
+	SMembersMap(ctx context.Context, key string) *StringStructMapCmd
+	SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
+	SPop(ctx context.Context, key string) *StringCmd
+	SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
+	SRandMember(ctx context.Context, key string) *StringCmd
+	SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
+	SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+	SUnion(ctx context.Context, keys ...string) *StringSliceCmd
+	SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
+
+	XAdd(ctx context.Context, a *XAddArgs) *StringCmd
+	XDel(ctx context.Context, stream string, ids ...string) *IntCmd
+	XLen(ctx context.Context, stream string) *IntCmd
+	XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
+	XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
+	XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
+	XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
+	XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
+	XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
+	XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
+	XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
+	XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
+	XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
+	XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+	XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
+	XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
+	XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
+	XPending(ctx context.Context, stream, group string) *XPendingCmd
+	XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
+	XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
+	XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
+	XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
+	XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
+
+	// TODO: remove XTrim and XTrimApprox in v9.
+	XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
+	XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
+	XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
+	XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
+	XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
+	XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
+	XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
+	XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
+	XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
+	XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
+
+	BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+	BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
+
+	// TODO: remove in v9:
+	//		ZAddCh
+	//		ZIncr
+	//		ZAddNXCh
+	//		ZAddXXCh
+	//		ZIncrNX
+	//		ZIncrXX
+	// Use ZAddArgs and ZAddArgsIncr instead.
+
+	ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
+	ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
+	ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
+	ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
+	ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
+	ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
+	ZCard(ctx context.Context, key string) *IntCmd
+	ZCount(ctx context.Context, key, min, max string) *IntCmd
+	ZLexCount(ctx context.Context, key, min, max string) *IntCmd
+	ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
+	ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
+	ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
+	ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
+	ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
+	ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
+	ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
+	ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+	ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+	ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+	ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+	ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+	ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
+	ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
+	ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
+	ZRank(ctx context.Context, key, member string) *IntCmd
+	ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
+	ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
+	ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
+	ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
+	ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
+	ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
+	ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+	ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
+	ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
+	ZRevRank(ctx context.Context, key, member string) *IntCmd
+	ZScore(ctx context.Context, key, member string) *FloatCmd
+	ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
+	ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
+	ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
+	ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd
+	ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
+	ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
+	ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
+
+	PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
+	PFCount(ctx context.Context, keys ...string) *IntCmd
+	PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd
+
+	BgRewriteAOF(ctx context.Context) *StatusCmd
+	BgSave(ctx context.Context) *StatusCmd
+	ClientKill(ctx context.Context, ipPort string) *StatusCmd
+	ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
+	ClientList(ctx context.Context) *StringCmd
+	ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
+	ClientID(ctx context.Context) *IntCmd
+	ConfigGet(ctx context.Context, parameter string) *SliceCmd
+	ConfigResetStat(ctx context.Context) *StatusCmd
+	ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
+	ConfigRewrite(ctx context.Context) *StatusCmd
+	DBSize(ctx context.Context) *IntCmd
+	FlushAll(ctx context.Context) *StatusCmd
+	FlushAllAsync(ctx context.Context) *StatusCmd
+	FlushDB(ctx context.Context) *StatusCmd
+	FlushDBAsync(ctx context.Context) *StatusCmd
+	Info(ctx context.Context, section ...string) *StringCmd
+	LastSave(ctx context.Context) *IntCmd
+	Save(ctx context.Context) *StatusCmd
+	Shutdown(ctx context.Context) *StatusCmd
+	ShutdownSave(ctx context.Context) *StatusCmd
+	ShutdownNoSave(ctx context.Context) *StatusCmd
+	SlaveOf(ctx context.Context, host, port string) *StatusCmd
+	Time(ctx context.Context) *TimeCmd
+	DebugObject(ctx context.Context, key string) *StringCmd
+	ReadOnly(ctx context.Context) *StatusCmd
+	ReadWrite(ctx context.Context) *StatusCmd
+	MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
+
+	Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+	EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+	ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+	ScriptFlush(ctx context.Context) *StatusCmd
+	ScriptKill(ctx context.Context) *StatusCmd
+	ScriptLoad(ctx context.Context, script string) *StringCmd
+
+	Publish(ctx context.Context, channel string, message interface{}) *IntCmd
+	PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
+	PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd
+	PubSubNumPat(ctx context.Context) *IntCmd
+
+	ClusterSlots(ctx context.Context) *ClusterSlotsCmd
+	ClusterNodes(ctx context.Context) *StringCmd
+	ClusterMeet(ctx context.Context, host, port string) *StatusCmd
+	ClusterForget(ctx context.Context, nodeID string) *StatusCmd
+	ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
+	ClusterResetSoft(ctx context.Context) *StatusCmd
+	ClusterResetHard(ctx context.Context) *StatusCmd
+	ClusterInfo(ctx context.Context) *StringCmd
+	ClusterKeySlot(ctx context.Context, key string) *IntCmd
+	ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
+	ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
+	ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
+	ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
+	ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
+	ClusterSaveConfig(ctx context.Context) *StatusCmd
+	ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
+	ClusterFailover(ctx context.Context) *StatusCmd
+	ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
+	ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
+
+	GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
+	GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
+	GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
+	GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
+	GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
+	GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
+	GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
+	GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
+	GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
+	GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
+}
+
+type StatefulCmdable interface {
+	Cmdable
+	Auth(ctx context.Context, password string) *StatusCmd
+	AuthACL(ctx context.Context, username, password string) *StatusCmd
+	Select(ctx context.Context, index int) *StatusCmd
+	SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
+	ClientSetName(ctx context.Context, name string) *BoolCmd
+}
+
+var (
+	_ Cmdable = (*Client)(nil)
+	_ Cmdable = (*Tx)(nil)
+	_ Cmdable = (*Ring)(nil)
+	_ Cmdable = (*ClusterClient)(nil)
+)
+
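+// cmdable is the hook through which every command constructor below processes
+// the command it builds; Client, Tx, Ring and ClusterClient each supply their
+// own implementation, which lets them share these command methods.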
+type cmdable func(ctx context.Context, cmd Cmder) error
+
+type statefulCmdable func(ctx context.Context, cmd Cmder) error
+
+//------------------------------------------------------------------------------
+
+func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "auth", password)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// AuthACL performs an AUTH command using the given username and password.
+// It should be used to authenticate the current connection with one of the users defined in the ACL list
+// when connecting to a Redis 6.0 or greater instance that uses the Redis ACL system.
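+//
+// A usage sketch (assuming conn exposes the stateful command set and "app-user"
+// is a hypothetical ACL user):
+//
+//    if err := conn.AuthACL(ctx, "app-user", "app-password").Err(); err != nil {
+//        // authentication failed
+//    }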
+func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "auth", username, password)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
+	cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "select", index)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ClientSetName assigns a name to the connection.
+func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "client", "setname", name)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
+	cmd := NewCommandsInfoCmd(ctx, "command")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ClientGetName returns the name of the connection.
+func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
+	cmd := NewStringCmd(ctx, "client", "getname")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
+	cmd := NewStringCmd(ctx, "echo", message)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Ping(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "ping")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Quit(_ context.Context) *StatusCmd {
+	panic("not implemented")
+}
+
+func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "del"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "unlink"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Dump(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "dump", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "exists"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "")
+}
+
+func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "NX")
+}
+
+func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "XX")
+}
+
+func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "GT")
+}
+
+func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	return c.expire(ctx, key, expiration, "LT")
+}
+
+func (c cmdable) expire(
+	ctx context.Context, key string, expiration time.Duration, mode string,
+) *BoolCmd {
+	args := make([]interface{}, 3, 4)
+	args[0] = "expire"
+	args[1] = key
+	args[2] = formatSec(ctx, expiration)
+	if mode != "" {
+		args = append(args, mode)
+	}
+
+	cmd := NewBoolCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
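+
+// Illustrative usage sketch (not part of the library), assuming an initialized
+// client named rdb and a context named ctx from the caller's side:
+//
+//	ok, err := rdb.Expire(ctx, "session:42", 10*time.Minute).Result()  // always (re)sets the TTL
+//	ok, err = rdb.ExpireNX(ctx, "session:42", 10*time.Minute).Result() // only if no TTL is set (Redis >= 7.0)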
+
+func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix())
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "keys", pattern)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd {
+	cmd := NewStatusCmd(
+		ctx,
+		"migrate",
+		host,
+		port,
+		key,
+		db,
+		formatMs(ctx, timeout),
+	)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "move", key, db)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "object", "refcount", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "object", "encoding", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd {
+	cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "persist", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration))
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
+	cmd := NewBoolCmd(
+		ctx,
+		"pexpireat",
+		key,
+		tm.UnixNano()/int64(time.Millisecond),
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
+	cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RandomKey(ctx context.Context) *StringCmd {
+	cmd := NewStringCmd(ctx, "randomkey")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "rename", key, newkey)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "renamenx", key, newkey)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+	cmd := NewStatusCmd(
+		ctx,
+		"restore",
+		key,
+		formatMs(ctx, ttl),
+		value,
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
+	cmd := NewStatusCmd(
+		ctx,
+		"restore",
+		key,
+		formatMs(ctx, ttl),
+		value,
+		"replace",
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type Sort struct {
+	By            string
+	Offset, Count int64
+	Get           []string
+	Order         string
+	Alpha         bool
+}
+
+func (sort *Sort) args(key string) []interface{} {
+	args := []interface{}{"sort", key}
+	if sort.By != "" {
+		args = append(args, "by", sort.By)
+	}
+	if sort.Offset != 0 || sort.Count != 0 {
+		args = append(args, "limit", sort.Offset, sort.Count)
+	}
+	for _, get := range sort.Get {
+		args = append(args, "get", get)
+	}
+	if sort.Order != "" {
+		args = append(args, sort.Order)
+	}
+	if sort.Alpha {
+		args = append(args, "alpha")
+	}
+	return args
+}
+
+func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, sort.args(key)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
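+
+// Illustrative usage sketch (not part of the library), assuming an initialized
+// client named rdb and a context named ctx from the caller's side:
+//
+//	vals, err := rdb.Sort(ctx, "mylist", &redis.Sort{
+//		By:     "weight_*",
+//		Offset: 0,
+//		Count:  10,
+//		Get:    []string{"object_*"},
+//		Order:  "ASC",
+//	}).Result()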
+
+func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
+	args := sort.args(key)
+	if store != "" {
+		args = append(args, "store", store)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
+	cmd := NewSliceCmd(ctx, sort.args(key)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, len(keys)+1)
+	args[0] = "touch"
+	for i, key := range keys {
+		args[i+1] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd {
+	cmd := NewDurationCmd(ctx, time.Second, "ttl", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Type(ctx context.Context, key string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "type", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
+	cmd := NewIntCmd(ctx, "append", key, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "decr", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "decrby", key, decrement)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Get Redis `GET key` command. It returns the redis.Nil error when the key does not exist.
+func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "get", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
+	cmd := NewStringCmd(ctx, "getrange", key, start, end)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
+	cmd := NewStringCmd(ctx, "getset", key, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// GetEx gets the value of key and optionally sets its expiration.
+// An expiration of zero removes the TTL associated with the key (i.e. GETEX key PERSIST).
+// Requires Redis >= 6.2.0.
+func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
+	args := make([]interface{}, 0, 4)
+	args = append(args, "getex", key)
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(ctx, expiration))
+		} else {
+			args = append(args, "ex", formatSec(ctx, expiration))
+		}
+	} else if expiration == 0 {
+		args = append(args, "persist")
+	}
+
+	cmd := NewStringCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
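+
+// Illustrative usage sketch (not part of the library), assuming an initialized
+// client named rdb and a context named ctx from the caller's side:
+//
+//	val, err := rdb.GetEx(ctx, "token", time.Minute).Result() // GETEX token EX 60
+//	val, err = rdb.GetEx(ctx, "token", 0).Result()            // GETEX token PERSIST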
+
+// GetDel gets the value of key and deletes the key; requires redis-server version >= 6.2.0.
+func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "getdel", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "incr", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "incrby", key, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
+	cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "mget"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// MSet is like Set but accepts multiple values:
+//   - MSet("key1", "value1", "key2", "value2")
+//   - MSet([]string{"key1", "value1", "key2", "value2"})
+//   - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
+	args := make([]interface{}, 1, 1+len(values))
+	args[0] = "mset"
+	args = appendArgs(args, values)
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
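+
+// Illustrative usage sketch (not part of the library), assuming an initialized
+// client named rdb and a context named ctx; the three forms below are equivalent:
+//
+//	err := rdb.MSet(ctx, "key1", "value1", "key2", "value2").Err()
+//	err = rdb.MSet(ctx, []string{"key1", "value1", "key2", "value2"}).Err()
+//	err = rdb.MSet(ctx, map[string]interface{}{"key1": "value1", "key2": "value2"}).Err()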
+
+// MSetNX is like SetNX but accepts multiple values:
+//   - MSetNX("key1", "value1", "key2", "value2")
+//   - MSetNX([]string{"key1", "value1", "key2", "value2"})
+//   - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
+func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
+	args := make([]interface{}, 1, 1+len(values))
+	args[0] = "msetnx"
+	args = appendArgs(args, values)
+	cmd := NewBoolCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Set Redis `SET key value [expiration]` command.
+// Use expiration for `SETEX`-like behavior.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires redis-server version >= 6.0,
+// otherwise you will receive the error: (error) ERR syntax error.
+func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+	args := make([]interface{}, 3, 5)
+	args[0] = "set"
+	args[1] = key
+	args[2] = value
+	if expiration > 0 {
+		if usePrecise(expiration) {
+			args = append(args, "px", formatMs(ctx, expiration))
+		} else {
+			args = append(args, "ex", formatSec(ctx, expiration))
+		}
+	} else if expiration == KeepTTL {
+		args = append(args, "keepttl")
+	}
+
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
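+
+// Illustrative usage sketch (not part of the library), assuming an initialized
+// client named rdb and a context named ctx from the caller's side:
+//
+//	err := rdb.Set(ctx, "key", "value", 10*time.Second).Err() // SET key value EX 10
+//	err = rdb.Set(ctx, "key", "value", 0).Err()               // SET key value (no expiration)
+//	err = rdb.Set(ctx, "key", "value", redis.KeepTTL).Err()   // SET key value KEEPTTL (Redis >= 6.0)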
+
+// SetArgs provides arguments for the SetArgs function.
+type SetArgs struct {
+	// Mode can be `NX` or `XX` or empty.
+	Mode string
+
+	// Zero `TTL` or `Expiration` means that the key has no expiration time.
+	TTL      time.Duration
+	ExpireAt time.Time
+
+	// When Get is true, the command returns the old value stored at key, or nil when key did not exist.
+	Get bool
+
+	// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires redis-server version >= 6.0,
+	// otherwise you will receive the error: (error) ERR syntax error.
+	KeepTTL bool
+}
+
+// SetArgs supports all the options that the SET command supports.
+// It is the alternative to the Set function when you want
+// to have more control over the options.
+func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
+	args := []interface{}{"set", key, value}
+
+	if a.KeepTTL {
+		args = append(args, "keepttl")
+	}
+
+	if !a.ExpireAt.IsZero() {
+		args = append(args, "exat", a.ExpireAt.Unix())
+	}
+	if a.TTL > 0 {
+		if usePrecise(a.TTL) {
+			args = append(args, "px", formatMs(ctx, a.TTL))
+		} else {
+			args = append(args, "ex", formatSec(ctx, a.TTL))
+		}
+	}
+
+	if a.Mode != "" {
+		args = append(args, a.Mode)
+	}
+
+	if a.Get {
+		args = append(args, "get")
+	}
+
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
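+
+// Illustrative usage sketch (not part of the library), assuming an initialized
+// client named rdb and a context named ctx; sets the key only if it does not
+// already exist and gives it a one-minute TTL:
+//
+//	err := rdb.SetArgs(ctx, "key", "value", redis.SetArgs{
+//		Mode: "NX",
+//		TTL:  time.Minute,
+//	}).Err()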
+
+// SetEX Redis `SETEX key expiration value` command.
+func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SetNX Redis `SET key value [expiration] NX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires redis-server version >= 6.0,
+// otherwise you will receive the error: (error) ERR syntax error.
+func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+	var cmd *BoolCmd
+	switch expiration {
+	case 0:
+		// Use old `SETNX` to support old Redis versions.
+		cmd = NewBoolCmd(ctx, "setnx", key, value)
+	case KeepTTL:
+		cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
+	default:
+		if usePrecise(expiration) {
+			cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
+		} else {
+			cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
+		}
+	}
+
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SetXX Redis `SET key value [expiration] XX` command.
+//
+// Zero expiration means the key has no expiration time.
+// KeepTTL is a Redis KEEPTTL option to keep the existing TTL; it requires redis-server version >= 6.0,
+// otherwise you will receive the error: (error) ERR syntax error.
+func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
+	var cmd *BoolCmd
+	switch expiration {
+	case 0:
+		cmd = NewBoolCmd(ctx, "set", key, value, "xx")
+	case KeepTTL:
+		cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
+	default:
+		if usePrecise(expiration) {
+			cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
+		} else {
+			cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
+		}
+	}
+
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
+	cmd := NewIntCmd(ctx, "setrange", key, offset, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "strlen", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
+	args := []interface{}{"copy", sourceKey, destKey, "DB", db}
+	if replace {
+		args = append(args, "REPLACE")
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "getbit", key, offset)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
+	cmd := NewIntCmd(
+		ctx,
+		"setbit",
+		key,
+		offset,
+		value,
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type BitCount struct {
+	Start, End int64
+}
+
+func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
+	args := []interface{}{"bitcount", key}
+	if bitCount != nil {
+		args = append(
+			args,
+			bitCount.Start,
+			bitCount.End,
+		)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "bitop"
+	args[1] = op
+	args[2] = destKey
+	for i, key := range keys {
+		args[3+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
+	return c.bitOp(ctx, "and", destKey, keys...)
+}
+
+func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
+	return c.bitOp(ctx, "or", destKey, keys...)
+}
+
+func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
+	return c.bitOp(ctx, "xor", destKey, keys...)
+}
+
+func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
+	return c.bitOp(ctx, "not", destKey, key)
+}
+
+func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
+	args := make([]interface{}, 3+len(pos))
+	args[0] = "bitpos"
+	args[1] = key
+	args[2] = bit
+	switch len(pos) {
+	case 0:
+	case 1:
+		args[3] = pos[0]
+	case 2:
+		args[3] = pos[0]
+		args[4] = pos[1]
+	default:
+		panic("too many arguments")
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd {
+	a := make([]interface{}, 0, 2+len(args))
+	a = append(a, "bitfield")
+	a = append(a, key)
+	a = append(a, args...)
+	cmd := NewIntSliceCmd(ctx, a...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"scan", cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
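+
+// Illustrative usage sketch (not part of the library), assuming an initialized
+// client named rdb and a context named ctx; iterates all keys matching a
+// pattern by following the returned cursor until it wraps back to 0:
+//
+//	var cursor uint64
+//	for {
+//		keys, next, err := rdb.Scan(ctx, cursor, "prefix:*", 100).Result()
+//		if err != nil {
+//			break
+//		}
+//		_ = keys // process this batch of keys
+//		if next == 0 {
+//			break
+//		}
+//		cursor = next
+//	}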
+
+func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
+	args := []interface{}{"scan", cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	if keyType != "" {
+		args = append(args, "type", keyType)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"sscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"hscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
+	args := []interface{}{"zscan", key, cursor}
+	if match != "" {
+		args = append(args, "match", match)
+	}
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewScanCmd(ctx, c, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
+	args := make([]interface{}, 2+len(fields))
+	args[0] = "hdel"
+	args[1] = key
+	for i, field := range fields {
+		args[2+i] = field
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "hexists", key, field)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
+	cmd := NewStringCmd(ctx, "hget", key, field)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd {
+	cmd := NewStringStringMapCmd(ctx, "hgetall", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
+	cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "hkeys", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "hlen", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// HMGet returns the values for the specified fields in the hash stored at key.
+// It returns interface{} values so that an empty string can be distinguished from a nil value.
+func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
+	args := make([]interface{}, 2+len(fields))
+	args[0] = "hmget"
+	args[1] = key
+	for i, field := range fields {
+		args[2+i] = field
+	}
+	cmd := NewSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// HSet accepts values in following formats:
+//   - HSet("myhash", "key1", "value1", "key2", "value2")
+//   - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
+//   - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
+//
+// Note that it requires Redis v4 for multiple field/value pairs support.
+func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "hset"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
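+
+// Illustrative usage sketch (not part of the library), assuming an initialized
+// client named rdb and a context named ctx; the three accepted forms from the
+// comment above:
+//
+//	n, err := rdb.HSet(ctx, "myhash", "key1", "value1", "key2", "value2").Result()
+//	n, err = rdb.HSet(ctx, "myhash", []string{"key1", "value1", "key2", "value2"}).Result()
+//	n, err = rdb.HSet(ctx, "myhash", map[string]interface{}{"key1": "value1", "key2": "value2"}).Result()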
+
+// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
+func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "hmset"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewBoolCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "hsetnx", key, field, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "hvals", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// HRandField returns random fields from the hash stored at key; requires redis-server version >= 6.2.0.
+func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd {
+	args := make([]interface{}, 0, 4)
+
+	// Although count=0 is meaningless, redis accepts count=0.
+	args = append(args, "hrandfield", key, count)
+	if withValues {
+		args = append(args, "withvalues")
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "blpop"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(ctx, timeout)
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
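+
+// Illustrative usage sketch (not part of the library), assuming an initialized
+// client named rdb and a context named ctx; a zero timeout blocks indefinitely,
+// and redis.Nil is returned when the timeout expires without an element:
+//
+//	res, err := rdb.BLPop(ctx, 5*time.Second, "queue").Result()
+//	if err == nil {
+//		key, value := res[0], res[1] // res is [key, element]
+//		_, _ = key, value
+//	}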
+
+func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "brpop"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(keys)+1] = formatSec(ctx, timeout)
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd {
+	cmd := NewStringCmd(
+		ctx,
+		"brpoplpush",
+		source,
+		destination,
+		formatSec(ctx, timeout),
+	)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
+	cmd := NewStringCmd(ctx, "lindex", key, index)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
+	cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LLen(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "llen", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "lpop", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "lpop", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type LPosArgs struct {
+	Rank, MaxLen int64
+}
+
+func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd {
+	args := []interface{}{"lpos", key, value}
+	if a.Rank != 0 {
+		args = append(args, "rank", a.Rank)
+	}
+	if a.MaxLen != 0 {
+		args = append(args, "maxlen", a.MaxLen)
+	}
+
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd {
+	args := []interface{}{"lpos", key, value, "count", count}
+	if a.Rank != 0 {
+		args = append(args, "rank", a.Rank)
+	}
+	if a.MaxLen != 0 {
+		args = append(args, "maxlen", a.MaxLen)
+	}
+	cmd := NewIntSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "lpush"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "lpushx"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(
+		ctx,
+		"lrange",
+		key,
+		start,
+		stop,
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
+	cmd := NewIntCmd(ctx, "lrem", key, count, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "lset", key, index, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
+	cmd := NewStatusCmd(
+		ctx,
+		"ltrim",
+		key,
+		start,
+		stop,
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "rpop", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "rpop", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
+	cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "rpush"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(values))
+	args[0] = "rpushx"
+	args[1] = key
+	args = appendArgs(args, values)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd {
+	cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BLMove(
+	ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration,
+) *StringCmd {
+	cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout))
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "sadd"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "scard", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sdiff"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sdiffstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sinter"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sinterstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "sismember", key, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
+func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "smismember"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewBoolSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SMembers Redis `SMEMBERS key` command output as a slice.
+func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "smembers", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SMembersMap Redis `SMEMBERS key` command output as a map.
+func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
+	cmd := NewStringStructMapCmd(ctx, "smembers", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "smove", source, destination, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SPop Redis `SPOP key` command.
+func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "spop", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SPopN Redis `SPOP key count` command.
+func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "spop", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SRandMember Redis `SRANDMEMBER key` command.
+func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "srandmember", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// SRandMemberN Redis `SRANDMEMBER key count` command.
+func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "srem"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "sunion"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "sunionstore"
+	args[1] = destination
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// XAddArgs accepts values in the following formats:
+//   - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
+//   - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"}
+//   - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
+//
+// Note that a map will not preserve the order of key-value pairs.
+// MaxLen/MaxLenApprox and MinID are mutually exclusive; only one of them can be used.
+type XAddArgs struct {
+	Stream     string
+	NoMkStream bool
+	MaxLen     int64 // MAXLEN N
+
+	// Deprecated: use MaxLen+Approx, remove in v9.
+	MaxLenApprox int64 // MAXLEN ~ N
+
+	MinID string
+	// Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
+	Approx bool
+	Limit  int64
+	ID     string
+	Values interface{}
+}
+
+// XAdd adds an entry to a stream. Note that the LIMIT option (a.Limit) is affected by a known
+// redis-server bug; confirm the behavior on your server before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
+	args := make([]interface{}, 0, 11)
+	args = append(args, "xadd", a.Stream)
+	if a.NoMkStream {
+		args = append(args, "nomkstream")
+	}
+	switch {
+	case a.MaxLen > 0:
+		if a.Approx {
+			args = append(args, "maxlen", "~", a.MaxLen)
+		} else {
+			args = append(args, "maxlen", a.MaxLen)
+		}
+	case a.MaxLenApprox > 0:
+		// TODO remove in v9.
+		args = append(args, "maxlen", "~", a.MaxLenApprox)
+	case a.MinID != "":
+		if a.Approx {
+			args = append(args, "minid", "~", a.MinID)
+		} else {
+			args = append(args, "minid", a.MinID)
+		}
+	}
+	if a.Limit > 0 {
+		args = append(args, "limit", a.Limit)
+	}
+	if a.ID != "" {
+		args = append(args, a.ID)
+	} else {
+		args = append(args, "*")
+	}
+	args = appendArg(args, a.Values)
+
+	cmd := NewStringCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
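+
+// Illustrative usage sketch (not part of the library), assuming an initialized
+// client named rdb and a context named ctx; appends an entry with an
+// auto-generated ID and approximate MAXLEN trimming:
+//
+//	id, err := rdb.XAdd(ctx, &redis.XAddArgs{
+//		Stream: "events",
+//		MaxLen: 1000,
+//		Approx: true,
+//		Values: map[string]interface{}{"type": "click", "user": "42"},
+//	}).Result()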
+
+func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
+	args := []interface{}{"xdel", stream}
+	for _, id := range ids {
+		args = append(args, id)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xlen", stream)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
+	cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XReadArgs struct {
+	Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+	Count   int64
+	Block   time.Duration
+}
+
+func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
+	args := make([]interface{}, 0, 6+len(a.Streams))
+	args = append(args, "xread")
+
+	keyPos := int8(1)
+	if a.Count > 0 {
+		args = append(args, "count")
+		args = append(args, a.Count)
+		keyPos += 2
+	}
+	if a.Block >= 0 {
+		args = append(args, "block")
+		args = append(args, int64(a.Block/time.Millisecond))
+		keyPos += 2
+	}
+	args = append(args, "streams")
+	keyPos++
+	for _, s := range a.Streams {
+		args = append(args, s)
+	}
+
+	cmd := NewXStreamSliceCmd(ctx, args...)
+	if a.Block >= 0 {
+		cmd.setReadTimeout(a.Block)
+	}
+	cmd.SetFirstKeyPos(keyPos)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
+	return c.XRead(ctx, &XReadArgs{
+		Streams: streams,
+		Block:   -1,
+	})
+}
+
+func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
+	cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XReadGroupArgs struct {
+	Group    string
+	Consumer string
+	Streams  []string // list of streams and ids, e.g. stream1 stream2 id1 id2
+	Count    int64
+	Block    time.Duration
+	NoAck    bool
+}
+
+func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
+	args := make([]interface{}, 0, 10+len(a.Streams))
+	args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
+
+	keyPos := int8(4)
+	if a.Count > 0 {
+		args = append(args, "count", a.Count)
+		keyPos += 2
+	}
+	if a.Block >= 0 {
+		args = append(args, "block", int64(a.Block/time.Millisecond))
+		keyPos += 2
+	}
+	if a.NoAck {
+		args = append(args, "noack")
+		keyPos++
+	}
+	args = append(args, "streams")
+	keyPos++
+	for _, s := range a.Streams {
+		args = append(args, s)
+	}
+
+	cmd := NewXStreamSliceCmd(ctx, args...)
+	if a.Block >= 0 {
+		cmd.setReadTimeout(a.Block)
+	}
+	cmd.SetFirstKeyPos(keyPos)
+	_ = c(ctx, cmd)
+	return cmd
+}
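+
+// Illustrative usage sketch (not part of the library), assuming an initialized
+// client named rdb, a context named ctx, and an existing consumer group; the
+// ">" ID asks for entries never delivered to any other consumer of the group:
+//
+//	streams, err := rdb.XReadGroup(ctx, &redis.XReadGroupArgs{
+//		Group:    "mygroup",
+//		Consumer: "consumer-1",
+//		Streams:  []string{"events", ">"},
+//		Count:    10,
+//		Block:    time.Second,
+//	}).Result()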
+
+func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
+	args := []interface{}{"xack", stream, group}
+	for _, id := range ids {
+		args = append(args, id)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
+	cmd := NewXPendingCmd(ctx, "xpending", stream, group)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XPendingExtArgs struct {
+	Stream   string
+	Group    string
+	Idle     time.Duration
+	Start    string
+	End      string
+	Count    int64
+	Consumer string
+}
+
+func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
+	args := make([]interface{}, 0, 9)
+	args = append(args, "xpending", a.Stream, a.Group)
+	if a.Idle != 0 {
+		args = append(args, "idle", formatMs(ctx, a.Idle))
+	}
+	args = append(args, a.Start, a.End, a.Count)
+	if a.Consumer != "" {
+		args = append(args, a.Consumer)
+	}
+	cmd := NewXPendingExtCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+type XAutoClaimArgs struct {
+	Stream   string
+	Group    string
+	MinIdle  time.Duration
+	Start    string
+	Count    int64
+	Consumer string
+}
+
+func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
+	args := xAutoClaimArgs(ctx, a)
+	cmd := NewXAutoClaimCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
+	args := xAutoClaimArgs(ctx, a)
+	args = append(args, "justid")
+	cmd := NewXAutoClaimJustIDCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
+	args := make([]interface{}, 0, 8)
+	args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
+	if a.Count > 0 {
+		args = append(args, "count", a.Count)
+	}
+	return args
+}
+
+type XClaimArgs struct {
+	Stream   string
+	Group    string
+	Consumer string
+	MinIdle  time.Duration
+	Messages []string
+}
+
+func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
+	args := xClaimArgs(a)
+	cmd := NewXMessageSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
+	args := xClaimArgs(a)
+	args = append(args, "justid")
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func xClaimArgs(a *XClaimArgs) []interface{} {
+	args := make([]interface{}, 0, 5+len(a.Messages))
+	args = append(args,
+		"xclaim",
+		a.Stream,
+		a.Group, a.Consumer,
+		int64(a.MinIdle/time.Millisecond))
+	for _, id := range a.Messages {
+		args = append(args, id)
+	}
+	return args
+}
+
+// xTrim builds an XTRIM command. If approx is true, the "~" modifier is added; otherwise exact
+// trimming ("=", the redis default) is used.
+// example:
+//		XTRIM key MAXLEN/MINID threshold LIMIT limit.
+//		XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
+// If the redis-server version is lower than 6.2, limit must be set to 0.
+func (c cmdable) xTrim(
+	ctx context.Context, key, strategy string,
+	approx bool, threshold interface{}, limit int64,
+) *IntCmd {
+	args := make([]interface{}, 0, 7)
+	args = append(args, "xtrim", key, strategy)
+	if approx {
+		args = append(args, "~")
+	}
+	args = append(args, threshold)
+	if limit > 0 {
+		args = append(args, "limit", limit)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// Deprecated: use XTrimMaxLen, remove in v9.
+func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// Deprecated: use XTrimMaxLenApprox, remove in v9.
+func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", true, maxLen, 0)
+}
+
+// XTrimMaxLen trims the stream to an exact maximum length (no `~` modifier); `limit` cannot be used.
+// cmd: XTRIM key MAXLEN maxLen
+func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
+}
+
+// XTrimMaxLenApprox trims the stream to an approximate maximum length. Note that LIMIT is affected
+// by a known redis-server bug; confirm the behavior on your server before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit
+func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
+	return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
+}
+
+// XTrimMinID trims the stream exactly by minimum ID (no `~` modifier); `limit` cannot be used.
+// cmd: XTRIM key MINID minID
+func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
+	return c.xTrim(ctx, key, "minid", false, minID, 0)
+}
+
+// XTrimMinIDApprox trims the stream approximately by minimum ID. Note that LIMIT is affected by a
+// known redis-server bug; confirm the behavior on your server before relying on it.
+// issue: https://github.com/redis/redis/issues/9046
+// cmd: XTRIM key MINID ~ minID LIMIT limit
+func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
+	return c.xTrim(ctx, key, "minid", true, minID, limit)
+}
+
+func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
+	cmd := NewXInfoConsumersCmd(ctx, key, group)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
+	cmd := NewXInfoGroupsCmd(ctx, key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
+	cmd := NewXInfoStreamCmd(ctx, key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// XInfoStreamFull XINFO STREAM FULL [COUNT count]
+// redis-server >= 6.0.
+func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
+	args := make([]interface{}, 0, 6)
+	args = append(args, "xinfo", "stream", key, "full")
+	if count > 0 {
+		args = append(args, "count", count)
+	}
+	cmd := NewXInfoStreamFullCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Z represents sorted set member.
+type Z struct {
+	Score  float64
+	Member interface{}
+}
+
+// ZWithKey represents sorted set member including the name of the key where it was popped.
+type ZWithKey struct {
+	Z
+	Key string
+}
+
+// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
+type ZStore struct {
+	Keys    []string
+	Weights []float64
+	// Can be SUM, MIN or MAX.
+	Aggregate string
+}
+
+func (z ZStore) len() (n int) {
+	n = len(z.Keys)
+	if len(z.Weights) > 0 {
+		n += 1 + len(z.Weights)
+	}
+	if z.Aggregate != "" {
+		n += 2
+	}
+	return n
+}
+
+func (z ZStore) appendArgs(args []interface{}) []interface{} {
+	for _, key := range z.Keys {
+		args = append(args, key)
+	}
+	if len(z.Weights) > 0 {
+		args = append(args, "weights")
+		for _, weights := range z.Weights {
+			args = append(args, weights)
+		}
+	}
+	if z.Aggregate != "" {
+		args = append(args, "aggregate", z.Aggregate)
+	}
+	return args
+}
+
+// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
+func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "bzpopmax"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(ctx, timeout)
+	cmd := NewZWithKeyCmd(ctx, args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
+func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
+	args := make([]interface{}, 1+len(keys)+1)
+	args[0] = "bzpopmin"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	args[len(args)-1] = formatSec(ctx, timeout)
+	cmd := NewZWithKeyCmd(ctx, args...)
+	cmd.setReadTimeout(timeout)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
+type ZAddArgs struct {
+	NX      bool
+	XX      bool
+	LT      bool
+	GT      bool
+	Ch      bool
+	Members []Z
+}
+
+func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
+	a := make([]interface{}, 0, 6+2*len(args.Members))
+	a = append(a, "zadd", key)
+
+	// The GT, LT and NX options are mutually exclusive.
+	if args.NX {
+		a = append(a, "nx")
+	} else {
+		if args.XX {
+			a = append(a, "xx")
+		}
+		if args.GT {
+			a = append(a, "gt")
+		} else if args.LT {
+			a = append(a, "lt")
+		}
+	}
+	if args.Ch {
+		a = append(a, "ch")
+	}
+	if incr {
+		a = append(a, "incr")
+	}
+	for _, m := range args.Members {
+		a = append(a, m.Score)
+		a = append(a, m.Member)
+	}
+	return a
+}
+
+func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
+	cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
+	cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
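+
+// Illustrative usage sketch (not part of the library), assuming an initialized
+// client named rdb and a context named ctx; only updates members whose new
+// score is greater, and counts changed elements:
+//
+//	n, err := rdb.ZAddArgs(ctx, "leaderboard", redis.ZAddArgs{
+//		GT: true,
+//		Ch: true,
+//		Members: []redis.Z{
+//			{Score: 100, Member: "alice"},
+//			{Score: 90, Member: "bob"},
+//		},
+//	}).Result()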
+
+// TODO: compatibility shim for the v8 API; will be removed in v9.
+func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd {
+	args.Members = make([]Z, len(members))
+	for i, m := range members {
+		args.Members[i] = *m
+	}
+	cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZAdd Redis `ZADD key score member [score member ...]` command.
+func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
+	return c.zAdd(ctx, key, ZAddArgs{}, members...)
+}
+
+// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
+func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
+	return c.zAdd(ctx, key, ZAddArgs{
+		NX: true,
+	}, members...)
+}
+
+// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
+func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
+	return c.zAdd(ctx, key, ZAddArgs{
+		XX: true,
+	}, members...)
+}
+
+// ZAddCh Redis `ZADD key CH score member [score member ...]` command.
+// Deprecated: Use
+//		client.ZAddArgs(ctx, ZAddArgs{
+//			Ch: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
+func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+	return c.zAdd(ctx, key, ZAddArgs{
+		Ch: true,
+	}, members...)
+}
+
+// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command.
+// Deprecated: Use
+//		client.ZAddArgs(ctx, ZAddArgs{
+//			NX: true,
+//			Ch: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
+func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+	return c.zAdd(ctx, key, ZAddArgs{
+		NX: true,
+		Ch: true,
+	}, members...)
+}
+
+// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command.
+// Deprecated: Use
+//		client.ZAddArgs(ctx, ZAddArgs{
+//			XX: true,
+//			Ch: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
+func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
+	return c.zAdd(ctx, key, ZAddArgs{
+		XX: true,
+		Ch: true,
+	}, members...)
+}
+
+// ZIncr Redis `ZADD key INCR score member` command.
+// Deprecated: Use
+//		client.ZAddArgsIncr(ctx, ZAddArgs{
+//			Members: []Z,
+//		})
+//	remove in v9.
+func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
+	return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+		Members: []Z{*member},
+	})
+}
+
+// ZIncrNX Redis `ZADD key NX INCR score member` command.
+// Deprecated: Use
+//		client.ZAddArgsIncr(ctx, ZAddArgs{
+//			NX: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
+func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
+	return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+		NX:      true,
+		Members: []Z{*member},
+	})
+}
+
+// ZIncrXX Redis `ZADD key XX INCR score member` command.
+// Deprecated: Use
+//		client.ZAddArgsIncr(ctx, ZAddArgs{
+//			XX: true,
+//			Members: []Z,
+//		})
+//	remove in v9.
+func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
+	return c.ZAddArgsIncr(ctx, key, ZAddArgs{
+		XX:      true,
+		Members: []Z{*member},
+	})
+}
+
+func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zcard", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zcount", key, min, max)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
+	cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zinterstore", destination, len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewIntCmd(ctx, args...)
+	cmd.SetFirstKeyPos(3)
+	_ = c(ctx, cmd)
+	return cmd
+}
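+
+// Illustrative usage sketch (not part of the library), assuming an initialized
+// client named rdb and a context named ctx; intersects two sorted sets with
+// weights and stores the result in "dest":
+//
+//	n, err := rdb.ZInterStore(ctx, "dest", &redis.ZStore{
+//		Keys:      []string{"zset1", "zset2"},
+//		Weights:   []float64{2, 3},
+//		Aggregate: "MAX",
+//	}).Result()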
+
+func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
+	args := make([]interface{}, 0, 2+store.len())
+	args = append(args, "zinter", len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zinter", len(store.Keys))
+	args = store.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "zmscore"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewFloatSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+	args := []interface{}{
+		"zpopmax",
+		key,
+	}
+
+	switch len(count) {
+	case 0:
+		break
+	case 1:
+		args = append(args, count[0])
+	default:
+		panic("too many arguments")
+	}
+
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
+	args := []interface{}{
+		"zpopmin",
+		key,
+	}
+
+	switch len(count) {
+	case 0:
+		break
+	case 1:
+		args = append(args, count[0])
+	default:
+		panic("too many arguments")
+	}
+
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZRangeArgs holds all the options of the ZRange command.
+// In Redis >= 6.2.0, ZRANGE can replace the following commands:
+//		ZREVRANGE,
+//		ZRANGEBYSCORE,
+//		ZREVRANGEBYSCORE,
+//		ZRANGEBYLEX,
+//		ZREVRANGEBYLEX.
+// Please pay attention to your redis-server version.
+//
+// Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 and higher.
+type ZRangeArgs struct {
+	Key string
+
+	// When the ByScore option is provided, an open (exclusive) interval can be set.
+	// By default, the score intervals specified by <Start> and <Stop> are closed (inclusive).
+	// It is similar to the deprecated(6.2.0+) ZRangeByScore command.
+	// For example:
+	//		ZRangeArgs{
+	//			Key: 				"example-key",
+	//	 		Start: 				"(3",
+	//	 		Stop: 				8,
+	//			ByScore:			true,
+	//	 	}
+	// 	 	cmd: "ZRange example-key (3 8 ByScore"  (3 < score <= 8).
+	//
+	// The ByLex option is similar to the ZRangeByLex command, which is deprecated as of 6.2.0.
+	// You can set the <Start> and <Stop> options as follows:
+	//		ZRangeArgs{
+	//			Key: 				"example-key",
+	//	 		Start: 				"[abc",
+	//	 		Stop: 				"(def",
+	//			ByLex:				true,
+	//	 	}
+	//		cmd: "ZRange example-key [abc (def ByLex"
+	//
+	// For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int).
+	// You can read the documentation for more information: https://redis.io/commands/zrange
+	Start interface{}
+	Stop  interface{}
+
+	// The ByScore and ByLex options are mutually exclusive.
+	ByScore bool
+	ByLex   bool
+
+	Rev bool
+
+	// limit offset count.
+	Offset int64
+	Count  int64
+}
+
+func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
+	// For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>.
+	if z.Rev && (z.ByScore || z.ByLex) {
+		args = append(args, z.Key, z.Stop, z.Start)
+	} else {
+		args = append(args, z.Key, z.Start, z.Stop)
+	}
+
+	if z.ByScore {
+		args = append(args, "byscore")
+	} else if z.ByLex {
+		args = append(args, "bylex")
+	}
+	if z.Rev {
+		args = append(args, "rev")
+	}
+	if z.Offset != 0 || z.Count != 0 {
+		args = append(args, "limit", z.Offset, z.Count)
+	}
+	return args
+}
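+
+// For illustration, ZRangeArgs{Key: "zset", Start: "(1", Stop: 5, ByScore: true, Rev: true}
+// passed to the ZRangeArgs method below results in the command:
+//	ZRANGE zset 5 (1 byscore rev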
+
+func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
+	args := make([]interface{}, 0, 9)
+	args = append(args, "zrange")
+	args = z.appendArgs(args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
+	args := make([]interface{}, 0, 10)
+	args = append(args, "zrange")
+	args = z.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+	return c.ZRangeArgs(ctx, ZRangeArgs{
+		Key:   key,
+		Start: start,
+		Stop:  stop,
+	})
+}
+
+func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+	return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
+		Key:   key,
+		Start: start,
+		Stop:  stop,
+	})
+}
+
+type ZRangeBy struct {
+	Min, Max      string
+	Offset, Count int64
+}
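+
+// Illustrative usage, assuming a client value named client: fetch members with
+// a score strictly greater than 5, limited to the first 10 results:
+//
+//	client.ZRangeByScore(ctx, "zset", &ZRangeBy{
+//		Min:    "(5",
+//		Max:    "+inf",
+//		Offset: 0,
+//		Count:  10,
+//	})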
+
+func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
+	args := []interface{}{zcmd, key, opt.Min, opt.Max}
+	if withScores {
+		args = append(args, "withscores")
+	}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
+}
+
+func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
+}
+
+func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+	args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
+	args := make([]interface{}, 0, 10)
+	args = append(args, "zrangestore", dst)
+	args = z.appendArgs(args)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zrank", key, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(members))
+	args[0] = "zrem"
+	args[1] = key
+	args = appendArgs(args, members)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
+	cmd := NewIntCmd(
+		ctx,
+		"zremrangebyrank",
+		key,
+		start,
+		stop,
+	)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
+	cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
+	args := []interface{}{zcmd, key, opt.Max, opt.Min}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
+}
+
+func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
+	return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
+}
+
+func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
+	args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
+	if opt.Offset != 0 || opt.Count != 0 {
+		args = append(
+			args,
+			"limit",
+			opt.Offset,
+			opt.Count,
+		)
+	}
+	cmd := NewZSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
+	cmd := NewIntCmd(ctx, "zrevrank", key, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
+	cmd := NewFloatCmd(ctx, "zscore", key, member)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
+	args := make([]interface{}, 0, 2+store.len())
+	args = append(args, "zunion", len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zunion", len(store.Keys))
+	args = store.appendArgs(args)
+	args = append(args, "withscores")
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
+	args := make([]interface{}, 0, 3+store.len())
+	args = append(args, "zunionstore", dest, len(store.Keys))
+	args = store.appendArgs(args)
+	cmd := NewIntCmd(ctx, args...)
+	cmd.SetFirstKeyPos(3)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZRandMember redis-server version >= 6.2.0.
+func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd {
+	args := make([]interface{}, 0, 4)
+
+	// Although count=0 is meaningless, redis accepts count=0.
+	args = append(args, "zrandmember", key, count)
+	if withScores {
+		args = append(args, "withscores")
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiff redis-server version >= 6.2.0.
+func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "zdiff"
+	args[1] = len(keys)
+	for i, key := range keys {
+		args[i+2] = key
+	}
+
+	cmd := NewStringSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiffWithScores redis-server version >= 6.2.0.
+func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
+	args := make([]interface{}, 3+len(keys))
+	args[0] = "zdiff"
+	args[1] = len(keys)
+	for i, key := range keys {
+		args[i+2] = key
+	}
+	args[len(keys)+2] = "withscores"
+
+	cmd := NewZSliceCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ZDiffStore redis-server version >=6.2.0.
+func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
+	args := make([]interface{}, 0, 3+len(keys))
+	args = append(args, "zdiffstore", destination, len(keys))
+	for _, key := range keys {
+		args = append(args, key)
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
+	args := make([]interface{}, 2, 2+len(els))
+	args[0] = "pfadd"
+	args[1] = key
+	args = appendArgs(args, els)
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "pfcount"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "pfmerge"
+	args[1] = dest
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "bgrewriteaof")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "bgsave")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// ClientKillByFilter is the new-style syntax, while ClientKill uses the old one:
+//
+//   CLIENT KILL <option> [value] ... <option> [value]
+func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd {
+	args := make([]interface{}, 2+len(keys))
+	args[0] = "client"
+	args[1] = "kill"
+	for i, key := range keys {
+		args[2+i] = key
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientList(ctx context.Context) *StringCmd {
+	cmd := NewStringCmd(ctx, "client", "list")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd {
+	cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur))
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientID(ctx context.Context) *IntCmd {
+	cmd := NewIntCmd(ctx, "client", "id")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "client", "unblock", id)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd {
+	cmd := NewIntCmd(ctx, "client", "unblock", id, "error")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ConfigGet(ctx context.Context, parameter string) *SliceCmd {
+	cmd := NewSliceCmd(ctx, "config", "get", parameter)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "config", "resetstat")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "config", "set", parameter, value)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "config", "rewrite")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) DBSize(ctx context.Context) *IntCmd {
+	cmd := NewIntCmd(ctx, "dbsize")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) FlushAll(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "flushall")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "flushall", "async")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) FlushDB(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "flushdb")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "flushdb", "async")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Info(ctx context.Context, section ...string) *StringCmd {
+	args := []interface{}{"info"}
+	if len(section) > 0 {
+		args = append(args, section[0])
+	}
+	cmd := NewStringCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) LastSave(ctx context.Context) *IntCmd {
+	cmd := NewIntCmd(ctx, "lastsave")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Save(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "save")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd {
+	var args []interface{}
+	if modifier == "" {
+		args = []interface{}{"shutdown"}
+	} else {
+		args = []interface{}{"shutdown", modifier}
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	if err := cmd.Err(); err != nil {
+		if err == io.EOF {
+			// Server quit as expected.
+			cmd.err = nil
+		}
+	} else {
+		// Server did not quit. String reply contains the reason.
+		cmd.err = errors.New(cmd.val)
+		cmd.val = ""
+	}
+	return cmd
+}
+
+func (c cmdable) Shutdown(ctx context.Context) *StatusCmd {
+	return c.shutdown(ctx, "")
+}
+
+func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd {
+	return c.shutdown(ctx, "save")
+}
+
+func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd {
+	return c.shutdown(ctx, "nosave")
+}
+
+func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "slaveof", host, port)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd {
+	cmd := NewSlowLogCmd(ctx, "slowlog", "get", num)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) Sync(_ context.Context) {
+	panic("not implemented")
+}
+
+func (c cmdable) Time(ctx context.Context) *TimeCmd {
+	cmd := NewTimeCmd(ctx, "time")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd {
+	cmd := NewStringCmd(ctx, "debug", "object", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "readonly")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "readwrite")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd {
+	args := []interface{}{"memory", "usage", key}
+	if len(samples) > 0 {
+		if len(samples) != 1 {
+			panic("MemoryUsage expects single sample count")
+		}
+		args = append(args, "SAMPLES", samples[0])
+	}
+	cmd := NewIntCmd(ctx, args...)
+	cmd.SetFirstKeyPos(2)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
+	cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+	cmdArgs[0] = "eval"
+	cmdArgs[1] = script
+	cmdArgs[2] = len(keys)
+	for i, key := range keys {
+		cmdArgs[3+i] = key
+	}
+	cmdArgs = appendArgs(cmdArgs, args)
+	cmd := NewCmd(ctx, cmdArgs...)
+	cmd.SetFirstKeyPos(3)
+	_ = c(ctx, cmd)
+	return cmd
+}
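+
+// Illustrative example, assuming a client value named client: run a short Lua
+// script that reads a single key and takes no extra arguments:
+//
+//	client.Eval(ctx, "return redis.call('GET', KEYS[1])", []string{"some-key"})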
+
+func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
+	cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
+	cmdArgs[0] = "evalsha"
+	cmdArgs[1] = sha1
+	cmdArgs[2] = len(keys)
+	for i, key := range keys {
+		cmdArgs[3+i] = key
+	}
+	cmdArgs = appendArgs(cmdArgs, args)
+	cmd := NewCmd(ctx, cmdArgs...)
+	cmd.SetFirstKeyPos(3)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
+	args := make([]interface{}, 2+len(hashes))
+	args[0] = "script"
+	args[1] = "exists"
+	for i, hash := range hashes {
+		args[2+i] = hash
+	}
+	cmd := NewBoolSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "script", "flush")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "script", "kill")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
+	cmd := NewStringCmd(ctx, "script", "load", script)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+// Publish posts the message to the channel.
+func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
+	cmd := NewIntCmd(ctx, "publish", channel, message)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
+	args := []interface{}{"pubsub", "channels"}
+	if pattern != "*" {
+		args = append(args, pattern)
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd {
+	args := make([]interface{}, 2+len(channels))
+	args[0] = "pubsub"
+	args[1] = "numsub"
+	for i, channel := range channels {
+		args[2+i] = channel
+	}
+	cmd := NewStringIntMapCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
+	cmd := NewIntCmd(ctx, "pubsub", "numpat")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
+	cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
+	cmd := NewStringCmd(ctx, "cluster", "nodes")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
+	cmd := NewStringCmd(ctx, "cluster", "info")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
+	cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
+	cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
+	cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
+	args := make([]interface{}, 2+len(slots))
+	args[0] = "cluster"
+	args[1] = "delslots"
+	for i, slot := range slots {
+		args[2+i] = slot
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+	size := max - min + 1
+	slots := make([]int, size)
+	for i := 0; i < size; i++ {
+		slots[i] = min + i
+	}
+	return c.ClusterDelSlots(ctx, slots...)
+}
+
+func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "cluster", "failover")
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
+	args := make([]interface{}, 2+len(slots))
+	args[0] = "cluster"
+	args[1] = "addslots"
+	for i, num := range slots {
+		args[2+i] = num
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
+	size := max - min + 1
+	slots := make([]int, size)
+	for i := 0; i < size; i++ {
+		slots[i] = min + i
+	}
+	return c.ClusterAddSlots(ctx, slots...)
+}
+
+//------------------------------------------------------------------------------
+
+func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd {
+	args := make([]interface{}, 2+3*len(geoLocation))
+	args[0] = "geoadd"
+	args[1] = key
+	for i, eachLoc := range geoLocation {
+		args[2+3*i] = eachLoc.Longitude
+		args[2+3*i+1] = eachLoc.Latitude
+		args[2+3*i+2] = eachLoc.Name
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// GeoRadius is a read-only GEORADIUS_RO command.
+func (c cmdable) GeoRadius(
+	ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+	cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude)
+	if query.Store != "" || query.StoreDist != "" {
+		cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
+		return cmd
+	}
+	_ = c(ctx, cmd)
+	return cmd
+}
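+
+// Illustrative example, assuming a client value named client and only the
+// commonly used GeoRadiusQuery fields:
+//
+//	client.GeoRadius(ctx, "geo-key", 13.361389, 38.115556, &GeoRadiusQuery{
+//		Radius: 200,
+//		Unit:   "km",
+//	})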
+
+// GeoRadiusStore is a writing GEORADIUS command.
+func (c cmdable) GeoRadiusStore(
+	ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
+) *IntCmd {
+	args := geoLocationArgs(query, "georadius", key, longitude, latitude)
+	cmd := NewIntCmd(ctx, args...)
+	if query.Store == "" && query.StoreDist == "" {
+		cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
+		return cmd
+	}
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
+func (c cmdable) GeoRadiusByMember(
+	ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *GeoLocationCmd {
+	cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member)
+	if query.Store != "" || query.StoreDist != "" {
+		cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
+		return cmd
+	}
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
+func (c cmdable) GeoRadiusByMemberStore(
+	ctx context.Context, key, member string, query *GeoRadiusQuery,
+) *IntCmd {
+	args := geoLocationArgs(query, "georadiusbymember", key, member)
+	cmd := NewIntCmd(ctx, args...)
+	if query.Store == "" && query.StoreDist == "" {
+		cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
+		return cmd
+	}
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd {
+	args := make([]interface{}, 0, 13)
+	args = append(args, "geosearch", key)
+	args = geoSearchArgs(q, args)
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoSearchLocation(
+	ctx context.Context, key string, q *GeoSearchLocationQuery,
+) *GeoSearchLocationCmd {
+	args := make([]interface{}, 0, 16)
+	args = append(args, "geosearch", key)
+	args = geoSearchLocationArgs(q, args)
+	cmd := NewGeoSearchLocationCmd(ctx, q, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd {
+	args := make([]interface{}, 0, 15)
+	args = append(args, "geosearchstore", store, key)
+	args = geoSearchArgs(&q.GeoSearchQuery, args)
+	if q.StoreDist {
+		args = append(args, "storedist")
+	}
+	cmd := NewIntCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoDist(
+	ctx context.Context, key string, member1, member2, unit string,
+) *FloatCmd {
+	if unit == "" {
+		unit = "km"
+	}
+	cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "geohash"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewStringSliceCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
+
+func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd {
+	args := make([]interface{}, 2+len(members))
+	args[0] = "geopos"
+	args[1] = key
+	for i, member := range members {
+		args[2+i] = member
+	}
+	cmd := NewGeoPosCmd(ctx, args...)
+	_ = c(ctx, cmd)
+	return cmd
+}
diff --git a/vendor/github.com/go-redis/redis/v8/doc.go b/vendor/github.com/go-redis/redis/v8/doc.go
new file mode 100644
index 0000000..5526253
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/doc.go
@@ -0,0 +1,4 @@
+/*
+Package redis implements a Redis client.
+*/
+package redis
diff --git a/vendor/github.com/go-redis/redis/v8/error.go b/vendor/github.com/go-redis/redis/v8/error.go
new file mode 100644
index 0000000..521594b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/error.go
@@ -0,0 +1,144 @@
+package redis
+
+import (
+	"context"
+	"io"
+	"net"
+	"strings"
+
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
+)
+
+// ErrClosed is returned when any operation is performed on a closed client.
+var ErrClosed = pool.ErrClosed
+
+type Error interface {
+	error
+
+	// RedisError is a no-op function but
+	// serves to distinguish types that are Redis
+	// errors from ordinary errors: a type is a
+	// Redis error if it has a RedisError method.
+	RedisError()
+}
+
+var _ Error = proto.RedisError("")
+
+func shouldRetry(err error, retryTimeout bool) bool {
+	switch err {
+	case io.EOF, io.ErrUnexpectedEOF:
+		return true
+	case nil, context.Canceled, context.DeadlineExceeded:
+		return false
+	}
+
+	if v, ok := err.(timeoutError); ok {
+		if v.Timeout() {
+			return retryTimeout
+		}
+		return true
+	}
+
+	s := err.Error()
+	if s == "ERR max number of clients reached" {
+		return true
+	}
+	if strings.HasPrefix(s, "LOADING ") {
+		return true
+	}
+	if strings.HasPrefix(s, "READONLY ") {
+		return true
+	}
+	if strings.HasPrefix(s, "CLUSTERDOWN ") {
+		return true
+	}
+	if strings.HasPrefix(s, "TRYAGAIN ") {
+		return true
+	}
+
+	return false
+}
+
+func isRedisError(err error) bool {
+	_, ok := err.(proto.RedisError)
+	return ok
+}
+
+func isBadConn(err error, allowTimeout bool, addr string) bool {
+	switch err {
+	case nil:
+		return false
+	case context.Canceled, context.DeadlineExceeded:
+		return true
+	}
+
+	if isRedisError(err) {
+		switch {
+		case isReadOnlyError(err):
+			// Close connections in read only state in case domain addr is used
+			// and domain resolves to a different Redis Server. See #790.
+			return true
+		case isMovedSameConnAddr(err, addr):
+			// Close connections when we are asked to move to the same addr
+			// of the connection. Force a DNS resolution when all connections
+			// of the pool are recycled
+			return true
+		default:
+			return false
+		}
+	}
+
+	if allowTimeout {
+		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
+			return !netErr.Temporary()
+		}
+	}
+
+	return true
+}
+
+func isMovedError(err error) (moved bool, ask bool, addr string) {
+	if !isRedisError(err) {
+		return
+	}
+
+	s := err.Error()
+	switch {
+	case strings.HasPrefix(s, "MOVED "):
+		moved = true
+	case strings.HasPrefix(s, "ASK "):
+		ask = true
+	default:
+		return
+	}
+
+	ind := strings.LastIndex(s, " ")
+	if ind == -1 {
+		return false, false, ""
+	}
+	addr = s[ind+1:]
+	return
+}
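+
+// For illustration, a reply of "MOVED 3999 127.0.0.1:6381" yields
+// moved=true, ask=false and addr="127.0.0.1:6381".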
+
+func isLoadingError(err error) bool {
+	return strings.HasPrefix(err.Error(), "LOADING ")
+}
+
+func isReadOnlyError(err error) bool {
+	return strings.HasPrefix(err.Error(), "READONLY ")
+}
+
+func isMovedSameConnAddr(err error, addr string) bool {
+	redisError := err.Error()
+	if !strings.HasPrefix(redisError, "MOVED ") {
+		return false
+	}
+	return strings.HasSuffix(redisError, " "+addr)
+}
+
+//------------------------------------------------------------------------------
+
+type timeoutError interface {
+	Timeout() bool
+}
diff --git a/vendor/github.com/go-redis/redis/v8/go.mod b/vendor/github.com/go-redis/redis/v8/go.mod
new file mode 100644
index 0000000..d2610c2
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/go.mod
@@ -0,0 +1,20 @@
+module github.com/go-redis/redis/v8
+
+go 1.17
+
+require (
+	github.com/cespare/xxhash/v2 v2.1.2
+	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f
+	github.com/onsi/ginkgo v1.16.5
+	github.com/onsi/gomega v1.18.1
+)
+
+require (
+	github.com/fsnotify/fsnotify v1.4.9 // indirect
+	github.com/nxadm/tail v1.4.8 // indirect
+	golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 // indirect
+	golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
+	golang.org/x/text v0.3.6 // indirect
+	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+	gopkg.in/yaml.v2 v2.4.0 // indirect
+)
diff --git a/vendor/github.com/go-redis/redis/v8/go.sum b/vendor/github.com/go-redis/redis/v8/go.sum
new file mode 100644
index 0000000..e88f31a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/go.sum
@@ -0,0 +1,108 @@
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
+github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
+github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
+github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
+github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
+github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
+github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
+github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
+github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
+github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
+github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
+github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
+github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
+github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
+github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
+github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
+github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
+github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
+github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
+github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
+github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
+github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
+github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
+github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
+github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
+github.com/onsi/ginkgo/v2 v2.0.0 h1:CcuG/HvWNkkaqCUpJifQY8z7qEMBJya6aLPx6ftGyjQ=
+github.com/onsi/ginkgo/v2 v2.0.0/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c=
+github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
+github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
+github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY=
+github.com/onsi/gomega v1.18.1 h1:M1GfJqGRrBrrGGsbxzV5dqM2U2ApXefZCQpkukxYRLE=
+github.com/onsi/gomega v1.18.1/go.mod h1:0q+aL8jAiMXy9hbwj2mr5GziHiwhAIQpFmmtT5hitRs=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781 h1:DzZ89McO9/gWPsQXS/FVKAlG02ZjaQ6AlZRBimEYOd0=
+golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
+golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
+golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
+golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
+google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
+google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
+google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
+google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
+google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
+google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
+google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
+google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
+gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
+gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
+gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
+gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
diff --git a/vendor/github.com/go-redis/redis/v8/internal/arg.go b/vendor/github.com/go-redis/redis/v8/internal/arg.go
new file mode 100644
index 0000000..b97fa0d
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/arg.go
@@ -0,0 +1,56 @@
+package internal
+
+import (
+	"fmt"
+	"strconv"
+	"time"
+)
+
+func AppendArg(b []byte, v interface{}) []byte {
+	switch v := v.(type) {
+	case nil:
+		return append(b, "<nil>"...)
+	case string:
+		return appendUTF8String(b, Bytes(v))
+	case []byte:
+		return appendUTF8String(b, v)
+	case int:
+		return strconv.AppendInt(b, int64(v), 10)
+	case int8:
+		return strconv.AppendInt(b, int64(v), 10)
+	case int16:
+		return strconv.AppendInt(b, int64(v), 10)
+	case int32:
+		return strconv.AppendInt(b, int64(v), 10)
+	case int64:
+		return strconv.AppendInt(b, v, 10)
+	case uint:
+		return strconv.AppendUint(b, uint64(v), 10)
+	case uint8:
+		return strconv.AppendUint(b, uint64(v), 10)
+	case uint16:
+		return strconv.AppendUint(b, uint64(v), 10)
+	case uint32:
+		return strconv.AppendUint(b, uint64(v), 10)
+	case uint64:
+		return strconv.AppendUint(b, v, 10)
+	case float32:
+		return strconv.AppendFloat(b, float64(v), 'f', -1, 64)
+	case float64:
+		return strconv.AppendFloat(b, v, 'f', -1, 64)
+	case bool:
+		if v {
+			return append(b, "true"...)
+		}
+		return append(b, "false"...)
+	case time.Time:
+		return v.AppendFormat(b, time.RFC3339Nano)
+	default:
+		return append(b, fmt.Sprint(v)...)
+	}
+}
+
+func appendUTF8String(dst []byte, src []byte) []byte {
+	dst = append(dst, src...)
+	return dst
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
new file mode 100644
index 0000000..b3a4f21
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
@@ -0,0 +1,78 @@
+package hashtag
+
+import (
+	"strings"
+
+	"github.com/go-redis/redis/v8/internal/rand"
+)
+
+const slotNumber = 16384
+
+// CRC16 implementation according to CCITT standards.
+// Copyright 2001-2010 Georges Menie (www.menie.org)
+// Copyright 2013 The Go Authors. All rights reserved.
+// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
+var crc16tab = [256]uint16{
+	0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
+	0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
+	0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
+	0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
+	0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
+	0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
+	0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
+	0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
+	0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
+	0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
+	0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
+	0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
+	0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
+	0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
+	0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
+	0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
+	0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
+	0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
+	0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
+	0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
+	0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
+	0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
+	0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
+	0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
+	0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
+	0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
+	0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
+	0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
+	0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
+	0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
+	0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
+	0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
+}
+
+func Key(key string) string {
+	if s := strings.IndexByte(key, '{'); s > -1 {
+		if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
+			return key[s+1 : s+e+1]
+		}
+	}
+	return key
+}
+
+func RandomSlot() int {
+	return rand.Intn(slotNumber)
+}
+
+// Slot returns a consistent slot number between 0 and 16383
+// for any given string key.
+func Slot(key string) int {
+	if key == "" {
+		return RandomSlot()
+	}
+	key = Key(key)
+	return int(crc16sum(key)) % slotNumber
+}
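+
+// For illustration, keys that share a hash tag map to the same slot because
+// only the tag is hashed: Slot("{user1000}.following") and
+// Slot("{user1000}.followers") both hash the string "user1000".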
+
+func crc16sum(key string) (crc uint16) {
+	for i := 0; i < len(key); i++ {
+		crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
+	}
+	return
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
new file mode 100644
index 0000000..852c8bd
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
@@ -0,0 +1,201 @@
+package hscan
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+)
+
+// decoderFunc represents decoding functions for default built-in types.
+type decoderFunc func(reflect.Value, string) error
+
+var (
+	// List of built-in decoders indexed by their numeric constant values (e.g. reflect.Bool = 1).
+	decoders = []decoderFunc{
+		reflect.Bool:          decodeBool,
+		reflect.Int:           decodeInt,
+		reflect.Int8:          decodeInt8,
+		reflect.Int16:         decodeInt16,
+		reflect.Int32:         decodeInt32,
+		reflect.Int64:         decodeInt64,
+		reflect.Uint:          decodeUint,
+		reflect.Uint8:         decodeUint8,
+		reflect.Uint16:        decodeUint16,
+		reflect.Uint32:        decodeUint32,
+		reflect.Uint64:        decodeUint64,
+		reflect.Float32:       decodeFloat32,
+		reflect.Float64:       decodeFloat64,
+		reflect.Complex64:     decodeUnsupported,
+		reflect.Complex128:    decodeUnsupported,
+		reflect.Array:         decodeUnsupported,
+		reflect.Chan:          decodeUnsupported,
+		reflect.Func:          decodeUnsupported,
+		reflect.Interface:     decodeUnsupported,
+		reflect.Map:           decodeUnsupported,
+		reflect.Ptr:           decodeUnsupported,
+		reflect.Slice:         decodeSlice,
+		reflect.String:        decodeString,
+		reflect.Struct:        decodeUnsupported,
+		reflect.UnsafePointer: decodeUnsupported,
+	}
+
+	// Global map of struct field specs that is populated once for every new
+	// struct type that is scanned. This caches the field types and the corresponding
+	// decoder functions to avoid iterating through struct fields on subsequent scans.
+	globalStructMap = newStructMap()
+)
+
+func Struct(dst interface{}) (StructValue, error) {
+	v := reflect.ValueOf(dst)
+
+	// The destination to scan into should be a struct pointer.
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst)
+	}
+
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst)
+	}
+
+	return StructValue{
+		spec:  globalStructMap.get(v.Type()),
+		value: v,
+	}, nil
+}
+
+// Scan scans the results from a key-value Redis map result set to a destination struct.
+// The Redis keys are matched to the struct's fields with the `redis` tag.
+func Scan(dst interface{}, keys []interface{}, vals []interface{}) error {
+	if len(keys) != len(vals) {
+		return errors.New("args should have the same number of keys and vals")
+	}
+
+	strct, err := Struct(dst)
+	if err != nil {
+		return err
+	}
+
+	// Iterate through the (key, value) sequence.
+	for i := 0; i < len(vals); i++ {
+		key, ok := keys[i].(string)
+		if !ok {
+			continue
+		}
+
+		val, ok := vals[i].(string)
+		if !ok {
+			continue
+		}
+
+		if err := strct.Scan(key, val); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
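+
+// Illustrative destination struct for Scan (field and tag names are hypothetical):
+//
+//	type Person struct {
+//		Name string `redis:"name"`
+//		Age  int    `redis:"age"`
+//	}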
+
+func decodeBool(f reflect.Value, s string) error {
+	b, err := strconv.ParseBool(s)
+	if err != nil {
+		return err
+	}
+	f.SetBool(b)
+	return nil
+}
+
+func decodeInt8(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 8)
+}
+
+func decodeInt16(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 16)
+}
+
+func decodeInt32(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 32)
+}
+
+func decodeInt64(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 64)
+}
+
+func decodeInt(f reflect.Value, s string) error {
+	return decodeNumber(f, s, 0)
+}
+
+func decodeNumber(f reflect.Value, s string, bitSize int) error {
+	v, err := strconv.ParseInt(s, 10, bitSize)
+	if err != nil {
+		return err
+	}
+	f.SetInt(v)
+	return nil
+}
+
+func decodeUint8(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 8)
+}
+
+func decodeUint16(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 16)
+}
+
+func decodeUint32(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 32)
+}
+
+func decodeUint64(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 64)
+}
+
+func decodeUint(f reflect.Value, s string) error {
+	return decodeUnsignedNumber(f, s, 0)
+}
+
+func decodeUnsignedNumber(f reflect.Value, s string, bitSize int) error {
+	v, err := strconv.ParseUint(s, 10, bitSize)
+	if err != nil {
+		return err
+	}
+	f.SetUint(v)
+	return nil
+}
+
+func decodeFloat32(f reflect.Value, s string) error {
+	v, err := strconv.ParseFloat(s, 32)
+	if err != nil {
+		return err
+	}
+	f.SetFloat(v)
+	return nil
+}
+
+// Although float64 is the default, we define its decoder explicitly for clarity.
+func decodeFloat64(f reflect.Value, s string) error {
+	v, err := strconv.ParseFloat(s, 64)
+	if err != nil {
+		return err
+	}
+	f.SetFloat(v)
+	return nil
+}
+
+func decodeString(f reflect.Value, s string) error {
+	f.SetString(s)
+	return nil
+}
+
+func decodeSlice(f reflect.Value, s string) error {
+	// []byte slice ([]uint8).
+	if f.Type().Elem().Kind() == reflect.Uint8 {
+		f.SetBytes([]byte(s))
+	}
+	return nil
+}
+
+func decodeUnsupported(v reflect.Value, s string) error {
+	return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
new file mode 100644
index 0000000..6839412
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
@@ -0,0 +1,93 @@
+package hscan
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+)
+
+// structMap contains the map of struct fields for target structs
+// indexed by the struct type.
+type structMap struct {
+	m sync.Map
+}
+
+func newStructMap() *structMap {
+	return new(structMap)
+}
+
+func (s *structMap) get(t reflect.Type) *structSpec {
+	if v, ok := s.m.Load(t); ok {
+		return v.(*structSpec)
+	}
+
+	spec := newStructSpec(t, "redis")
+	s.m.Store(t, spec)
+	return spec
+}
+
+//------------------------------------------------------------------------------
+
+// structSpec contains the list of all fields in a target struct.
+type structSpec struct {
+	m map[string]*structField
+}
+
+func (s *structSpec) set(tag string, sf *structField) {
+	s.m[tag] = sf
+}
+
+func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
+	numField := t.NumField()
+	out := &structSpec{
+		m: make(map[string]*structField, numField),
+	}
+
+	for i := 0; i < numField; i++ {
+		f := t.Field(i)
+
+		tag := f.Tag.Get(fieldTag)
+		if tag == "" || tag == "-" {
+			continue
+		}
+
+		tag = strings.Split(tag, ",")[0]
+		if tag == "" {
+			continue
+		}
+
+		// Use the built-in decoder.
+		out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
+	}
+
+	return out
+}
+
+//------------------------------------------------------------------------------
+
+// structField represents a single field in a target struct.
+type structField struct {
+	index int
+	fn    decoderFunc
+}
+
+//------------------------------------------------------------------------------
+
+type StructValue struct {
+	spec  *structSpec
+	value reflect.Value
+}
+
+func (s StructValue) Scan(key string, value string) error {
+	field, ok := s.spec.m[key]
+	if !ok {
+		return nil
+	}
+	if err := field.fn(s.value.Field(field.index), value); err != nil {
+		t := s.value.Type()
+		return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
+			value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
+	}
+	return nil
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/internal.go b/vendor/github.com/go-redis/redis/v8/internal/internal.go
new file mode 100644
index 0000000..4a59c59
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/internal.go
@@ -0,0 +1,29 @@
+package internal
+
+import (
+	"time"
+
+	"github.com/go-redis/redis/v8/internal/rand"
+)
+
+func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
+	if retry < 0 {
+		panic("not reached")
+	}
+	if minBackoff == 0 {
+		return 0
+	}
+
+	d := minBackoff << uint(retry)
+	if d < minBackoff {
+		return maxBackoff
+	}
+
+	d = minBackoff + time.Duration(rand.Int63n(int64(d)))
+
+	if d > maxBackoff || d < minBackoff {
+		d = maxBackoff
+	}
+
+	return d
+}
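+
+// For illustration, RetryBackoff(3, 8*time.Millisecond, 512*time.Millisecond)
+// returns 8ms plus a random jitter of up to 64ms (8ms << 3), capped at 512ms.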
diff --git a/vendor/github.com/go-redis/redis/v8/internal/log.go b/vendor/github.com/go-redis/redis/v8/internal/log.go
new file mode 100644
index 0000000..c8b9213
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/log.go
@@ -0,0 +1,26 @@
+package internal
+
+import (
+	"context"
+	"fmt"
+	"log"
+	"os"
+)
+
+type Logging interface {
+	Printf(ctx context.Context, format string, v ...interface{})
+}
+
+type logger struct {
+	log *log.Logger
+}
+
+func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
+	_ = l.log.Output(2, fmt.Sprintf(format, v...))
+}
+
+// Logger calls Output to print to the stderr.
+// Arguments are handled in the manner of fmt.Print.
+var Logger Logging = &logger{
+	log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/once.go b/vendor/github.com/go-redis/redis/v8/internal/once.go
new file mode 100644
index 0000000..64f4627
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/once.go
@@ -0,0 +1,60 @@
+/*
+Copyright 2014 The Camlistore Authors
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package internal
+
+import (
+	"sync"
+	"sync/atomic"
+)
+
+// A Once will perform a successful action exactly once.
+//
+// Unlike a sync.Once, this Once's func returns an error
+// and is re-armed on failure.
+type Once struct {
+	m    sync.Mutex
+	done uint32
+}
+
+// Do calls the function f if and only if Do has not been invoked
+// without error for this instance of Once.  In other words, given
+// 	var once Once
+// if once.Do(f) is called multiple times, only the first call will
+// invoke f, even if f has a different value in each invocation unless
+// f returns an error.  A new instance of Once is required for each
+// function to execute.
+//
+// Do is intended for initialization that must be run exactly once.  Since f
+// is niladic, it may be necessary to use a function literal to capture the
+// arguments to a function to be invoked by Do:
+// 	err := config.once.Do(func() error { return config.init(filename) })
+func (o *Once) Do(f func() error) error {
+	if atomic.LoadUint32(&o.done) == 1 {
+		return nil
+	}
+	// Slow-path.
+	o.m.Lock()
+	defer o.m.Unlock()
+	var err error
+	if o.done == 0 {
+		err = f()
+		if err == nil {
+			atomic.StoreUint32(&o.done, 1)
+		}
+	}
+	return err
+}
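+
+// Illustrative sketch (editorial note, not part of the upstream file): unlike
+// sync.Once, a failed initialization can be retried, because Do only latches
+// after f returns nil. connect is a placeholder:
+//
+//	var once Once
+//	if err := once.Do(func() error { return connect() }); err != nil {
+//		// connect failed; a later Do call will run it again
+//	}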
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
new file mode 100644
index 0000000..5661659
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
@@ -0,0 +1,121 @@
+package pool
+
+import (
+	"bufio"
+	"context"
+	"net"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal/proto"
+)
+
+var noDeadline = time.Time{}
+
+type Conn struct {
+	usedAt  int64 // atomic
+	netConn net.Conn
+
+	rd *proto.Reader
+	bw *bufio.Writer
+	wr *proto.Writer
+
+	Inited    bool
+	pooled    bool
+	createdAt time.Time
+}
+
+func NewConn(netConn net.Conn) *Conn {
+	cn := &Conn{
+		netConn:   netConn,
+		createdAt: time.Now(),
+	}
+	cn.rd = proto.NewReader(netConn)
+	cn.bw = bufio.NewWriter(netConn)
+	cn.wr = proto.NewWriter(cn.bw)
+	cn.SetUsedAt(time.Now())
+	return cn
+}
+
+func (cn *Conn) UsedAt() time.Time {
+	unix := atomic.LoadInt64(&cn.usedAt)
+	return time.Unix(unix, 0)
+}
+
+func (cn *Conn) SetUsedAt(tm time.Time) {
+	atomic.StoreInt64(&cn.usedAt, tm.Unix())
+}
+
+func (cn *Conn) SetNetConn(netConn net.Conn) {
+	cn.netConn = netConn
+	cn.rd.Reset(netConn)
+	cn.bw.Reset(netConn)
+}
+
+func (cn *Conn) Write(b []byte) (int, error) {
+	return cn.netConn.Write(b)
+}
+
+func (cn *Conn) RemoteAddr() net.Addr {
+	if cn.netConn != nil {
+		return cn.netConn.RemoteAddr()
+	}
+	return nil
+}
+
+func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
+	if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
+		return err
+	}
+	return fn(cn.rd)
+}
+
+func (cn *Conn) WithWriter(
+	ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
+) error {
+	if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
+		return err
+	}
+
+	if cn.bw.Buffered() > 0 {
+		cn.bw.Reset(cn.netConn)
+	}
+
+	if err := fn(cn.wr); err != nil {
+		return err
+	}
+
+	return cn.bw.Flush()
+}
+
+func (cn *Conn) Close() error {
+	return cn.netConn.Close()
+}
+
+func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
+	tm := time.Now()
+	cn.SetUsedAt(tm)
+
+	if timeout > 0 {
+		tm = tm.Add(timeout)
+	}
+
+	if ctx != nil {
+		deadline, ok := ctx.Deadline()
+		if ok {
+			if timeout == 0 {
+				return deadline
+			}
+			if deadline.Before(tm) {
+				return deadline
+			}
+			return tm
+		}
+	}
+
+	if timeout > 0 {
+		return tm
+	}
+
+	return noDeadline
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
new file mode 100644
index 0000000..44a4e77
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
@@ -0,0 +1,557 @@
+package pool
+
+import (
+	"context"
+	"errors"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+)
+
+var (
+	// ErrClosed is returned when any operation is performed on a closed client.
+	ErrClosed = errors.New("redis: client is closed")
+
+	// ErrPoolTimeout is returned when the client times out waiting for a connection from the pool.
+	ErrPoolTimeout = errors.New("redis: connection pool timeout")
+)
+
+var timers = sync.Pool{
+	New: func() interface{} {
+		t := time.NewTimer(time.Hour)
+		t.Stop()
+		return t
+	},
+}
+
+// Stats contains pool state information and accumulated stats.
+type Stats struct {
+	Hits     uint32 // number of times free connection was found in the pool
+	Misses   uint32 // number of times free connection was NOT found in the pool
+	Timeouts uint32 // number of times a wait timeout occurred
+
+	TotalConns uint32 // number of total connections in the pool
+	IdleConns  uint32 // number of idle connections in the pool
+	StaleConns uint32 // number of stale connections removed from the pool
+}
+
+type Pooler interface {
+	NewConn(context.Context) (*Conn, error)
+	CloseConn(*Conn) error
+
+	Get(context.Context) (*Conn, error)
+	Put(context.Context, *Conn)
+	Remove(context.Context, *Conn, error)
+
+	Len() int
+	IdleLen() int
+	Stats() *Stats
+
+	Close() error
+}
+
+type Options struct {
+	Dialer  func(context.Context) (net.Conn, error)
+	OnClose func(*Conn) error
+
+	PoolFIFO           bool
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+}
+
+type lastDialErrorWrap struct {
+	err error
+}
+
+type ConnPool struct {
+	opt *Options
+
+	dialErrorsNum uint32 // atomic
+
+	lastDialError atomic.Value
+
+	queue chan struct{}
+
+	connsMu      sync.Mutex
+	conns        []*Conn
+	idleConns    []*Conn
+	poolSize     int
+	idleConnsLen int
+
+	stats Stats
+
+	_closed  uint32 // atomic
+	closedCh chan struct{}
+}
+
+var _ Pooler = (*ConnPool)(nil)
+
+func NewConnPool(opt *Options) *ConnPool {
+	p := &ConnPool{
+		opt: opt,
+
+		queue:     make(chan struct{}, opt.PoolSize),
+		conns:     make([]*Conn, 0, opt.PoolSize),
+		idleConns: make([]*Conn, 0, opt.PoolSize),
+		closedCh:  make(chan struct{}),
+	}
+
+	p.connsMu.Lock()
+	p.checkMinIdleConns()
+	p.connsMu.Unlock()
+
+	if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
+		go p.reaper(opt.IdleCheckFrequency)
+	}
+
+	return p
+}
+
+func (p *ConnPool) checkMinIdleConns() {
+	if p.opt.MinIdleConns == 0 {
+		return
+	}
+	for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
+		p.poolSize++
+		p.idleConnsLen++
+
+		go func() {
+			err := p.addIdleConn()
+			if err != nil && err != ErrClosed {
+				p.connsMu.Lock()
+				p.poolSize--
+				p.idleConnsLen--
+				p.connsMu.Unlock()
+			}
+		}()
+	}
+}
+
+func (p *ConnPool) addIdleConn() error {
+	cn, err := p.dialConn(context.TODO(), true)
+	if err != nil {
+		return err
+	}
+
+	p.connsMu.Lock()
+	defer p.connsMu.Unlock()
+
+	// It is not allowed to add new connections to the closed connection pool.
+	if p.closed() {
+		_ = cn.Close()
+		return ErrClosed
+	}
+
+	p.conns = append(p.conns, cn)
+	p.idleConns = append(p.idleConns, cn)
+	return nil
+}
+
+func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
+	return p.newConn(ctx, false)
+}
+
+func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
+	cn, err := p.dialConn(ctx, pooled)
+	if err != nil {
+		return nil, err
+	}
+
+	p.connsMu.Lock()
+	defer p.connsMu.Unlock()
+
+	// It is not allowed to add new connections to the closed connection pool.
+	if p.closed() {
+		_ = cn.Close()
+		return nil, ErrClosed
+	}
+
+	p.conns = append(p.conns, cn)
+	if pooled {
+		// If the pool is full, remove the cn on the next Put.
+		if p.poolSize >= p.opt.PoolSize {
+			cn.pooled = false
+		} else {
+			p.poolSize++
+		}
+	}
+
+	return cn, nil
+}
+
+func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
+	if p.closed() {
+		return nil, ErrClosed
+	}
+
+	if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
+		return nil, p.getLastDialError()
+	}
+
+	netConn, err := p.opt.Dialer(ctx)
+	if err != nil {
+		p.setLastDialError(err)
+		if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
+			go p.tryDial()
+		}
+		return nil, err
+	}
+
+	cn := NewConn(netConn)
+	cn.pooled = pooled
+	return cn, nil
+}
+
+func (p *ConnPool) tryDial() {
+	for {
+		if p.closed() {
+			return
+		}
+
+		conn, err := p.opt.Dialer(context.Background())
+		if err != nil {
+			p.setLastDialError(err)
+			time.Sleep(time.Second)
+			continue
+		}
+
+		atomic.StoreUint32(&p.dialErrorsNum, 0)
+		_ = conn.Close()
+		return
+	}
+}
+
+func (p *ConnPool) setLastDialError(err error) {
+	p.lastDialError.Store(&lastDialErrorWrap{err: err})
+}
+
+func (p *ConnPool) getLastDialError() error {
+	err, _ := p.lastDialError.Load().(*lastDialErrorWrap)
+	if err != nil {
+		return err.err
+	}
+	return nil
+}
+
+// Get returns an existing connection from the pool or creates a new one.
+func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
+	if p.closed() {
+		return nil, ErrClosed
+	}
+
+	if err := p.waitTurn(ctx); err != nil {
+		return nil, err
+	}
+
+	for {
+		p.connsMu.Lock()
+		cn, err := p.popIdle()
+		p.connsMu.Unlock()
+
+		if err != nil {
+			return nil, err
+		}
+
+		if cn == nil {
+			break
+		}
+
+		if p.isStaleConn(cn) {
+			_ = p.CloseConn(cn)
+			continue
+		}
+
+		atomic.AddUint32(&p.stats.Hits, 1)
+		return cn, nil
+	}
+
+	atomic.AddUint32(&p.stats.Misses, 1)
+
+	newcn, err := p.newConn(ctx, true)
+	if err != nil {
+		p.freeTurn()
+		return nil, err
+	}
+
+	return newcn, nil
+}
+
+func (p *ConnPool) getTurn() {
+	p.queue <- struct{}{}
+}
+
+func (p *ConnPool) waitTurn(ctx context.Context) error {
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	default:
+	}
+
+	select {
+	case p.queue <- struct{}{}:
+		return nil
+	default:
+	}
+
+	timer := timers.Get().(*time.Timer)
+	timer.Reset(p.opt.PoolTimeout)
+
+	select {
+	case <-ctx.Done():
+		if !timer.Stop() {
+			<-timer.C
+		}
+		timers.Put(timer)
+		return ctx.Err()
+	case p.queue <- struct{}{}:
+		if !timer.Stop() {
+			<-timer.C
+		}
+		timers.Put(timer)
+		return nil
+	case <-timer.C:
+		timers.Put(timer)
+		atomic.AddUint32(&p.stats.Timeouts, 1)
+		return ErrPoolTimeout
+	}
+}
+
+func (p *ConnPool) freeTurn() {
+	<-p.queue
+}
+
+func (p *ConnPool) popIdle() (*Conn, error) {
+	if p.closed() {
+		return nil, ErrClosed
+	}
+	n := len(p.idleConns)
+	if n == 0 {
+		return nil, nil
+	}
+
+	var cn *Conn
+	if p.opt.PoolFIFO {
+		cn = p.idleConns[0]
+		copy(p.idleConns, p.idleConns[1:])
+		p.idleConns = p.idleConns[:n-1]
+	} else {
+		idx := n - 1
+		cn = p.idleConns[idx]
+		p.idleConns = p.idleConns[:idx]
+	}
+	p.idleConnsLen--
+	p.checkMinIdleConns()
+	return cn, nil
+}
+
+func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
+	if cn.rd.Buffered() > 0 {
+		internal.Logger.Printf(ctx, "Conn has unread data")
+		p.Remove(ctx, cn, BadConnError{})
+		return
+	}
+
+	if !cn.pooled {
+		p.Remove(ctx, cn, nil)
+		return
+	}
+
+	p.connsMu.Lock()
+	p.idleConns = append(p.idleConns, cn)
+	p.idleConnsLen++
+	p.connsMu.Unlock()
+	p.freeTurn()
+}
+
+func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+	p.removeConnWithLock(cn)
+	p.freeTurn()
+	_ = p.closeConn(cn)
+}
+
+func (p *ConnPool) CloseConn(cn *Conn) error {
+	p.removeConnWithLock(cn)
+	return p.closeConn(cn)
+}
+
+func (p *ConnPool) removeConnWithLock(cn *Conn) {
+	p.connsMu.Lock()
+	p.removeConn(cn)
+	p.connsMu.Unlock()
+}
+
+func (p *ConnPool) removeConn(cn *Conn) {
+	for i, c := range p.conns {
+		if c == cn {
+			p.conns = append(p.conns[:i], p.conns[i+1:]...)
+			if cn.pooled {
+				p.poolSize--
+				p.checkMinIdleConns()
+			}
+			return
+		}
+	}
+}
+
+func (p *ConnPool) closeConn(cn *Conn) error {
+	if p.opt.OnClose != nil {
+		_ = p.opt.OnClose(cn)
+	}
+	return cn.Close()
+}
+
+// Len returns the total number of connections.
+func (p *ConnPool) Len() int {
+	p.connsMu.Lock()
+	n := len(p.conns)
+	p.connsMu.Unlock()
+	return n
+}
+
+// IdleLen returns the number of idle connections.
+func (p *ConnPool) IdleLen() int {
+	p.connsMu.Lock()
+	n := p.idleConnsLen
+	p.connsMu.Unlock()
+	return n
+}
+
+func (p *ConnPool) Stats() *Stats {
+	idleLen := p.IdleLen()
+	return &Stats{
+		Hits:     atomic.LoadUint32(&p.stats.Hits),
+		Misses:   atomic.LoadUint32(&p.stats.Misses),
+		Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
+
+		TotalConns: uint32(p.Len()),
+		IdleConns:  uint32(idleLen),
+		StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
+	}
+}
+
+func (p *ConnPool) closed() bool {
+	return atomic.LoadUint32(&p._closed) == 1
+}
+
+func (p *ConnPool) Filter(fn func(*Conn) bool) error {
+	p.connsMu.Lock()
+	defer p.connsMu.Unlock()
+
+	var firstErr error
+	for _, cn := range p.conns {
+		if fn(cn) {
+			if err := p.closeConn(cn); err != nil && firstErr == nil {
+				firstErr = err
+			}
+		}
+	}
+	return firstErr
+}
+
+func (p *ConnPool) Close() error {
+	if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
+		return ErrClosed
+	}
+	close(p.closedCh)
+
+	var firstErr error
+	p.connsMu.Lock()
+	for _, cn := range p.conns {
+		if err := p.closeConn(cn); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	p.conns = nil
+	p.poolSize = 0
+	p.idleConns = nil
+	p.idleConnsLen = 0
+	p.connsMu.Unlock()
+
+	return firstErr
+}
+
+func (p *ConnPool) reaper(frequency time.Duration) {
+	ticker := time.NewTicker(frequency)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			// The ticker and closedCh can fire together, and select picks a
+			// case pseudo-randomly, so double-check here to avoid reaping
+			// after the pool has been closed.
+			if p.closed() {
+				return
+			}
+			_, err := p.ReapStaleConns()
+			if err != nil {
+				internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
+				continue
+			}
+		case <-p.closedCh:
+			return
+		}
+	}
+}
+
+func (p *ConnPool) ReapStaleConns() (int, error) {
+	var n int
+	for {
+		p.getTurn()
+
+		p.connsMu.Lock()
+		cn := p.reapStaleConn()
+		p.connsMu.Unlock()
+
+		p.freeTurn()
+
+		if cn != nil {
+			_ = p.closeConn(cn)
+			n++
+		} else {
+			break
+		}
+	}
+	atomic.AddUint32(&p.stats.StaleConns, uint32(n))
+	return n, nil
+}
+
+func (p *ConnPool) reapStaleConn() *Conn {
+	if len(p.idleConns) == 0 {
+		return nil
+	}
+
+	cn := p.idleConns[0]
+	if !p.isStaleConn(cn) {
+		return nil
+	}
+
+	p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
+	p.idleConnsLen--
+	p.removeConn(cn)
+
+	return cn
+}
+
+func (p *ConnPool) isStaleConn(cn *Conn) bool {
+	if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
+		return false
+	}
+
+	now := time.Now()
+	if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
+		return true
+	}
+	if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
+		return true
+	}
+
+	return false
+}
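+
+// Illustrative sketch (editorial note, not part of the upstream file): the
+// pool is an internal API whose lifecycle is Get -> use -> Put (or Remove on a
+// broken connection). The dialer and timeouts below are placeholders:
+//
+//	p := NewConnPool(&Options{
+//		Dialer: func(ctx context.Context) (net.Conn, error) {
+//			return net.Dial("tcp", "localhost:6379")
+//		},
+//		PoolSize:    10,
+//		PoolTimeout: 3 * time.Second,
+//	})
+//	cn, err := p.Get(ctx)
+//	if err == nil {
+//		// use cn ...
+//		p.Put(ctx, cn)
+//	}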
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
new file mode 100644
index 0000000..5a3fde1
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
@@ -0,0 +1,58 @@
+package pool
+
+import "context"
+
+type SingleConnPool struct {
+	pool      Pooler
+	cn        *Conn
+	stickyErr error
+}
+
+var _ Pooler = (*SingleConnPool)(nil)
+
+func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
+	return &SingleConnPool{
+		pool: pool,
+		cn:   cn,
+	}
+}
+
+func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
+	return p.pool.NewConn(ctx)
+}
+
+func (p *SingleConnPool) CloseConn(cn *Conn) error {
+	return p.pool.CloseConn(cn)
+}
+
+func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
+	if p.stickyErr != nil {
+		return nil, p.stickyErr
+	}
+	return p.cn, nil
+}
+
+func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}
+
+func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+	p.cn = nil
+	p.stickyErr = reason
+}
+
+func (p *SingleConnPool) Close() error {
+	p.cn = nil
+	p.stickyErr = ErrClosed
+	return nil
+}
+
+func (p *SingleConnPool) Len() int {
+	return 0
+}
+
+func (p *SingleConnPool) IdleLen() int {
+	return 0
+}
+
+func (p *SingleConnPool) Stats() *Stats {
+	return &Stats{}
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
new file mode 100644
index 0000000..3adb99b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
@@ -0,0 +1,201 @@
+package pool
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync/atomic"
+)
+
+const (
+	stateDefault = 0
+	stateInited  = 1
+	stateClosed  = 2
+)
+
+type BadConnError struct {
+	wrapped error
+}
+
+var _ error = (*BadConnError)(nil)
+
+func (e BadConnError) Error() string {
+	s := "redis: Conn is in a bad state"
+	if e.wrapped != nil {
+		s += ": " + e.wrapped.Error()
+	}
+	return s
+}
+
+func (e BadConnError) Unwrap() error {
+	return e.wrapped
+}
+
+//------------------------------------------------------------------------------
+
+type StickyConnPool struct {
+	pool   Pooler
+	shared int32 // atomic
+
+	state uint32 // atomic
+	ch    chan *Conn
+
+	_badConnError atomic.Value
+}
+
+var _ Pooler = (*StickyConnPool)(nil)
+
+func NewStickyConnPool(pool Pooler) *StickyConnPool {
+	p, ok := pool.(*StickyConnPool)
+	if !ok {
+		p = &StickyConnPool{
+			pool: pool,
+			ch:   make(chan *Conn, 1),
+		}
+	}
+	atomic.AddInt32(&p.shared, 1)
+	return p
+}
+
+func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
+	return p.pool.NewConn(ctx)
+}
+
+func (p *StickyConnPool) CloseConn(cn *Conn) error {
+	return p.pool.CloseConn(cn)
+}
+
+func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
+	// In the worst case this races with Close, which is not a very common operation.
+	for i := 0; i < 1000; i++ {
+		switch atomic.LoadUint32(&p.state) {
+		case stateDefault:
+			cn, err := p.pool.Get(ctx)
+			if err != nil {
+				return nil, err
+			}
+			if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
+				return cn, nil
+			}
+			p.pool.Remove(ctx, cn, ErrClosed)
+		case stateInited:
+			if err := p.badConnError(); err != nil {
+				return nil, err
+			}
+			cn, ok := <-p.ch
+			if !ok {
+				return nil, ErrClosed
+			}
+			return cn, nil
+		case stateClosed:
+			return nil, ErrClosed
+		default:
+			panic("not reached")
+		}
+	}
+	return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop")
+}
+
+func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
+	defer func() {
+		if recover() != nil {
+			p.freeConn(ctx, cn)
+		}
+	}()
+	p.ch <- cn
+}
+
+func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
+	if err := p.badConnError(); err != nil {
+		p.pool.Remove(ctx, cn, err)
+	} else {
+		p.pool.Put(ctx, cn)
+	}
+}
+
+func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
+	defer func() {
+		if recover() != nil {
+			p.pool.Remove(ctx, cn, ErrClosed)
+		}
+	}()
+	p._badConnError.Store(BadConnError{wrapped: reason})
+	p.ch <- cn
+}
+
+func (p *StickyConnPool) Close() error {
+	if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
+		return nil
+	}
+
+	for i := 0; i < 1000; i++ {
+		state := atomic.LoadUint32(&p.state)
+		if state == stateClosed {
+			return ErrClosed
+		}
+		if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
+			close(p.ch)
+			cn, ok := <-p.ch
+			if ok {
+				p.freeConn(context.TODO(), cn)
+			}
+			return nil
+		}
+	}
+
+	return errors.New("redis: StickyConnPool.Close: infinite loop")
+}
+
+func (p *StickyConnPool) Reset(ctx context.Context) error {
+	if p.badConnError() == nil {
+		return nil
+	}
+
+	select {
+	case cn, ok := <-p.ch:
+		if !ok {
+			return ErrClosed
+		}
+		p.pool.Remove(ctx, cn, ErrClosed)
+		p._badConnError.Store(BadConnError{wrapped: nil})
+	default:
+		return errors.New("redis: StickyConnPool does not have a Conn")
+	}
+
+	if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
+		state := atomic.LoadUint32(&p.state)
+		return fmt.Errorf("redis: invalid StickyConnPool state: %d", state)
+	}
+
+	return nil
+}
+
+func (p *StickyConnPool) badConnError() error {
+	if v := p._badConnError.Load(); v != nil {
+		if err := v.(BadConnError); err.wrapped != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *StickyConnPool) Len() int {
+	switch atomic.LoadUint32(&p.state) {
+	case stateDefault:
+		return 0
+	case stateInited:
+		return 1
+	case stateClosed:
+		return 0
+	default:
+		panic("not reached")
+	}
+}
+
+func (p *StickyConnPool) IdleLen() int {
+	return len(p.ch)
+}
+
+func (p *StickyConnPool) Stats() *Stats {
+	return &Stats{}
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
new file mode 100644
index 0000000..0e6ca77
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
@@ -0,0 +1,332 @@
+package proto
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+
+	"github.com/go-redis/redis/v8/internal/util"
+)
+
+// Redis RESP protocol data types.
+const (
+	ErrorReply  = '-'
+	StatusReply = '+'
+	IntReply    = ':'
+	StringReply = '$'
+	ArrayReply  = '*'
+)
+
+//------------------------------------------------------------------------------
+
+const Nil = RedisError("redis: nil") // nolint:errname
+
+type RedisError string
+
+func (e RedisError) Error() string { return string(e) }
+
+func (RedisError) RedisError() {}
+
+//------------------------------------------------------------------------------
+
+type MultiBulkParse func(*Reader, int64) (interface{}, error)
+
+type Reader struct {
+	rd   *bufio.Reader
+	_buf []byte
+}
+
+func NewReader(rd io.Reader) *Reader {
+	return &Reader{
+		rd:   bufio.NewReader(rd),
+		_buf: make([]byte, 64),
+	}
+}
+
+func (r *Reader) Buffered() int {
+	return r.rd.Buffered()
+}
+
+func (r *Reader) Peek(n int) ([]byte, error) {
+	return r.rd.Peek(n)
+}
+
+func (r *Reader) Reset(rd io.Reader) {
+	r.rd.Reset(rd)
+}
+
+func (r *Reader) ReadLine() ([]byte, error) {
+	line, err := r.readLine()
+	if err != nil {
+		return nil, err
+	}
+	if isNilReply(line) {
+		return nil, Nil
+	}
+	return line, nil
+}
+
+// readLine returns an error if:
+//   - there is a pending read error;
+//   - or the line does not end with \r\n.
+func (r *Reader) readLine() ([]byte, error) {
+	b, err := r.rd.ReadSlice('\n')
+	if err != nil {
+		if err != bufio.ErrBufferFull {
+			return nil, err
+		}
+
+		full := make([]byte, len(b))
+		copy(full, b)
+
+		b, err = r.rd.ReadBytes('\n')
+		if err != nil {
+			return nil, err
+		}
+
+		full = append(full, b...) //nolint:makezero
+		b = full
+	}
+	if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
+		return nil, fmt.Errorf("redis: invalid reply: %q", b)
+	}
+	return b[:len(b)-2], nil
+}
+
+func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+
+	switch line[0] {
+	case ErrorReply:
+		return nil, ParseErrorReply(line)
+	case StatusReply:
+		return string(line[1:]), nil
+	case IntReply:
+		return util.ParseInt(line[1:], 10, 64)
+	case StringReply:
+		return r.readStringReply(line)
+	case ArrayReply:
+		n, err := parseArrayLen(line)
+		if err != nil {
+			return nil, err
+		}
+		if m == nil {
+			err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
+			return nil, err
+		}
+		return m(r, n)
+	}
+	return nil, fmt.Errorf("redis: can't parse %.100q", line)
+}
+
+func (r *Reader) ReadIntReply() (int64, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return 0, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return 0, ParseErrorReply(line)
+	case IntReply:
+		return util.ParseInt(line[1:], 10, 64)
+	default:
+		return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
+	}
+}
+
+func (r *Reader) ReadString() (string, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return "", err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return "", ParseErrorReply(line)
+	case StringReply:
+		return r.readStringReply(line)
+	case StatusReply:
+		return string(line[1:]), nil
+	case IntReply:
+		return string(line[1:]), nil
+	default:
+		return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
+	}
+}
+
+func (r *Reader) readStringReply(line []byte) (string, error) {
+	if isNilReply(line) {
+		return "", Nil
+	}
+
+	replyLen, err := util.Atoi(line[1:])
+	if err != nil {
+		return "", err
+	}
+
+	b := make([]byte, replyLen+2)
+	_, err = io.ReadFull(r.rd, b)
+	if err != nil {
+		return "", err
+	}
+
+	return util.BytesToString(b[:replyLen]), nil
+}
+
+func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return nil, ParseErrorReply(line)
+	case ArrayReply:
+		n, err := parseArrayLen(line)
+		if err != nil {
+			return nil, err
+		}
+		return m(r, n)
+	default:
+		return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+	}
+}
+
+func (r *Reader) ReadArrayLen() (int, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return 0, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return 0, ParseErrorReply(line)
+	case ArrayReply:
+		n, err := parseArrayLen(line)
+		if err != nil {
+			return 0, err
+		}
+		return int(n), nil
+	default:
+		return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
+	}
+}
+
+func (r *Reader) ReadScanReply() ([]string, uint64, error) {
+	n, err := r.ReadArrayLen()
+	if err != nil {
+		return nil, 0, err
+	}
+	if n != 2 {
+		return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
+	}
+
+	cursor, err := r.ReadUint()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	n, err = r.ReadArrayLen()
+	if err != nil {
+		return nil, 0, err
+	}
+
+	keys := make([]string, n)
+
+	for i := 0; i < n; i++ {
+		key, err := r.ReadString()
+		if err != nil {
+			return nil, 0, err
+		}
+		keys[i] = key
+	}
+
+	return keys, cursor, err
+}
+
+func (r *Reader) ReadInt() (int64, error) {
+	b, err := r.readTmpBytesReply()
+	if err != nil {
+		return 0, err
+	}
+	return util.ParseInt(b, 10, 64)
+}
+
+func (r *Reader) ReadUint() (uint64, error) {
+	b, err := r.readTmpBytesReply()
+	if err != nil {
+		return 0, err
+	}
+	return util.ParseUint(b, 10, 64)
+}
+
+func (r *Reader) ReadFloatReply() (float64, error) {
+	b, err := r.readTmpBytesReply()
+	if err != nil {
+		return 0, err
+	}
+	return util.ParseFloat(b, 64)
+}
+
+func (r *Reader) readTmpBytesReply() ([]byte, error) {
+	line, err := r.ReadLine()
+	if err != nil {
+		return nil, err
+	}
+	switch line[0] {
+	case ErrorReply:
+		return nil, ParseErrorReply(line)
+	case StringReply:
+		return r._readTmpBytesReply(line)
+	case StatusReply:
+		return line[1:], nil
+	default:
+		return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
+	}
+}
+
+func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
+	if isNilReply(line) {
+		return nil, Nil
+	}
+
+	replyLen, err := util.Atoi(line[1:])
+	if err != nil {
+		return nil, err
+	}
+
+	buf := r.buf(replyLen + 2)
+	_, err = io.ReadFull(r.rd, buf)
+	if err != nil {
+		return nil, err
+	}
+
+	return buf[:replyLen], nil
+}
+
+func (r *Reader) buf(n int) []byte {
+	if n <= cap(r._buf) {
+		return r._buf[:n]
+	}
+	d := n - cap(r._buf)
+	r._buf = append(r._buf, make([]byte, d)...)
+	return r._buf
+}
+
+func isNilReply(b []byte) bool {
+	return len(b) == 3 &&
+		(b[0] == StringReply || b[0] == ArrayReply) &&
+		b[1] == '-' && b[2] == '1'
+}
+
+func ParseErrorReply(line []byte) error {
+	return RedisError(string(line[1:]))
+}
+
+func parseArrayLen(line []byte) (int64, error) {
+	if isNilReply(line) {
+		return 0, Nil
+	}
+	return util.ParseInt(line[1:], 10, 64)
+}
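+
+// Illustrative sketch (editorial note, not part of the upstream file): Reader
+// dispatches on the first byte of each RESP line, so canned wire data can be
+// decoded like this (strings is the standard library package):
+//
+//	r := NewReader(strings.NewReader("+OK\r\n:42\r\n$5\r\nhello\r\n"))
+//	s, _ := r.ReadString()   // "OK"    (status reply, '+')
+//	n, _ := r.ReadIntReply() // 42      (integer reply, ':')
+//	v, _ := r.ReadString()   // "hello" (bulk string reply, '$')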
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
new file mode 100644
index 0000000..0e99476
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
@@ -0,0 +1,180 @@
+package proto
+
+import (
+	"encoding"
+	"fmt"
+	"reflect"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal/util"
+)
+
+// Scan parses bytes `b` into `v` using the appropriate type.
+//nolint:gocyclo
+func Scan(b []byte, v interface{}) error {
+	switch v := v.(type) {
+	case nil:
+		return fmt.Errorf("redis: Scan(nil)")
+	case *string:
+		*v = util.BytesToString(b)
+		return nil
+	case *[]byte:
+		*v = b
+		return nil
+	case *int:
+		var err error
+		*v, err = util.Atoi(b)
+		return err
+	case *int8:
+		n, err := util.ParseInt(b, 10, 8)
+		if err != nil {
+			return err
+		}
+		*v = int8(n)
+		return nil
+	case *int16:
+		n, err := util.ParseInt(b, 10, 16)
+		if err != nil {
+			return err
+		}
+		*v = int16(n)
+		return nil
+	case *int32:
+		n, err := util.ParseInt(b, 10, 32)
+		if err != nil {
+			return err
+		}
+		*v = int32(n)
+		return nil
+	case *int64:
+		n, err := util.ParseInt(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = n
+		return nil
+	case *uint:
+		n, err := util.ParseUint(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = uint(n)
+		return nil
+	case *uint8:
+		n, err := util.ParseUint(b, 10, 8)
+		if err != nil {
+			return err
+		}
+		*v = uint8(n)
+		return nil
+	case *uint16:
+		n, err := util.ParseUint(b, 10, 16)
+		if err != nil {
+			return err
+		}
+		*v = uint16(n)
+		return nil
+	case *uint32:
+		n, err := util.ParseUint(b, 10, 32)
+		if err != nil {
+			return err
+		}
+		*v = uint32(n)
+		return nil
+	case *uint64:
+		n, err := util.ParseUint(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = n
+		return nil
+	case *float32:
+		n, err := util.ParseFloat(b, 32)
+		if err != nil {
+			return err
+		}
+		*v = float32(n)
+		return err
+	case *float64:
+		var err error
+		*v, err = util.ParseFloat(b, 64)
+		return err
+	case *bool:
+		*v = len(b) == 1 && b[0] == '1'
+		return nil
+	case *time.Time:
+		var err error
+		*v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
+		return err
+	case *time.Duration:
+		n, err := util.ParseInt(b, 10, 64)
+		if err != nil {
+			return err
+		}
+		*v = time.Duration(n)
+		return nil
+	case encoding.BinaryUnmarshaler:
+		return v.UnmarshalBinary(b)
+	default:
+		return fmt.Errorf(
+			"redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
+	}
+}
+
+func ScanSlice(data []string, slice interface{}) error {
+	v := reflect.ValueOf(slice)
+	if !v.IsValid() {
+		return fmt.Errorf("redis: ScanSlice(nil)")
+	}
+	if v.Kind() != reflect.Ptr {
+		return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Slice {
+		return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
+	}
+
+	next := makeSliceNextElemFunc(v)
+	for i, s := range data {
+		elem := next()
+		if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
+			err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
+			return err
+		}
+	}
+
+	return nil
+}
+
+func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
+	elemType := v.Type().Elem()
+
+	if elemType.Kind() == reflect.Ptr {
+		elemType = elemType.Elem()
+		return func() reflect.Value {
+			if v.Len() < v.Cap() {
+				v.Set(v.Slice(0, v.Len()+1))
+				elem := v.Index(v.Len() - 1)
+				if elem.IsNil() {
+					elem.Set(reflect.New(elemType))
+				}
+				return elem.Elem()
+			}
+
+			elem := reflect.New(elemType)
+			v.Set(reflect.Append(v, elem))
+			return elem.Elem()
+		}
+	}
+
+	zero := reflect.Zero(elemType)
+	return func() reflect.Value {
+		if v.Len() < v.Cap() {
+			v.Set(v.Slice(0, v.Len()+1))
+			return v.Index(v.Len() - 1)
+		}
+
+		v.Set(reflect.Append(v, zero))
+		return v.Index(v.Len() - 1)
+	}
+}
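+
+// Illustrative sketch (editorial note, not part of the upstream file): Scan
+// copies a raw reply into a typed destination, and ScanSlice does the same
+// element by element:
+//
+//	var n int
+//	_ = Scan([]byte("42"), &n) // n == 42
+//
+//	var nums []int
+//	_ = ScanSlice([]string{"1", "2", "3"}, &nums) // nums == []int{1, 2, 3}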
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
new file mode 100644
index 0000000..c426098
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
@@ -0,0 +1,155 @@
+package proto
+
+import (
+	"encoding"
+	"fmt"
+	"io"
+	"strconv"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal/util"
+)
+
+type writer interface {
+	io.Writer
+	io.ByteWriter
+	// io.StringWriter
+	WriteString(s string) (n int, err error)
+}
+
+type Writer struct {
+	writer
+
+	lenBuf []byte
+	numBuf []byte
+}
+
+func NewWriter(wr writer) *Writer {
+	return &Writer{
+		writer: wr,
+
+		lenBuf: make([]byte, 64),
+		numBuf: make([]byte, 64),
+	}
+}
+
+func (w *Writer) WriteArgs(args []interface{}) error {
+	if err := w.WriteByte(ArrayReply); err != nil {
+		return err
+	}
+
+	if err := w.writeLen(len(args)); err != nil {
+		return err
+	}
+
+	for _, arg := range args {
+		if err := w.WriteArg(arg); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+func (w *Writer) writeLen(n int) error {
+	w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
+	w.lenBuf = append(w.lenBuf, '\r', '\n')
+	_, err := w.Write(w.lenBuf)
+	return err
+}
+
+func (w *Writer) WriteArg(v interface{}) error {
+	switch v := v.(type) {
+	case nil:
+		return w.string("")
+	case string:
+		return w.string(v)
+	case []byte:
+		return w.bytes(v)
+	case int:
+		return w.int(int64(v))
+	case int8:
+		return w.int(int64(v))
+	case int16:
+		return w.int(int64(v))
+	case int32:
+		return w.int(int64(v))
+	case int64:
+		return w.int(v)
+	case uint:
+		return w.uint(uint64(v))
+	case uint8:
+		return w.uint(uint64(v))
+	case uint16:
+		return w.uint(uint64(v))
+	case uint32:
+		return w.uint(uint64(v))
+	case uint64:
+		return w.uint(v)
+	case float32:
+		return w.float(float64(v))
+	case float64:
+		return w.float(v)
+	case bool:
+		if v {
+			return w.int(1)
+		}
+		return w.int(0)
+	case time.Time:
+		w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
+		return w.bytes(w.numBuf)
+	case time.Duration:
+		return w.int(v.Nanoseconds())
+	case encoding.BinaryMarshaler:
+		b, err := v.MarshalBinary()
+		if err != nil {
+			return err
+		}
+		return w.bytes(b)
+	default:
+		return fmt.Errorf(
+			"redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
+	}
+}
+
+func (w *Writer) bytes(b []byte) error {
+	if err := w.WriteByte(StringReply); err != nil {
+		return err
+	}
+
+	if err := w.writeLen(len(b)); err != nil {
+		return err
+	}
+
+	if _, err := w.Write(b); err != nil {
+		return err
+	}
+
+	return w.crlf()
+}
+
+func (w *Writer) string(s string) error {
+	return w.bytes(util.StringToBytes(s))
+}
+
+func (w *Writer) uint(n uint64) error {
+	w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10)
+	return w.bytes(w.numBuf)
+}
+
+func (w *Writer) int(n int64) error {
+	w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10)
+	return w.bytes(w.numBuf)
+}
+
+func (w *Writer) float(f float64) error {
+	w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64)
+	return w.bytes(w.numBuf)
+}
+
+func (w *Writer) crlf() error {
+	if err := w.WriteByte('\r'); err != nil {
+		return err
+	}
+	return w.WriteByte('\n')
+}
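+
+// Illustrative sketch (editorial note, not part of the upstream file):
+// WriteArgs encodes a command as a RESP array of bulk strings, so the call
+// below writes "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n" into buf
+// (bytes.Buffer satisfies the writer interface):
+//
+//	buf := new(bytes.Buffer)
+//	w := NewWriter(buf)
+//	_ = w.WriteArgs([]interface{}{"SET", "key", "value"})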
diff --git a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
new file mode 100644
index 0000000..2edccba
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
@@ -0,0 +1,50 @@
+package rand
+
+import (
+	"math/rand"
+	"sync"
+)
+
+// Int returns a non-negative pseudo-random int.
+func Int() int { return pseudo.Int() }
+
+// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Intn(n int) int { return pseudo.Intn(n) }
+
+// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
+// It panics if n <= 0.
+func Int63n(n int64) int64 { return pseudo.Int63n(n) }
+
+// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
+func Perm(n int) []int { return pseudo.Perm(n) }
+
+// Seed uses the provided seed value to initialize the default Source to a
+// deterministic state. If Seed is not called, the generator behaves as if
+// seeded by Seed(1).
+func Seed(n int64) { pseudo.Seed(n) }
+
+var pseudo = rand.New(&source{src: rand.NewSource(1)})
+
+type source struct {
+	src rand.Source
+	mu  sync.Mutex
+}
+
+func (s *source) Int63() int64 {
+	s.mu.Lock()
+	n := s.src.Int63()
+	s.mu.Unlock()
+	return n
+}
+
+func (s *source) Seed(seed int64) {
+	s.mu.Lock()
+	s.src.Seed(seed)
+	s.mu.Unlock()
+}
+
+// Shuffle pseudo-randomizes the order of elements.
+// n is the number of elements.
+// swap swaps the elements with indexes i and j.
+func Shuffle(n int, swap func(i, j int)) { pseudo.Shuffle(n, swap) }
diff --git a/vendor/github.com/go-redis/redis/v8/internal/safe.go b/vendor/github.com/go-redis/redis/v8/internal/safe.go
new file mode 100644
index 0000000..fd2f434
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/safe.go
@@ -0,0 +1,12 @@
+//go:build appengine
+// +build appengine
+
+package internal
+
+func String(b []byte) string {
+	return string(b)
+}
+
+func Bytes(s string) []byte {
+	return []byte(s)
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
new file mode 100644
index 0000000..9f2e418
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
@@ -0,0 +1,21 @@
+//go:build !appengine
+// +build !appengine
+
+package internal
+
+import "unsafe"
+
+// String converts byte slice to string.
+func String(b []byte) string {
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+// Bytes converts string to byte slice.
+func Bytes(s string) []byte {
+	return *(*[]byte)(unsafe.Pointer(
+		&struct {
+			string
+			Cap int
+		}{s, len(s)},
+	))
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util.go b/vendor/github.com/go-redis/redis/v8/internal/util.go
new file mode 100644
index 0000000..e34a7f0
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/util.go
@@ -0,0 +1,46 @@
+package internal
+
+import (
+	"context"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal/util"
+)
+
+func Sleep(ctx context.Context, dur time.Duration) error {
+	t := time.NewTimer(dur)
+	defer t.Stop()
+
+	select {
+	case <-t.C:
+		return nil
+	case <-ctx.Done():
+		return ctx.Err()
+	}
+}
+
+func ToLower(s string) string {
+	if isLower(s) {
+		return s
+	}
+
+	b := make([]byte, len(s))
+	for i := range b {
+		c := s[i]
+		if c >= 'A' && c <= 'Z' {
+			c += 'a' - 'A'
+		}
+		b[i] = c
+	}
+	return util.BytesToString(b)
+}
+
+func isLower(s string) bool {
+	for i := 0; i < len(s); i++ {
+		c := s[i]
+		if c >= 'A' && c <= 'Z' {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/safe.go b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
new file mode 100644
index 0000000..2130711
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/safe.go
@@ -0,0 +1,12 @@
+//go:build appengine
+// +build appengine
+
+package util
+
+func BytesToString(b []byte) string {
+	return string(b)
+}
+
+func StringToBytes(s string) []byte {
+	return []byte(s)
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go b/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
new file mode 100644
index 0000000..db50338
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
@@ -0,0 +1,19 @@
+package util
+
+import "strconv"
+
+func Atoi(b []byte) (int, error) {
+	return strconv.Atoi(BytesToString(b))
+}
+
+func ParseInt(b []byte, base int, bitSize int) (int64, error) {
+	return strconv.ParseInt(BytesToString(b), base, bitSize)
+}
+
+func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
+	return strconv.ParseUint(BytesToString(b), base, bitSize)
+}
+
+func ParseFloat(b []byte, bitSize int) (float64, error) {
+	return strconv.ParseFloat(BytesToString(b), bitSize)
+}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
new file mode 100644
index 0000000..daa8d76
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
@@ -0,0 +1,23 @@
+//go:build !appengine
+// +build !appengine
+
+package util
+
+import (
+	"unsafe"
+)
+
+// BytesToString converts byte slice to string.
+func BytesToString(b []byte) string {
+	return *(*string)(unsafe.Pointer(&b))
+}
+
+// StringToBytes converts string to byte slice.
+func StringToBytes(s string) []byte {
+	return *(*[]byte)(unsafe.Pointer(
+		&struct {
+			string
+			Cap int
+		}{s, len(s)},
+	))
+}
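+
+// Note (editorial, not part of the upstream file): both conversions above are
+// zero-copy and alias the original memory, so the returned string must not be
+// used after the source slice is mutated, and the returned byte slice must
+// never be written to. The appengine build (safe.go) falls back to copying.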
diff --git a/vendor/github.com/go-redis/redis/v8/iterator.go b/vendor/github.com/go-redis/redis/v8/iterator.go
new file mode 100644
index 0000000..2f8bc2b
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/iterator.go
@@ -0,0 +1,77 @@
+package redis
+
+import (
+	"context"
+	"sync"
+)
+
+// ScanIterator is used to incrementally iterate over a collection of elements.
+// It's safe for concurrent use by multiple goroutines.
+type ScanIterator struct {
+	mu  sync.Mutex // protects Scanner and pos
+	cmd *ScanCmd
+	pos int
+}
+
+// Err returns the last iterator error, if any.
+func (it *ScanIterator) Err() error {
+	it.mu.Lock()
+	err := it.cmd.Err()
+	it.mu.Unlock()
+	return err
+}
+
+// Next advances the cursor and returns true if more values can be read.
+func (it *ScanIterator) Next(ctx context.Context) bool {
+	it.mu.Lock()
+	defer it.mu.Unlock()
+
+	// Return immediately on errors.
+	if it.cmd.Err() != nil {
+		return false
+	}
+
+	// Advance cursor, check if we are still within range.
+	if it.pos < len(it.cmd.page) {
+		it.pos++
+		return true
+	}
+
+	for {
+		// Return if there is no more data to fetch.
+		if it.cmd.cursor == 0 {
+			return false
+		}
+
+		// Fetch next page.
+		switch it.cmd.args[0] {
+		case "scan", "qscan":
+			it.cmd.args[1] = it.cmd.cursor
+		default:
+			it.cmd.args[2] = it.cmd.cursor
+		}
+
+		err := it.cmd.process(ctx, it.cmd)
+		if err != nil {
+			return false
+		}
+
+		it.pos = 1
+
+		// Redis can occasionally return an empty page.
+		if len(it.cmd.page) > 0 {
+			return true
+		}
+	}
+}
+
+// Val returns the key/field at the current cursor position.
+func (it *ScanIterator) Val() string {
+	var v string
+	it.mu.Lock()
+	if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
+		v = it.cmd.page[it.pos-1]
+	}
+	it.mu.Unlock()
+	return v
+}
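+
+// Illustrative sketch (editorial note, not part of the upstream file): a
+// ScanIterator is normally obtained from a ScanCmd produced by the client's
+// Scan command and drained with Next/Val; rdb below is a placeholder *Client:
+//
+//	iter := rdb.Scan(ctx, 0, "prefix:*", 0).Iterator()
+//	for iter.Next(ctx) {
+//		fmt.Println("key", iter.Val())
+//	}
+//	if err := iter.Err(); err != nil {
+//		panic(err)
+//	}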
diff --git a/vendor/github.com/go-redis/redis/v8/options.go b/vendor/github.com/go-redis/redis/v8/options.go
new file mode 100644
index 0000000..a4abe32
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/options.go
@@ -0,0 +1,429 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net"
+	"net/url"
+	"runtime"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal/pool"
+)
+
+// Limiter is the interface of a rate limiter or a circuit breaker.
+type Limiter interface {
+	// Allow returns nil if the operation is allowed, or an error otherwise.
+	// If the operation is allowed, the client must call ReportResult with the
+	// outcome of the operation, whether it is a success or a failure.
+	Allow() error
+	// ReportResult reports the result of the previously allowed operation.
+	// nil indicates a success, non-nil error usually indicates a failure.
+	ReportResult(result error)
+}
+
+// Options holds the settings used to set up a Redis connection.
+type Options struct {
+	// The network type, either tcp or unix.
+	// Default is tcp.
+	Network string
+	// host:port address.
+	Addr string
+
+	// Dialer creates a new network connection and has priority over the
+	// Network and Addr options.
+	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
+
+	// Hook that is called when a new connection is established.
+	OnConnect func(ctx context.Context, cn *Conn) error
+
+	// Use the specified Username to authenticate the current connection
+	// with one of the connections defined in the ACL list when connecting
+	// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
+	Username string
+	// Optional password. Must match the password specified in the
+	// requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
+	// or the User Password when connecting to a Redis 6.0 instance, or greater,
+	// that is using the Redis ACL system.
+	Password string
+
+	// Database to be selected after connecting to the server.
+	DB int
+
+	// Maximum number of retries before giving up.
+	// Default is 3 retries; -1 (not 0) disables retries.
+	MaxRetries int
+	// Minimum backoff between each retry.
+	// Default is 8 milliseconds; -1 disables backoff.
+	MinRetryBackoff time.Duration
+	// Maximum backoff between each retry.
+	// Default is 512 milliseconds; -1 disables backoff.
+	MaxRetryBackoff time.Duration
+
+	// Dial timeout for establishing new connections.
+	// Default is 5 seconds.
+	DialTimeout time.Duration
+	// Timeout for socket reads. If reached, commands will fail
+	// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
+	// Default is 3 seconds.
+	ReadTimeout time.Duration
+	// Timeout for socket writes. If reached, commands will fail
+	// with a timeout instead of blocking.
+	// Default is ReadTimeout.
+	WriteTimeout time.Duration
+
+	// Type of connection pool.
+	// true for FIFO pool, false for LIFO pool.
+	// Note that fifo has higher overhead compared to lifo.
+	PoolFIFO bool
+	// Maximum number of socket connections.
+	// Default is 10 connections per available CPU as reported by runtime.GOMAXPROCS.
+	PoolSize int
+	// Minimum number of idle connections, which is useful when establishing a
+	// new connection is slow.
+	MinIdleConns int
+	// Connection age at which client retires (closes) the connection.
+	// Default is to not close aged connections.
+	MaxConnAge time.Duration
+	// Amount of time the client waits for a connection if all connections
+	// are busy before returning an error.
+	// Default is ReadTimeout + 1 second.
+	PoolTimeout time.Duration
+	// Amount of time after which client closes idle connections.
+	// Should be less than server's timeout.
+	// Default is 5 minutes. -1 disables idle timeout check.
+	IdleTimeout time.Duration
+	// Frequency of idle checks made by idle connections reaper.
+	// Default is 1 minute. -1 disables idle connections reaper,
+	// but idle connections are still discarded by the client
+	// if IdleTimeout is set.
+	IdleCheckFrequency time.Duration
+
+	// Enables read only queries on slave nodes.
+	readOnly bool
+
+	// TLS Config to use. When set, TLS will be negotiated.
+	TLSConfig *tls.Config
+
+	// Limiter interface used to implement a circuit breaker or rate limiter.
+	Limiter Limiter
+}
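+
+// Illustrative sketch (editorial note, not part of the upstream file): zero
+// values fall back to the defaults documented above, so a minimal
+// configuration only needs the address:
+//
+//	opt := &Options{
+//		Addr:     "localhost:6379",
+//		Password: "", // no password set
+//		DB:       0,  // use the default DB
+//	}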
+
+func (opt *Options) init() {
+	if opt.Addr == "" {
+		opt.Addr = "localhost:6379"
+	}
+	if opt.Network == "" {
+		if strings.HasPrefix(opt.Addr, "/") {
+			opt.Network = "unix"
+		} else {
+			opt.Network = "tcp"
+		}
+	}
+	if opt.DialTimeout == 0 {
+		opt.DialTimeout = 5 * time.Second
+	}
+	if opt.Dialer == nil {
+		opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
+			netDialer := &net.Dialer{
+				Timeout:   opt.DialTimeout,
+				KeepAlive: 5 * time.Minute,
+			}
+			if opt.TLSConfig == nil {
+				return netDialer.DialContext(ctx, network, addr)
+			}
+			return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
+		}
+	}
+	if opt.PoolSize == 0 {
+		opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
+	}
+	switch opt.ReadTimeout {
+	case -1:
+		opt.ReadTimeout = 0
+	case 0:
+		opt.ReadTimeout = 3 * time.Second
+	}
+	switch opt.WriteTimeout {
+	case -1:
+		opt.WriteTimeout = 0
+	case 0:
+		opt.WriteTimeout = opt.ReadTimeout
+	}
+	if opt.PoolTimeout == 0 {
+		opt.PoolTimeout = opt.ReadTimeout + time.Second
+	}
+	if opt.IdleTimeout == 0 {
+		opt.IdleTimeout = 5 * time.Minute
+	}
+	if opt.IdleCheckFrequency == 0 {
+		opt.IdleCheckFrequency = time.Minute
+	}
+
+	if opt.MaxRetries == -1 {
+		opt.MaxRetries = 0
+	} else if opt.MaxRetries == 0 {
+		opt.MaxRetries = 3
+	}
+	switch opt.MinRetryBackoff {
+	case -1:
+		opt.MinRetryBackoff = 0
+	case 0:
+		opt.MinRetryBackoff = 8 * time.Millisecond
+	}
+	switch opt.MaxRetryBackoff {
+	case -1:
+		opt.MaxRetryBackoff = 0
+	case 0:
+		opt.MaxRetryBackoff = 512 * time.Millisecond
+	}
+}
+
+func (opt *Options) clone() *Options {
+	clone := *opt
+	return &clone
+}
+
+// ParseURL parses a URL into Options that can be used to connect to Redis.
+// Scheme is required.
+// There are two connection types: by TCP socket and by Unix socket.
+// TCP connection:
+//		redis://<user>:<password>@<host>:<port>/<db_number>
+// Unix connection:
+//		unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
+// Most Option fields can be set using query parameters, with the following restrictions:
+//	- field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
+//	- only scalar type fields are supported (bool, int, time.Duration)
+//	- for time.Duration fields, values must be a valid input for time.ParseDuration();
+//	  additionally a plain integer as value (i.e. without unit) is interpreted as seconds
+//	- to disable a duration field, use value less than or equal to 0; to use the default
+//	  value, leave the value blank or remove the parameter
+//	- only the last value is interpreted if a parameter is given multiple times
+//	- fields "network", "addr", "username" and "password" can only be set using other
+//	  URL attributes (scheme, host, userinfo, resp.), query parameters using these
+//	  names will be treated as unknown parameters
+//	- unknown parameter names will result in an error
+// Examples:
+//		redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
+//		is equivalent to:
+//		&Options{
+//			Network:     "tcp",
+//			Addr:        "localhost:6789",
+//			DB:          1,               // path "/3" was overridden by "&db=1"
+//			DialTimeout: 3 * time.Second, // no time unit = seconds
+//			ReadTimeout: 6 * time.Second,
+//			MaxRetries:  2,
+//		}
+func ParseURL(redisURL string) (*Options, error) {
+	u, err := url.Parse(redisURL)
+	if err != nil {
+		return nil, err
+	}
+
+	switch u.Scheme {
+	case "redis", "rediss":
+		return setupTCPConn(u)
+	case "unix":
+		return setupUnixConn(u)
+	default:
+		return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
+	}
+}
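+
+// Illustrative sketch (editorial note, not part of the upstream file):
+// ParseURL pairs with the client constructor defined elsewhere in this
+// package (assumed here to be NewClient):
+//
+//	opt, err := ParseURL("redis://user:password@localhost:6789/3?read_timeout=6s")
+//	if err != nil {
+//		panic(err)
+//	}
+//	rdb := NewClient(opt)
+//	_ = rdb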
+
+func setupTCPConn(u *url.URL) (*Options, error) {
+	o := &Options{Network: "tcp"}
+
+	o.Username, o.Password = getUserPassword(u)
+
+	h, p, err := net.SplitHostPort(u.Host)
+	if err != nil {
+		h = u.Host
+	}
+	if h == "" {
+		h = "localhost"
+	}
+	if p == "" {
+		p = "6379"
+	}
+	o.Addr = net.JoinHostPort(h, p)
+
+	f := strings.FieldsFunc(u.Path, func(r rune) bool {
+		return r == '/'
+	})
+	switch len(f) {
+	case 0:
+		o.DB = 0
+	case 1:
+		if o.DB, err = strconv.Atoi(f[0]); err != nil {
+			return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
+		}
+	default:
+		return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
+	}
+
+	if u.Scheme == "rediss" {
+		o.TLSConfig = &tls.Config{ServerName: h}
+	}
+
+	return setupConnParams(u, o)
+}
+
+func setupUnixConn(u *url.URL) (*Options, error) {
+	o := &Options{
+		Network: "unix",
+	}
+
+	if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
+		return nil, errors.New("redis: empty unix socket path")
+	}
+	o.Addr = u.Path
+	o.Username, o.Password = getUserPassword(u)
+	return setupConnParams(u, o)
+}
+
+type queryOptions struct {
+	q   url.Values
+	err error
+}
+
+func (o *queryOptions) string(name string) string {
+	vs := o.q[name]
+	if len(vs) == 0 {
+		return ""
+	}
+	delete(o.q, name) // enable detection of unknown parameters
+	return vs[len(vs)-1]
+}
+
+func (o *queryOptions) int(name string) int {
+	s := o.string(name)
+	if s == "" {
+		return 0
+	}
+	i, err := strconv.Atoi(s)
+	if err == nil {
+		return i
+	}
+	if o.err == nil {
+		o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
+	}
+	return 0
+}
+
+func (o *queryOptions) duration(name string) time.Duration {
+	s := o.string(name)
+	if s == "" {
+		return 0
+	}
+	// try plain number first
+	if i, err := strconv.Atoi(s); err == nil {
+		if i <= 0 {
+			// disable timeouts
+			return -1
+		}
+		return time.Duration(i) * time.Second
+	}
+	dur, err := time.ParseDuration(s)
+	if err == nil {
+		return dur
+	}
+	if o.err == nil {
+		o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
+	}
+	return 0
+}
+
+func (o *queryOptions) bool(name string) bool {
+	switch s := o.string(name); s {
+	case "true", "1":
+		return true
+	case "false", "0", "":
+		return false
+	default:
+		if o.err == nil {
+			o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
+		}
+		return false
+	}
+}
+
+func (o *queryOptions) remaining() []string {
+	if len(o.q) == 0 {
+		return nil
+	}
+	keys := make([]string, 0, len(o.q))
+	for k := range o.q {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	return keys
+}
+
+// setupConnParams converts query parameters in u to option values in o.
+func setupConnParams(u *url.URL, o *Options) (*Options, error) {
+	q := queryOptions{q: u.Query()}
+
+	// compat: a future major release may use q.int("db")
+	if tmp := q.string("db"); tmp != "" {
+		db, err := strconv.Atoi(tmp)
+		if err != nil {
+			return nil, fmt.Errorf("redis: invalid database number: %w", err)
+		}
+		o.DB = db
+	}
+
+	o.MaxRetries = q.int("max_retries")
+	o.MinRetryBackoff = q.duration("min_retry_backoff")
+	o.MaxRetryBackoff = q.duration("max_retry_backoff")
+	o.DialTimeout = q.duration("dial_timeout")
+	o.ReadTimeout = q.duration("read_timeout")
+	o.WriteTimeout = q.duration("write_timeout")
+	o.PoolFIFO = q.bool("pool_fifo")
+	o.PoolSize = q.int("pool_size")
+	o.MinIdleConns = q.int("min_idle_conns")
+	o.MaxConnAge = q.duration("max_conn_age")
+	o.PoolTimeout = q.duration("pool_timeout")
+	o.IdleTimeout = q.duration("idle_timeout")
+	o.IdleCheckFrequency = q.duration("idle_check_frequency")
+	if q.err != nil {
+		return nil, q.err
+	}
+
+	// any parameters left?
+	if r := q.remaining(); len(r) > 0 {
+		return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
+	}
+
+	return o, nil
+}
+
+func getUserPassword(u *url.URL) (string, string) {
+	var user, password string
+	if u.User != nil {
+		user = u.User.Username()
+		if p, ok := u.User.Password(); ok {
+			password = p
+		}
+	}
+	return user, password
+}
+
+func newConnPool(opt *Options) *pool.ConnPool {
+	return pool.NewConnPool(&pool.Options{
+		Dialer: func(ctx context.Context) (net.Conn, error) {
+			return opt.Dialer(ctx, opt.Network, opt.Addr)
+		},
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+	})
+}
diff --git a/vendor/github.com/go-redis/redis/v8/package.json b/vendor/github.com/go-redis/redis/v8/package.json
new file mode 100644
index 0000000..e4ea4bb
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/package.json
@@ -0,0 +1,8 @@
+{
+  "name": "redis",
+  "version": "8.11.5",
+  "main": "index.js",
+  "repository": "git@github.com:go-redis/redis.git",
+  "author": "Vladimir Mihailenco <vladimir.webdev@gmail.com>",
+  "license": "BSD-2-clause"
+}
diff --git a/vendor/github.com/go-redis/redis/v8/pipeline.go b/vendor/github.com/go-redis/redis/v8/pipeline.go
new file mode 100644
index 0000000..31bab97
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/pipeline.go
@@ -0,0 +1,147 @@
+package redis
+
+import (
+	"context"
+	"sync"
+
+	"github.com/go-redis/redis/v8/internal/pool"
+)
+
+type pipelineExecer func(context.Context, []Cmder) error
+
+// Pipeliner is a mechanism to realise the Redis Pipeline technique.
+//
+// Pipelining is a technique that can dramatically speed up processing by
+// packing operations into batches, sending them to Redis at once, and reading
+// the replies in a single step.
+// See https://redis.io/topics/pipelining
+//
+// Note that Pipeline is not a transaction, so you can get unexpected
+// results with large pipelines and small read/write timeouts.
+// The Redis client retransmits on timeouts, so a pipeline
+// can be retransmitted and commands can be executed more than once.
+// To avoid this, use read/write timeouts sized to your batch size
+// and/or use TxPipeline.
+type Pipeliner interface {
+	StatefulCmdable
+	Len() int
+	Do(ctx context.Context, args ...interface{}) *Cmd
+	Process(ctx context.Context, cmd Cmder) error
+	Close() error
+	Discard() error
+	Exec(ctx context.Context) ([]Cmder, error)
+}
+
+var _ Pipeliner = (*Pipeline)(nil)
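+
+// A minimal usage sketch (rdb is an assumed *Client and ctx an assumed
+// context.Context); the queued commands are sent in a single round trip when
+// Pipelined returns:
+//
+//    cmds, err := rdb.Pipelined(ctx, func(pipe Pipeliner) error {
+//        pipe.Do(ctx, "set", "key1", "value1")
+//        pipe.Do(ctx, "get", "key1")
+//        return nil
+//    })
+//    if err != nil {
+//        // handle error
+//    }
+//    for _, cmd := range cmds {
+//        fmt.Println(cmd.(*Cmd).Val())
+//    }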
+
+// Pipeline implements pipelining as described in
+// http://redis.io/topics/pipelining. It's safe for concurrent use
+// by multiple goroutines.
+type Pipeline struct {
+	cmdable
+	statefulCmdable
+
+	ctx  context.Context
+	exec pipelineExecer
+
+	mu     sync.Mutex
+	cmds   []Cmder
+	closed bool
+}
+
+func (c *Pipeline) init() {
+	c.cmdable = c.Process
+	c.statefulCmdable = c.Process
+}
+
+// Len returns the number of queued commands.
+func (c *Pipeline) Len() int {
+	c.mu.Lock()
+	ln := len(c.cmds)
+	c.mu.Unlock()
+	return ln
+}
+
+// Do queues the custom command for later execution.
+func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Process queues the cmd for later execution.
+func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
+	c.mu.Lock()
+	c.cmds = append(c.cmds, cmd)
+	c.mu.Unlock()
+	return nil
+}
+
+// Close closes the pipeline, releasing any open resources.
+func (c *Pipeline) Close() error {
+	c.mu.Lock()
+	_ = c.discard()
+	c.closed = true
+	c.mu.Unlock()
+	return nil
+}
+
+// Discard resets the pipeline and discards queued commands.
+func (c *Pipeline) Discard() error {
+	c.mu.Lock()
+	err := c.discard()
+	c.mu.Unlock()
+	return err
+}
+
+func (c *Pipeline) discard() error {
+	if c.closed {
+		return pool.ErrClosed
+	}
+	c.cmds = c.cmds[:0]
+	return nil
+}
+
+// Exec executes all previously queued commands using one
+// client-server roundtrip.
+//
+// Exec always returns the list of commands and the error of the first failed
+// command, if any.
+func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+
+	if len(c.cmds) == 0 {
+		return nil, nil
+	}
+
+	cmds := c.cmds
+	c.cmds = nil
+
+	return cmds, c.exec(ctx, cmds)
+}
+
+func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	if err := fn(c); err != nil {
+		return nil, err
+	}
+	cmds, err := c.Exec(ctx)
+	_ = c.Close()
+	return cmds, err
+}
+
+func (c *Pipeline) Pipeline() Pipeliner {
+	return c
+}
+
+func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipelined(ctx, fn)
+}
+
+func (c *Pipeline) TxPipeline() Pipeliner {
+	return c
+}
diff --git a/vendor/github.com/go-redis/redis/v8/pubsub.go b/vendor/github.com/go-redis/redis/v8/pubsub.go
new file mode 100644
index 0000000..efc2354
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/pubsub.go
@@ -0,0 +1,668 @@
+package redis
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
+)
+
+// PubSub implements Pub/Sub commands as described in
+// http://redis.io/topics/pubsub. Message receiving is NOT safe
+// for concurrent use by multiple goroutines.
+//
+// PubSub automatically reconnects to the Redis server and resubscribes
+// to the channels in case of network errors.
+type PubSub struct {
+	opt *Options
+
+	newConn   func(ctx context.Context, channels []string) (*pool.Conn, error)
+	closeConn func(*pool.Conn) error
+
+	mu       sync.Mutex
+	cn       *pool.Conn
+	channels map[string]struct{}
+	patterns map[string]struct{}
+
+	closed bool
+	exit   chan struct{}
+
+	cmd *Cmd
+
+	chOnce sync.Once
+	msgCh  *channel
+	allCh  *channel
+}
+
+func (c *PubSub) init() {
+	c.exit = make(chan struct{})
+}
+
+func (c *PubSub) String() string {
+	channels := mapKeys(c.channels)
+	channels = append(channels, mapKeys(c.patterns)...)
+	return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
+}
+
+func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
+	c.mu.Lock()
+	cn, err := c.conn(ctx, nil)
+	c.mu.Unlock()
+	return cn, err
+}
+
+func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
+	if c.closed {
+		return nil, pool.ErrClosed
+	}
+	if c.cn != nil {
+		return c.cn, nil
+	}
+
+	channels := mapKeys(c.channels)
+	channels = append(channels, newChannels...)
+
+	cn, err := c.newConn(ctx, channels)
+	if err != nil {
+		return nil, err
+	}
+
+	if err := c.resubscribe(ctx, cn); err != nil {
+		_ = c.closeConn(cn)
+		return nil, err
+	}
+
+	c.cn = cn
+	return cn, nil
+}
+
+func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
+	return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+		return writeCmd(wr, cmd)
+	})
+}
+
+func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
+	var firstErr error
+
+	if len(c.channels) > 0 {
+		firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels))
+	}
+
+	if len(c.patterns) > 0 {
+		err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns))
+		if err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+
+	return firstErr
+}
+
+func mapKeys(m map[string]struct{}) []string {
+	s := make([]string, len(m))
+	i := 0
+	for k := range m {
+		s[i] = k
+		i++
+	}
+	return s
+}
+
+func (c *PubSub) _subscribe(
+	ctx context.Context, cn *pool.Conn, redisCmd string, channels []string,
+) error {
+	args := make([]interface{}, 0, 1+len(channels))
+	args = append(args, redisCmd)
+	for _, channel := range channels {
+		args = append(args, channel)
+	}
+	cmd := NewSliceCmd(ctx, args...)
+	return c.writeCmd(ctx, cn, cmd)
+}
+
+func (c *PubSub) releaseConnWithLock(
+	ctx context.Context,
+	cn *pool.Conn,
+	err error,
+	allowTimeout bool,
+) {
+	c.mu.Lock()
+	c.releaseConn(ctx, cn, err, allowTimeout)
+	c.mu.Unlock()
+}
+
+func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
+	if c.cn != cn {
+		return
+	}
+	if isBadConn(err, allowTimeout, c.opt.Addr) {
+		c.reconnect(ctx, err)
+	}
+}
+
+func (c *PubSub) reconnect(ctx context.Context, reason error) {
+	_ = c.closeTheCn(reason)
+	_, _ = c.conn(ctx, nil)
+}
+
+func (c *PubSub) closeTheCn(reason error) error {
+	if c.cn == nil {
+		return nil
+	}
+	if !c.closed {
+		internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
+	}
+	err := c.closeConn(c.cn)
+	c.cn = nil
+	return err
+}
+
+func (c *PubSub) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return pool.ErrClosed
+	}
+	c.closed = true
+	close(c.exit)
+
+	return c.closeTheCn(pool.ErrClosed)
+}
+
+// Subscribe subscribes the client to the specified channels. It returns an
+// empty subscription if there are no channels.
+func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	err := c.subscribe(ctx, "subscribe", channels...)
+	if c.channels == nil {
+		c.channels = make(map[string]struct{})
+	}
+	for _, s := range channels {
+		c.channels[s] = struct{}{}
+	}
+	return err
+}
+
+// PSubscribe subscribes the client to the given patterns. It returns an
+// empty subscription if there are no patterns.
+func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	err := c.subscribe(ctx, "psubscribe", patterns...)
+	if c.patterns == nil {
+		c.patterns = make(map[string]struct{})
+	}
+	for _, s := range patterns {
+		c.patterns[s] = struct{}{}
+	}
+	return err
+}
+
+// Unsubscribe unsubscribes the client from the given channels, or from all of
+// them if none are given.
+func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	for _, channel := range channels {
+		delete(c.channels, channel)
+	}
+	err := c.subscribe(ctx, "unsubscribe", channels...)
+	return err
+}
+
+// PUnsubscribe unsubscribes the client from the given patterns, or from all of
+// them if none are given.
+func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	for _, pattern := range patterns {
+		delete(c.patterns, pattern)
+	}
+	err := c.subscribe(ctx, "punsubscribe", patterns...)
+	return err
+}
+
+func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
+	cn, err := c.conn(ctx, channels)
+	if err != nil {
+		return err
+	}
+
+	err = c._subscribe(ctx, cn, redisCmd, channels)
+	c.releaseConn(ctx, cn, err, false)
+	return err
+}
+
+func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
+	args := []interface{}{"ping"}
+	if len(payload) == 1 {
+		args = append(args, payload[0])
+	}
+	cmd := NewCmd(ctx, args...)
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	cn, err := c.conn(ctx, nil)
+	if err != nil {
+		return err
+	}
+
+	err = c.writeCmd(ctx, cn, cmd)
+	c.releaseConn(ctx, cn, err, false)
+	return err
+}
+
+// Subscription is received after a successful subscription to a channel.
+type Subscription struct {
+	// Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
+	Kind string
+	// Channel name we have subscribed to.
+	Channel string
+	// Number of channels we are currently subscribed to.
+	Count int
+}
+
+func (m *Subscription) String() string {
+	return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
+}
+
+// Message is received as a result of a PUBLISH command issued by another client.
+type Message struct {
+	Channel      string
+	Pattern      string
+	Payload      string
+	PayloadSlice []string
+}
+
+func (m *Message) String() string {
+	return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
+}
+
+// Pong is received as a result of a PING command issued by another client.
+type Pong struct {
+	Payload string
+}
+
+func (p *Pong) String() string {
+	if p.Payload != "" {
+		return fmt.Sprintf("Pong<%s>", p.Payload)
+	}
+	return "Pong"
+}
+
+func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
+	switch reply := reply.(type) {
+	case string:
+		return &Pong{
+			Payload: reply,
+		}, nil
+	case []interface{}:
+		switch kind := reply[0].(string); kind {
+		case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
+			// Can be nil in case of "unsubscribe".
+			channel, _ := reply[1].(string)
+			return &Subscription{
+				Kind:    kind,
+				Channel: channel,
+				Count:   int(reply[2].(int64)),
+			}, nil
+		case "message":
+			switch payload := reply[2].(type) {
+			case string:
+				return &Message{
+					Channel: reply[1].(string),
+					Payload: payload,
+				}, nil
+			case []interface{}:
+				ss := make([]string, len(payload))
+				for i, s := range payload {
+					ss[i] = s.(string)
+				}
+				return &Message{
+					Channel:      reply[1].(string),
+					PayloadSlice: ss,
+				}, nil
+			default:
+				return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
+			}
+		case "pmessage":
+			return &Message{
+				Pattern: reply[1].(string),
+				Channel: reply[2].(string),
+				Payload: reply[3].(string),
+			}, nil
+		case "pong":
+			return &Pong{
+				Payload: reply[1].(string),
+			}, nil
+		default:
+			return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
+		}
+	default:
+		return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
+	}
+}
+
+// ReceiveTimeout acts like Receive but returns an error if a message
+// is not received in time. This is a low-level API and in most cases
+// Channel should be used instead.
+func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
+	if c.cmd == nil {
+		c.cmd = NewCmd(ctx)
+	}
+
+	// Don't hold the lock to allow subscriptions and pings.
+
+	cn, err := c.connWithLock(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
+		return c.cmd.readReply(rd)
+	})
+
+	c.releaseConnWithLock(ctx, cn, err, timeout > 0)
+
+	if err != nil {
+		return nil, err
+	}
+
+	return c.newMessage(c.cmd.Val())
+}
+
+// Receive returns a message as a Subscription, Message, Pong or error.
+// See the PubSub example for details. This is a low-level API and in most
+// cases Channel should be used instead.
+func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
+	return c.ReceiveTimeout(ctx, 0)
+}
+
+// ReceiveMessage returns a Message or error, ignoring Subscription and Pong
+// messages. This is a low-level API and in most cases Channel should be used
+// instead.
+func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
+	for {
+		msg, err := c.Receive(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		switch msg := msg.(type) {
+		case *Subscription:
+			// Ignore.
+		case *Pong:
+			// Ignore.
+		case *Message:
+			return msg, nil
+		default:
+			err := fmt.Errorf("redis: unknown message: %T", msg)
+			return nil, err
+		}
+	}
+}
+
+func (c *PubSub) getContext() context.Context {
+	if c.cmd != nil {
+		return c.cmd.ctx
+	}
+	return context.Background()
+}
+
+//------------------------------------------------------------------------------
+
+// Channel returns a Go channel for concurrently receiving messages.
+// The channel is closed together with the PubSub. If the Go channel
+// is blocked (full) for longer than the configured send timeout,
+// the message is dropped.
+// Receive* APIs cannot be used after the channel is created.
+//
+// go-redis periodically sends ping messages to test connection health
+// and re-subscribes if a ping cannot be received.
+func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
+	c.chOnce.Do(func() {
+		c.msgCh = newChannel(c, opts...)
+		c.msgCh.initMsgChan()
+	})
+	if c.msgCh == nil {
+		err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
+		panic(err)
+	}
+	return c.msgCh.msgCh
+}
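+
+// A consumption sketch (sub is an assumed *PubSub obtained from
+// Client.Subscribe); the range loop ends when the PubSub is closed:
+//
+//    ch := sub.Channel()
+//    for msg := range ch {
+//        fmt.Println(msg.Channel, msg.Payload)
+//    }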
+
+// ChannelSize is like Channel, but creates a Go channel
+// with the specified buffer size.
+//
+// Deprecated: use Channel(WithChannelSize(size)), remove in v9.
+func (c *PubSub) ChannelSize(size int) <-chan *Message {
+	return c.Channel(WithChannelSize(size))
+}
+
+// ChannelWithSubscriptions is like Channel, but the message type can be either
+// *Subscription or *Message. Subscription messages can be used to detect
+// reconnections.
+//
+// ChannelWithSubscriptions cannot be used together with Channel or ChannelSize.
+func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
+	c.chOnce.Do(func() {
+		c.allCh = newChannel(c, WithChannelSize(size))
+		c.allCh.initAllChan()
+	})
+	if c.allCh == nil {
+		err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
+		panic(err)
+	}
+	return c.allCh.allCh
+}
+
+type ChannelOption func(c *channel)
+
+// WithChannelSize specifies the Go chan size that is used to buffer incoming messages.
+//
+// The default is 100 messages.
+func WithChannelSize(size int) ChannelOption {
+	return func(c *channel) {
+		c.chanSize = size
+	}
+}
+
+// WithChannelHealthCheckInterval specifies the health check interval.
+// PubSub will ping the Redis server if it does not receive any messages
+// within the interval. To disable the health check, use a zero interval.
+//
+// The default is 3 seconds.
+func WithChannelHealthCheckInterval(d time.Duration) ChannelOption {
+	return func(c *channel) {
+		c.checkInterval = d
+	}
+}
+
+// WithChannelSendTimeout specifies the channel send timeout after which
+// the message is dropped.
+//
+// The default is 60 seconds.
+func WithChannelSendTimeout(d time.Duration) ChannelOption {
+	return func(c *channel) {
+		c.chanSendTimeout = d
+	}
+}
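+
+// The options above can be combined when creating the channel; a sketch
+// (sub is an assumed *PubSub):
+//
+//    ch := sub.Channel(
+//        WithChannelSize(1000),
+//        WithChannelHealthCheckInterval(5*time.Second),
+//        WithChannelSendTimeout(10*time.Second),
+//    )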
+
+type channel struct {
+	pubSub *PubSub
+
+	msgCh chan *Message
+	allCh chan interface{}
+	ping  chan struct{}
+
+	chanSize        int
+	chanSendTimeout time.Duration
+	checkInterval   time.Duration
+}
+
+func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel {
+	c := &channel{
+		pubSub: pubSub,
+
+		chanSize:        100,
+		chanSendTimeout: time.Minute,
+		checkInterval:   3 * time.Second,
+	}
+	for _, opt := range opts {
+		opt(c)
+	}
+	if c.checkInterval > 0 {
+		c.initHealthCheck()
+	}
+	return c
+}
+
+func (c *channel) initHealthCheck() {
+	ctx := context.TODO()
+	c.ping = make(chan struct{}, 1)
+
+	go func() {
+		timer := time.NewTimer(time.Minute)
+		timer.Stop()
+
+		for {
+			timer.Reset(c.checkInterval)
+			select {
+			case <-c.ping:
+				if !timer.Stop() {
+					<-timer.C
+				}
+			case <-timer.C:
+				if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
+					c.pubSub.mu.Lock()
+					c.pubSub.reconnect(ctx, pingErr)
+					c.pubSub.mu.Unlock()
+				}
+			case <-c.pubSub.exit:
+				return
+			}
+		}
+	}()
+}
+
+// initMsgChan must be in sync with initAllChan.
+func (c *channel) initMsgChan() {
+	ctx := context.TODO()
+	c.msgCh = make(chan *Message, c.chanSize)
+
+	go func() {
+		timer := time.NewTimer(time.Minute)
+		timer.Stop()
+
+		var errCount int
+		for {
+			msg, err := c.pubSub.Receive(ctx)
+			if err != nil {
+				if err == pool.ErrClosed {
+					close(c.msgCh)
+					return
+				}
+				if errCount > 0 {
+					time.Sleep(100 * time.Millisecond)
+				}
+				errCount++
+				continue
+			}
+
+			errCount = 0
+
+			// Any message is as good as a ping.
+			select {
+			case c.ping <- struct{}{}:
+			default:
+			}
+
+			switch msg := msg.(type) {
+			case *Subscription:
+				// Ignore.
+			case *Pong:
+				// Ignore.
+			case *Message:
+				timer.Reset(c.chanSendTimeout)
+				select {
+				case c.msgCh <- msg:
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
+					internal.Logger.Printf(
+						ctx, "redis: %s channel is full for %s (message is dropped)",
+						c, c.chanSendTimeout)
+				}
+			default:
+				internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
+			}
+		}
+	}()
+}
+
+// initAllChan must be in sync with initMsgChan.
+func (c *channel) initAllChan() {
+	ctx := context.TODO()
+	c.allCh = make(chan interface{}, c.chanSize)
+
+	go func() {
+		timer := time.NewTimer(time.Minute)
+		timer.Stop()
+
+		var errCount int
+		for {
+			msg, err := c.pubSub.Receive(ctx)
+			if err != nil {
+				if err == pool.ErrClosed {
+					close(c.allCh)
+					return
+				}
+				if errCount > 0 {
+					time.Sleep(100 * time.Millisecond)
+				}
+				errCount++
+				continue
+			}
+
+			errCount = 0
+
+			// Any message is as good as a ping.
+			select {
+			case c.ping <- struct{}{}:
+			default:
+			}
+
+			switch msg := msg.(type) {
+			case *Pong:
+				// Ignore.
+			case *Subscription, *Message:
+				timer.Reset(c.chanSendTimeout)
+				select {
+				case c.allCh <- msg:
+					if !timer.Stop() {
+						<-timer.C
+					}
+				case <-timer.C:
+					internal.Logger.Printf(
+						ctx, "redis: %s channel is full for %s (message is dropped)",
+						c, c.chanSendTimeout)
+				}
+			default:
+				internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
+			}
+		}
+	}()
+}
diff --git a/vendor/github.com/go-redis/redis/v8/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go
new file mode 100644
index 0000000..bcf8a2a
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/redis.go
@@ -0,0 +1,773 @@
+package redis
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"sync/atomic"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
+)
+
+// Nil reply returned by Redis when a key does not exist.
+const Nil = proto.Nil
+
+func SetLogger(logger internal.Logging) {
+	internal.Logger = logger
+}
+
+//------------------------------------------------------------------------------
+
+type Hook interface {
+	BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
+	AfterProcess(ctx context.Context, cmd Cmder) error
+
+	BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
+	AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
+}
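+
+// A hedged sketch of a timing hook; the type and key names below are
+// illustrative and not part of this package:
+//
+//    type timingHook struct{}
+//    type startKey struct{}
+//
+//    func (timingHook) BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error) {
+//        return context.WithValue(ctx, startKey{}, time.Now()), nil
+//    }
+//
+//    func (timingHook) AfterProcess(ctx context.Context, cmd Cmder) error {
+//        if start, ok := ctx.Value(startKey{}).(time.Time); ok {
+//            fmt.Println(cmd.Name(), time.Since(start))
+//        }
+//        return nil
+//    }
+//
+//    func (timingHook) BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error) {
+//        return ctx, nil
+//    }
+//
+//    func (timingHook) AfterProcessPipeline(ctx context.Context, cmds []Cmder) error {
+//        return nil
+//    }
+//
+// The hook is registered with client.AddHook(timingHook{}).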
+
+type hooks struct {
+	hooks []Hook
+}
+
+func (hs *hooks) lock() {
+	hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
+}
+
+func (hs hooks) clone() hooks {
+	clone := hs
+	clone.lock()
+	return clone
+}
+
+func (hs *hooks) AddHook(hook Hook) {
+	hs.hooks = append(hs.hooks, hook)
+}
+
+func (hs hooks) process(
+	ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
+) error {
+	if len(hs.hooks) == 0 {
+		err := fn(ctx, cmd)
+		cmd.SetErr(err)
+		return err
+	}
+
+	var hookIndex int
+	var retErr error
+
+	for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+		ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
+		if retErr != nil {
+			cmd.SetErr(retErr)
+		}
+	}
+
+	if retErr == nil {
+		retErr = fn(ctx, cmd)
+		cmd.SetErr(retErr)
+	}
+
+	for hookIndex--; hookIndex >= 0; hookIndex-- {
+		if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
+			retErr = err
+			cmd.SetErr(retErr)
+		}
+	}
+
+	return retErr
+}
+
+func (hs hooks) processPipeline(
+	ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
+) error {
+	if len(hs.hooks) == 0 {
+		err := fn(ctx, cmds)
+		return err
+	}
+
+	var hookIndex int
+	var retErr error
+
+	for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
+		ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
+		if retErr != nil {
+			setCmdsErr(cmds, retErr)
+		}
+	}
+
+	if retErr == nil {
+		retErr = fn(ctx, cmds)
+	}
+
+	for hookIndex--; hookIndex >= 0; hookIndex-- {
+		if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
+			retErr = err
+			setCmdsErr(cmds, retErr)
+		}
+	}
+
+	return retErr
+}
+
+func (hs hooks) processTxPipeline(
+	ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
+) error {
+	cmds = wrapMultiExec(ctx, cmds)
+	return hs.processPipeline(ctx, cmds, fn)
+}
+
+//------------------------------------------------------------------------------
+
+type baseClient struct {
+	opt      *Options
+	connPool pool.Pooler
+
+	onClose func() error // hook called when client is closed
+}
+
+func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
+	return &baseClient{
+		opt:      opt,
+		connPool: connPool,
+	}
+}
+
+func (c *baseClient) clone() *baseClient {
+	clone := *c
+	return &clone
+}
+
+func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
+	opt := c.opt.clone()
+	opt.ReadTimeout = timeout
+	opt.WriteTimeout = timeout
+
+	clone := c.clone()
+	clone.opt = opt
+
+	return clone
+}
+
+func (c *baseClient) String() string {
+	return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
+}
+
+func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
+	cn, err := c.connPool.NewConn(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	err = c.initConn(ctx, cn)
+	if err != nil {
+		_ = c.connPool.CloseConn(cn)
+		return nil, err
+	}
+
+	return cn, nil
+}
+
+func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
+	if c.opt.Limiter != nil {
+		err := c.opt.Limiter.Allow()
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	cn, err := c._getConn(ctx)
+	if err != nil {
+		if c.opt.Limiter != nil {
+			c.opt.Limiter.ReportResult(err)
+		}
+		return nil, err
+	}
+
+	return cn, nil
+}
+
+func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
+	cn, err := c.connPool.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	if cn.Inited {
+		return cn, nil
+	}
+
+	if err := c.initConn(ctx, cn); err != nil {
+		c.connPool.Remove(ctx, cn, err)
+		if err := errors.Unwrap(err); err != nil {
+			return nil, err
+		}
+		return nil, err
+	}
+
+	return cn, nil
+}
+
+func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
+	if cn.Inited {
+		return nil
+	}
+	cn.Inited = true
+
+	if c.opt.Password == "" &&
+		c.opt.DB == 0 &&
+		!c.opt.readOnly &&
+		c.opt.OnConnect == nil {
+		return nil
+	}
+
+	connPool := pool.NewSingleConnPool(c.connPool, cn)
+	conn := newConn(ctx, c.opt, connPool)
+
+	_, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
+		if c.opt.Password != "" {
+			if c.opt.Username != "" {
+				pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
+			} else {
+				pipe.Auth(ctx, c.opt.Password)
+			}
+		}
+
+		if c.opt.DB > 0 {
+			pipe.Select(ctx, c.opt.DB)
+		}
+
+		if c.opt.readOnly {
+			pipe.ReadOnly(ctx)
+		}
+
+		return nil
+	})
+	if err != nil {
+		return err
+	}
+
+	if c.opt.OnConnect != nil {
+		return c.opt.OnConnect(ctx, conn)
+	}
+	return nil
+}
+
+func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
+	if c.opt.Limiter != nil {
+		c.opt.Limiter.ReportResult(err)
+	}
+
+	if isBadConn(err, false, c.opt.Addr) {
+		c.connPool.Remove(ctx, cn, err)
+	} else {
+		c.connPool.Put(ctx, cn)
+	}
+}
+
+func (c *baseClient) withConn(
+	ctx context.Context, fn func(context.Context, *pool.Conn) error,
+) error {
+	cn, err := c.getConn(ctx)
+	if err != nil {
+		return err
+	}
+
+	defer func() {
+		c.releaseConn(ctx, cn, err)
+	}()
+
+	done := ctx.Done() //nolint:ifshort
+
+	if done == nil {
+		err = fn(ctx, cn)
+		return err
+	}
+
+	errc := make(chan error, 1)
+	go func() { errc <- fn(ctx, cn) }()
+
+	select {
+	case <-done:
+		_ = cn.Close()
+		// Wait for the goroutine to finish and send something.
+		<-errc
+
+		err = ctx.Err()
+		return err
+	case err = <-errc:
+		return err
+	}
+}
+
+func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
+	var lastErr error
+	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+		attempt := attempt
+
+		retry, err := c._process(ctx, cmd, attempt)
+		if err == nil || !retry {
+			return err
+		}
+
+		lastErr = err
+	}
+	return lastErr
+}
+
+func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
+	if attempt > 0 {
+		if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+			return false, err
+		}
+	}
+
+	retryTimeout := uint32(1)
+	err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+		err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+			return writeCmd(wr, cmd)
+		})
+		if err != nil {
+			return err
+		}
+
+		err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
+		if err != nil {
+			if cmd.readTimeout() == nil {
+				atomic.StoreUint32(&retryTimeout, 1)
+			}
+			return err
+		}
+
+		return nil
+	})
+	if err == nil {
+		return false, nil
+	}
+
+	retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
+	return retry, err
+}
+
+func (c *baseClient) retryBackoff(attempt int) time.Duration {
+	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
+	if timeout := cmd.readTimeout(); timeout != nil {
+		t := *timeout
+		if t == 0 {
+			return 0
+		}
+		return t + 10*time.Second
+	}
+	return c.opt.ReadTimeout
+}
+
+// Close closes the client, releasing any open resources.
+//
+// It is rare to Close a Client, as the Client is meant to be
+// long-lived and shared between many goroutines.
+func (c *baseClient) Close() error {
+	var firstErr error
+	if c.onClose != nil {
+		if err := c.onClose(); err != nil {
+			firstErr = err
+		}
+	}
+	if err := c.connPool.Close(); err != nil && firstErr == nil {
+		firstErr = err
+	}
+	return firstErr
+}
+
+func (c *baseClient) getAddr() string {
+	return c.opt.Addr
+}
+
+func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
+}
+
+func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
+}
+
+type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
+
+func (c *baseClient) generalProcessPipeline(
+	ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+	err := c._generalProcessPipeline(ctx, cmds, p)
+	if err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+	return cmdsFirstErr(cmds)
+}
+
+func (c *baseClient) _generalProcessPipeline(
+	ctx context.Context, cmds []Cmder, p pipelineProcessor,
+) error {
+	var lastErr error
+	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				return err
+			}
+		}
+
+		var canRetry bool
+		lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
+			var err error
+			canRetry, err = p(ctx, cn, cmds)
+			return err
+		})
+		if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
+			return lastErr
+		}
+	}
+	return lastErr
+}
+
+func (c *baseClient) pipelineProcessCmds(
+	ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+	err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+		return writeCmds(wr, cmds)
+	})
+	if err != nil {
+		return true, err
+	}
+
+	err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+		return pipelineReadCmds(rd, cmds)
+	})
+	return true, err
+}
+
+func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
+	for _, cmd := range cmds {
+		err := cmd.readReply(rd)
+		cmd.SetErr(err)
+		if err != nil && !isRedisError(err) {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *baseClient) txPipelineProcessCmds(
+	ctx context.Context, cn *pool.Conn, cmds []Cmder,
+) (bool, error) {
+	err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
+		return writeCmds(wr, cmds)
+	})
+	if err != nil {
+		return true, err
+	}
+
+	err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
+		statusCmd := cmds[0].(*StatusCmd)
+		// Trim multi and exec.
+		cmds = cmds[1 : len(cmds)-1]
+
+		err := txPipelineReadQueued(rd, statusCmd, cmds)
+		if err != nil {
+			return err
+		}
+
+		return pipelineReadCmds(rd, cmds)
+	})
+	return false, err
+}
+
+func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
+	if len(cmds) == 0 {
+		panic("not reached")
+	}
+	cmdCopy := make([]Cmder, len(cmds)+2)
+	cmdCopy[0] = NewStatusCmd(ctx, "multi")
+	copy(cmdCopy[1:], cmds)
+	cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
+	return cmdCopy
+}
+
+func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
+	// Parse queued replies.
+	if err := statusCmd.readReply(rd); err != nil {
+		return err
+	}
+
+	for range cmds {
+		if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
+			return err
+		}
+	}
+
+	// Parse number of replies.
+	line, err := rd.ReadLine()
+	if err != nil {
+		if err == Nil {
+			err = TxFailedErr
+		}
+		return err
+	}
+
+	switch line[0] {
+	case proto.ErrorReply:
+		return proto.ParseErrorReply(line)
+	case proto.ArrayReply:
+		// ok
+	default:
+		err := fmt.Errorf("redis: expected '*', but got line %q", line)
+		return err
+	}
+
+	return nil
+}
+
+//------------------------------------------------------------------------------
+
+// Client is a Redis client representing a pool of zero or more
+// underlying connections. It's safe for concurrent use by multiple
+// goroutines.
+type Client struct {
+	*baseClient
+	cmdable
+	hooks
+	ctx context.Context
+}
+
+// NewClient returns a client to the Redis Server specified by Options.
+func NewClient(opt *Options) *Client {
+	opt.init()
+
+	c := Client{
+		baseClient: newBaseClient(opt, newConnPool(opt)),
+		ctx:        context.Background(),
+	}
+	c.cmdable = c.Process
+
+	return &c
+}
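+
+// A minimal construction sketch (the address and credentials are placeholders
+// and ctx is an assumed context.Context):
+//
+//    rdb := NewClient(&Options{
+//        Addr:     "localhost:6379",
+//        Password: "", // no password set
+//        DB:       0,  // use the default DB
+//    })
+//    if err := rdb.Ping(ctx).Err(); err != nil {
+//        // handle connection error
+//    }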
+
+func (c *Client) clone() *Client {
+	clone := *c
+	clone.cmdable = clone.Process
+	clone.hooks.lock()
+	return &clone
+}
+
+func (c *Client) WithTimeout(timeout time.Duration) *Client {
+	clone := c.clone()
+	clone.baseClient = c.baseClient.withTimeout(timeout)
+	return clone
+}
+
+func (c *Client) Context() context.Context {
+	return c.ctx
+}
+
+func (c *Client) WithContext(ctx context.Context) *Client {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := c.clone()
+	clone.ctx = ctx
+	return clone
+}
+
+func (c *Client) Conn(ctx context.Context) *Conn {
+	return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+func (c *Client) Process(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+}
+
+func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Client) Options() *Options {
+	return c.opt
+}
+
+type PoolStats pool.Stats
+
+// PoolStats returns connection pool stats.
+func (c *Client) PoolStats() *PoolStats {
+	stats := c.connPool.Stats()
+	return (*PoolStats)(stats)
+}
+
+func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Client) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Client) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processTxPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
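+
+// A transactional pipeline sketch (rdb is an assumed *Client); the queued
+// commands are wrapped with MULTI/EXEC before being sent:
+//
+//    _, err := rdb.TxPipelined(ctx, func(pipe Pipeliner) error {
+//        pipe.Do(ctx, "incr", "counter")
+//        pipe.Do(ctx, "expire", "counter", 60)
+//        return nil
+//    })
+//    if err != nil {
+//        // handle error
+//    }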
+
+func (c *Client) pubSub() *PubSub {
+	pubsub := &PubSub{
+		opt: c.opt,
+
+		newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+			return c.newConn(ctx)
+		},
+		closeConn: c.connPool.CloseConn,
+	}
+	pubsub.init()
+	return pubsub
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+// Note that this method does not wait on a response from Redis, so the
+// subscription may not be active immediately. To force the connection to wait,
+// you may call the Receive() method on the returned *PubSub like so:
+//
+//    sub := client.Subscribe(queryResp)
+//    iface, err := sub.Receive()
+//    if err != nil {
+//        // handle error
+//    }
+//
+//    // Should be *Subscription, but others are possible if other actions have been
+//    // taken on sub since it was created.
+//    switch iface.(type) {
+//    case *Subscription:
+//        // subscribe succeeded
+//    case *Message:
+//        // received first message
+//    case *Pong:
+//        // pong received
+//    default:
+//        // handle error
+//    }
+//
+//    ch := sub.Channel()
+func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.Subscribe(ctx, channels...)
+	}
+	return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.PSubscribe(ctx, channels...)
+	}
+	return pubsub
+}
+
+//------------------------------------------------------------------------------
+
+type conn struct {
+	baseClient
+	cmdable
+	statefulCmdable
+	hooks // TODO: inherit hooks
+}
+
+// Conn represents a single Redis connection rather than a pool of connections.
+// Prefer running commands from Client unless there is a specific need
+// for a continuous single Redis connection.
+type Conn struct {
+	*conn
+	ctx context.Context
+}
+
+func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
+	c := Conn{
+		conn: &conn{
+			baseClient: baseClient{
+				opt:      opt,
+				connPool: connPool,
+			},
+		},
+		ctx: ctx,
+	}
+	c.cmdable = c.Process
+	c.statefulCmdable = c.Process
+	return &c
+}
+
+func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+}
+
+func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+}
+
+func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Conn) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
+func (c *Conn) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processTxPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
diff --git a/vendor/github.com/go-redis/redis/v8/result.go b/vendor/github.com/go-redis/redis/v8/result.go
new file mode 100644
index 0000000..24cfd49
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/result.go
@@ -0,0 +1,180 @@
+package redis
+
+import "time"
+
+// NewCmdResult returns a Cmd initialised with val and err for testing.
+func NewCmdResult(val interface{}, err error) *Cmd {
+	var cmd Cmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
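+
+// The constructors in this file are intended for tests that need to stub out
+// Redis replies without a live server; a hedged sketch:
+//
+//    cmd := NewStringResult("PONG", nil)
+//    fmt.Println(cmd.Val(), cmd.Err())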
+
+// NewSliceResult returns a SliceCmd initialised with val and err for testing.
+func NewSliceResult(val []interface{}, err error) *SliceCmd {
+	var cmd SliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStatusResult returns a StatusCmd initialised with val and err for testing.
+func NewStatusResult(val string, err error) *StatusCmd {
+	var cmd StatusCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewIntResult returns an IntCmd initialised with val and err for testing.
+func NewIntResult(val int64, err error) *IntCmd {
+	var cmd IntCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewDurationResult returns a DurationCmd initialised with val and err for testing.
+func NewDurationResult(val time.Duration, err error) *DurationCmd {
+	var cmd DurationCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewBoolResult returns a BoolCmd initialised with val and err for testing.
+func NewBoolResult(val bool, err error) *BoolCmd {
+	var cmd BoolCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStringResult returns a StringCmd initialised with val and err for testing.
+func NewStringResult(val string, err error) *StringCmd {
+	var cmd StringCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewFloatResult returns a FloatCmd initialised with val and err for testing.
+func NewFloatResult(val float64, err error) *FloatCmd {
+	var cmd FloatCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing.
+func NewStringSliceResult(val []string, err error) *StringSliceCmd {
+	var cmd StringSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing.
+func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
+	var cmd BoolSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
+func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
+	var cmd StringStringMapCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
+func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
+	var cmd StringIntMapCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing.
+func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
+	var cmd TimeCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing.
+func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
+	var cmd ZSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewZWithKeyCmdResult returns a ZWithKeyCmd initialised with val and err for testing.
+func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
+	var cmd ZWithKeyCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewScanCmdResult returns a ScanCmd initialised with val and err for testing.
+func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
+	var cmd ScanCmd
+	cmd.page = keys
+	cmd.cursor = cursor
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing.
+func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
+	var cmd ClusterSlotsCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing.
+func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
+	var cmd GeoLocationCmd
+	cmd.locations = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing.
+func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
+	var cmd GeoPosCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing.
+func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
+	var cmd CommandsInfoCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewXMessageSliceCmdResult returns an XMessageSliceCmd initialised with val and err for testing.
+func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
+	var cmd XMessageSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
+
+// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing.
+func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
+	var cmd XStreamSliceCmd
+	cmd.val = val
+	cmd.SetErr(err)
+	return &cmd
+}
diff --git a/vendor/github.com/go-redis/redis/v8/ring.go b/vendor/github.com/go-redis/redis/v8/ring.go
new file mode 100644
index 0000000..4df00fc
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/ring.go
@@ -0,0 +1,736 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"fmt"
+	"net"
+	"strconv"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/cespare/xxhash/v2"
+	rendezvous "github.com/dgryski/go-rendezvous" //nolint
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/hashtag"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/rand"
+)
+
+var errRingShardsDown = errors.New("redis: all ring shards are down")
+
+//------------------------------------------------------------------------------
+
+type ConsistentHash interface {
+	Get(string) string
+}
+
+type rendezvousWrapper struct {
+	*rendezvous.Rendezvous
+}
+
+func (w rendezvousWrapper) Get(key string) string {
+	return w.Lookup(key)
+}
+
+func newRendezvous(shards []string) ConsistentHash {
+	return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
+}
+
+//------------------------------------------------------------------------------
+
+// RingOptions are used to configure a ring client and should be
+// passed to NewRing.
+type RingOptions struct {
+	// Map of name => host:port addresses of ring shards.
+	Addrs map[string]string
+
+	// NewClient creates a shard client with provided name and options.
+	NewClient func(name string, opt *Options) *Client
+
+	// Frequency of PING commands sent to check shard availability.
+	// A shard is considered down after 3 consecutive failed checks.
+	HeartbeatFrequency time.Duration
+
+	// NewConsistentHash returns a consistent hash that is used
+	// to distribute keys across the shards.
+	//
+	// See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
+	// for consistent hashing algorithmic tradeoffs.
+	NewConsistentHash func(shards []string) ConsistentHash
+
+	// Following options are copied from Options struct.
+
+	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
+	OnConnect func(ctx context.Context, cn *Conn) error
+
+	Username string
+	Password string
+	DB       int
+
+	MaxRetries      int
+	MinRetryBackoff time.Duration
+	MaxRetryBackoff time.Duration
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+
+	TLSConfig *tls.Config
+	Limiter   Limiter
+}
+
+func (opt *RingOptions) init() {
+	if opt.NewClient == nil {
+		opt.NewClient = func(name string, opt *Options) *Client {
+			return NewClient(opt)
+		}
+	}
+
+	if opt.HeartbeatFrequency == 0 {
+		opt.HeartbeatFrequency = 500 * time.Millisecond
+	}
+
+	if opt.NewConsistentHash == nil {
+		opt.NewConsistentHash = newRendezvous
+	}
+
+	if opt.MaxRetries == -1 {
+		opt.MaxRetries = 0
+	} else if opt.MaxRetries == 0 {
+		opt.MaxRetries = 3
+	}
+	switch opt.MinRetryBackoff {
+	case -1:
+		opt.MinRetryBackoff = 0
+	case 0:
+		opt.MinRetryBackoff = 8 * time.Millisecond
+	}
+	switch opt.MaxRetryBackoff {
+	case -1:
+		opt.MaxRetryBackoff = 0
+	case 0:
+		opt.MaxRetryBackoff = 512 * time.Millisecond
+	}
+}
+
+func (opt *RingOptions) clientOptions() *Options {
+	return &Options{
+		Dialer:    opt.Dialer,
+		OnConnect: opt.OnConnect,
+
+		Username: opt.Username,
+		Password: opt.Password,
+		DB:       opt.DB,
+
+		MaxRetries: -1,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+
+		TLSConfig: opt.TLSConfig,
+		Limiter:   opt.Limiter,
+	}
+}
+
+//------------------------------------------------------------------------------
+
+type ringShard struct {
+	Client *Client
+	down   int32
+}
+
+func newRingShard(opt *RingOptions, name, addr string) *ringShard {
+	clopt := opt.clientOptions()
+	clopt.Addr = addr
+
+	return &ringShard{
+		Client: opt.NewClient(name, clopt),
+	}
+}
+
+func (shard *ringShard) String() string {
+	var state string
+	if shard.IsUp() {
+		state = "up"
+	} else {
+		state = "down"
+	}
+	return fmt.Sprintf("%s is %s", shard.Client, state)
+}
+
+func (shard *ringShard) IsDown() bool {
+	const threshold = 3
+	return atomic.LoadInt32(&shard.down) >= threshold
+}
+
+func (shard *ringShard) IsUp() bool {
+	return !shard.IsDown()
+}
+
+// Vote votes to set shard state and returns true if state was changed.
+func (shard *ringShard) Vote(up bool) bool {
+	if up {
+		changed := shard.IsDown()
+		atomic.StoreInt32(&shard.down, 0)
+		return changed
+	}
+
+	if shard.IsDown() {
+		return false
+	}
+
+	atomic.AddInt32(&shard.down, 1)
+	return shard.IsDown()
+}
+
+//------------------------------------------------------------------------------
+
+type ringShards struct {
+	opt *RingOptions
+
+	mu       sync.RWMutex
+	hash     ConsistentHash
+	shards   map[string]*ringShard // read only
+	list     []*ringShard          // read only
+	numShard int
+	closed   bool
+}
+
+func newRingShards(opt *RingOptions) *ringShards {
+	shards := make(map[string]*ringShard, len(opt.Addrs))
+	list := make([]*ringShard, 0, len(shards))
+
+	for name, addr := range opt.Addrs {
+		shard := newRingShard(opt, name, addr)
+		shards[name] = shard
+
+		list = append(list, shard)
+	}
+
+	c := &ringShards{
+		opt: opt,
+
+		shards: shards,
+		list:   list,
+	}
+	c.rebalance()
+
+	return c
+}
+
+func (c *ringShards) List() []*ringShard {
+	var list []*ringShard
+
+	c.mu.RLock()
+	if !c.closed {
+		list = c.list
+	}
+	c.mu.RUnlock()
+
+	return list
+}
+
+func (c *ringShards) Hash(key string) string {
+	key = hashtag.Key(key)
+
+	var hash string
+
+	c.mu.RLock()
+	if c.numShard > 0 {
+		hash = c.hash.Get(key)
+	}
+	c.mu.RUnlock()
+
+	return hash
+}
+
+func (c *ringShards) GetByKey(key string) (*ringShard, error) {
+	key = hashtag.Key(key)
+
+	c.mu.RLock()
+
+	if c.closed {
+		c.mu.RUnlock()
+		return nil, pool.ErrClosed
+	}
+
+	if c.numShard == 0 {
+		c.mu.RUnlock()
+		return nil, errRingShardsDown
+	}
+
+	hash := c.hash.Get(key)
+	if hash == "" {
+		c.mu.RUnlock()
+		return nil, errRingShardsDown
+	}
+
+	shard := c.shards[hash]
+	c.mu.RUnlock()
+
+	return shard, nil
+}
+
+func (c *ringShards) GetByName(shardName string) (*ringShard, error) {
+	if shardName == "" {
+		return c.Random()
+	}
+
+	c.mu.RLock()
+	shard := c.shards[shardName]
+	c.mu.RUnlock()
+	return shard, nil
+}
+
+func (c *ringShards) Random() (*ringShard, error) {
+	return c.GetByKey(strconv.Itoa(rand.Int()))
+}
+
+// Heartbeat monitors the state of each shard in the ring.
+func (c *ringShards) Heartbeat(frequency time.Duration) {
+	ticker := time.NewTicker(frequency)
+	defer ticker.Stop()
+
+	ctx := context.Background()
+	for range ticker.C {
+		var rebalance bool
+
+		for _, shard := range c.List() {
+			err := shard.Client.Ping(ctx).Err()
+			isUp := err == nil || err == pool.ErrPoolTimeout
+			if shard.Vote(isUp) {
+				internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
+				rebalance = true
+			}
+		}
+
+		if rebalance {
+			c.rebalance()
+		}
+	}
+}
+
+// rebalance removes dead shards from the Ring.
+func (c *ringShards) rebalance() {
+	c.mu.RLock()
+	shards := c.shards
+	c.mu.RUnlock()
+
+	liveShards := make([]string, 0, len(shards))
+
+	for name, shard := range shards {
+		if shard.IsUp() {
+			liveShards = append(liveShards, name)
+		}
+	}
+
+	hash := c.opt.NewConsistentHash(liveShards)
+
+	c.mu.Lock()
+	c.hash = hash
+	c.numShard = len(liveShards)
+	c.mu.Unlock()
+}
+
+func (c *ringShards) Len() int {
+	c.mu.RLock()
+	l := c.numShard
+	c.mu.RUnlock()
+	return l
+}
+
+func (c *ringShards) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.closed {
+		return nil
+	}
+	c.closed = true
+
+	var firstErr error
+	for _, shard := range c.shards {
+		if err := shard.Client.Close(); err != nil && firstErr == nil {
+			firstErr = err
+		}
+	}
+	c.hash = nil
+	c.shards = nil
+	c.list = nil
+
+	return firstErr
+}
+
+//------------------------------------------------------------------------------
+
+type ring struct {
+	opt           *RingOptions
+	shards        *ringShards
+	cmdsInfoCache *cmdsInfoCache //nolint:structcheck
+}
+
+// Ring is a Redis client that uses consistent hashing to distribute
+// keys across multiple Redis servers (shards). It's safe for
+// concurrent use by multiple goroutines.
+//
+// Ring monitors the state of each shard and removes dead shards from
+// the ring. When a shard comes online it is added back to the ring. This
+// gives you maximum availability and partition tolerance, but no
+// consistency between different shards or even clients. Each client
+// uses shards that are available to the client and does not do any
+// coordination when shard state is changed.
+//
+// Ring should be used when you need multiple Redis servers for caching
+// and can tolerate losing data when one of the servers dies.
+// Otherwise you should use Redis Cluster.
+type Ring struct {
+	*ring
+	cmdable
+	hooks
+	ctx context.Context
+}
+
+func NewRing(opt *RingOptions) *Ring {
+	opt.init()
+
+	ring := Ring{
+		ring: &ring{
+			opt:    opt,
+			shards: newRingShards(opt),
+		},
+		ctx: context.Background(),
+	}
+
+	ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
+	ring.cmdable = ring.Process
+
+	go ring.shards.Heartbeat(opt.HeartbeatFrequency)
+
+	return &ring
+}
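+
+// A construction sketch (shard names and addresses are placeholders, ctx is an
+// assumed context.Context):
+//
+//    rdb := NewRing(&RingOptions{
+//        Addrs: map[string]string{
+//            "shard1": "localhost:7000",
+//            "shard2": "localhost:7001",
+//        },
+//    })
+//    if err := rdb.Do(ctx, "set", "key", "value").Err(); err != nil {
+//        // handle error
+//    }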
+
+func (c *Ring) Context() context.Context {
+	return c.ctx
+}
+
+func (c *Ring) WithContext(ctx context.Context) *Ring {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := *c
+	clone.cmdable = clone.Process
+	clone.hooks.lock()
+	clone.ctx = ctx
+	return &clone
+}
+
+// Do creates a Cmd from the args and processes the cmd.
+func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
+	cmd := NewCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.process)
+}
+
+// Options returns read-only Options that were used to create the client.
+func (c *Ring) Options() *RingOptions {
+	return c.opt
+}
+
+func (c *Ring) retryBackoff(attempt int) time.Duration {
+	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
+}
+
+// PoolStats returns accumulated connection pool stats.
+func (c *Ring) PoolStats() *PoolStats {
+	shards := c.shards.List()
+	var acc PoolStats
+	for _, shard := range shards {
+		s := shard.Client.connPool.Stats()
+		acc.Hits += s.Hits
+		acc.Misses += s.Misses
+		acc.Timeouts += s.Timeouts
+		acc.TotalConns += s.TotalConns
+		acc.IdleConns += s.IdleConns
+	}
+	return &acc
+}
+
+// Len returns the current number of shards in the ring.
+func (c *Ring) Len() int {
+	return c.shards.Len()
+}
+
+// Subscribe subscribes the client to the specified channels.
+func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
+	if len(channels) == 0 {
+		panic("at least one channel is required")
+	}
+
+	shard, err := c.shards.GetByKey(channels[0])
+	if err != nil {
+		// TODO: return PubSub with sticky error
+		panic(err)
+	}
+	return shard.Client.Subscribe(ctx, channels...)
+}
+
+// PSubscribe subscribes the client to the given patterns.
+func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+	if len(channels) == 0 {
+		panic("at least one channel is required")
+	}
+
+	shard, err := c.shards.GetByKey(channels[0])
+	if err != nil {
+		// TODO: return PubSub with sticky error
+		panic(err)
+	}
+	return shard.Client.PSubscribe(ctx, channels...)
+}
+
+// ForEachShard concurrently calls fn on each live shard in the ring.
+// It returns the first error, if any.
+func (c *Ring) ForEachShard(
+	ctx context.Context,
+	fn func(ctx context.Context, client *Client) error,
+) error {
+	shards := c.shards.List()
+	var wg sync.WaitGroup
+	errCh := make(chan error, 1)
+	for _, shard := range shards {
+		if shard.IsDown() {
+			continue
+		}
+
+		wg.Add(1)
+		go func(shard *ringShard) {
+			defer wg.Done()
+			err := fn(ctx, shard.Client)
+			if err != nil {
+				select {
+				case errCh <- err:
+				default:
+				}
+			}
+		}(shard)
+	}
+	wg.Wait()
+
+	select {
+	case err := <-errCh:
+		return err
+	default:
+		return nil
+	}
+}
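+
+// A usage sketch (rdb is an assumed *Ring); fn runs concurrently against every
+// live shard and the first error, if any, is returned:
+//
+//    err := rdb.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
+//        return shard.Ping(ctx).Err()
+//    })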
+
+func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
+	shards := c.shards.List()
+	var firstErr error
+	for _, shard := range shards {
+		cmdsInfo, err := shard.Client.Command(ctx).Result()
+		if err == nil {
+			return cmdsInfo, nil
+		}
+		if firstErr == nil {
+			firstErr = err
+		}
+	}
+	if firstErr == nil {
+		return nil, errRingShardsDown
+	}
+	return nil, firstErr
+}
+
+func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo {
+	cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
+	if err != nil {
+		return nil
+	}
+	info := cmdsInfo[name]
+	if info == nil {
+		internal.Logger.Printf(ctx, "info for cmd=%s not found", name)
+	}
+	return info
+}
+
+func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
+	cmdInfo := c.cmdInfo(ctx, cmd.Name())
+	pos := cmdFirstKeyPos(cmd, cmdInfo)
+	if pos == 0 {
+		return c.shards.Random()
+	}
+	firstKey := cmd.stringArg(pos)
+	return c.shards.GetByKey(firstKey)
+}
+
+func (c *Ring) process(ctx context.Context, cmd Cmder) error {
+	var lastErr error
+	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
+		if attempt > 0 {
+			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
+				return err
+			}
+		}
+
+		shard, err := c.cmdShard(ctx, cmd)
+		if err != nil {
+			return err
+		}
+
+		lastErr = shard.Client.Process(ctx, cmd)
+		if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
+			return lastErr
+		}
+	}
+	return lastErr
+}
+
+func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return c.generalProcessPipeline(ctx, cmds, false)
+	})
+}
+
+func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(ctx, fn)
+}
+
+func (c *Ring) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx:  c.ctx,
+		exec: c.processTxPipeline,
+	}
+	pipe.init()
+	return &pipe
+}
+
+func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
+	return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
+		return c.generalProcessPipeline(ctx, cmds, true)
+	})
+}
+
+func (c *Ring) generalProcessPipeline(
+	ctx context.Context, cmds []Cmder, tx bool,
+) error {
+	cmdsMap := make(map[string][]Cmder)
+	for _, cmd := range cmds {
+		cmdInfo := c.cmdInfo(ctx, cmd.Name())
+		hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
+		if hash != "" {
+			hash = c.shards.Hash(hash)
+		}
+		cmdsMap[hash] = append(cmdsMap[hash], cmd)
+	}
+
+	var wg sync.WaitGroup
+	for hash, cmds := range cmdsMap {
+		wg.Add(1)
+		go func(hash string, cmds []Cmder) {
+			defer wg.Done()
+
+			_ = c.processShardPipeline(ctx, hash, cmds, tx)
+		}(hash, cmds)
+	}
+
+	wg.Wait()
+	return cmdsFirstErr(cmds)
+}
+
+func (c *Ring) processShardPipeline(
+	ctx context.Context, hash string, cmds []Cmder, tx bool,
+) error {
+	// TODO: retry?
+	shard, err := c.shards.GetByName(hash)
+	if err != nil {
+		setCmdsErr(cmds, err)
+		return err
+	}
+
+	if tx {
+		return shard.Client.processTxPipeline(ctx, cmds)
+	}
+	return shard.Client.processPipeline(ctx, cmds)
+}
+
+func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+	if len(keys) == 0 {
+		return fmt.Errorf("redis: Watch requires at least one key")
+	}
+
+	var shards []*ringShard
+	for _, key := range keys {
+		if key != "" {
+			shard, err := c.shards.GetByKey(hashtag.Key(key))
+			if err != nil {
+				return err
+			}
+
+			shards = append(shards, shard)
+		}
+	}
+
+	if len(shards) == 0 {
+		return fmt.Errorf("redis: Watch requires at least one shard")
+	}
+
+	if len(shards) > 1 {
+		for _, shard := range shards[1:] {
+			if shard.Client != shards[0].Client {
+				err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
+				return err
+			}
+		}
+	}
+
+	return shards[0].Client.Watch(ctx, fn, keys...)
+}
+
+// Close closes the ring client, releasing any open resources.
+//
+// It is rare to Close a Ring, as the Ring is meant to be long-lived
+// and shared between many goroutines.
+func (c *Ring) Close() error {
+	return c.shards.Close()
+}
diff --git a/vendor/github.com/go-redis/redis/v8/script.go b/vendor/github.com/go-redis/redis/v8/script.go
new file mode 100644
index 0000000..5cab18d
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/script.go
@@ -0,0 +1,65 @@
+package redis
+
+import (
+	"context"
+	"crypto/sha1"
+	"encoding/hex"
+	"io"
+	"strings"
+)
+
+type Scripter interface {
+	Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
+	EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
+	ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
+	ScriptLoad(ctx context.Context, script string) *StringCmd
+}
+
+var (
+	_ Scripter = (*Client)(nil)
+	_ Scripter = (*Ring)(nil)
+	_ Scripter = (*ClusterClient)(nil)
+)
+
+type Script struct {
+	src, hash string
+}
+
+func NewScript(src string) *Script {
+	h := sha1.New()
+	_, _ = io.WriteString(h, src)
+	return &Script{
+		src:  src,
+		hash: hex.EncodeToString(h.Sum(nil)),
+	}
+}
+
+func (s *Script) Hash() string {
+	return s.hash
+}
+
+func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
+	return c.ScriptLoad(ctx, s.src)
+}
+
+func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
+	return c.ScriptExists(ctx, s.hash)
+}
+
+func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+	return c.Eval(ctx, s.src, keys, args...)
+}
+
+func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+	return c.EvalSha(ctx, s.hash, keys, args...)
+}
+
+// Run optimistically uses EVALSHA to run the script. If the script does not
+// exist on the server, it is retried using EVAL.
+func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
+	r := s.EvalSha(ctx, c, keys, args...)
+	if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
+		return s.Eval(ctx, c, keys, args...)
+	}
+	return r
+}
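A short sketch (not part of the vendored file) of how NewScript and Run fit together: Run tries EVALSHA first and falls back to EVAL when the server has not cached the script yet. The key name and script body are illustrative.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

// incrBy is an illustrative counter script; Run tries EVALSHA first and
// falls back to EVAL when the script is not yet cached on the server.
var incrBy = redis.NewScript(`return redis.call("INCRBY", KEYS[1], ARGV[1])`)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	n, err := incrBy.Run(ctx, rdb, []string{"counter"}, 5).Int64()
	fmt.Println(n, err)
}
```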
diff --git a/vendor/github.com/go-redis/redis/v8/sentinel.go b/vendor/github.com/go-redis/redis/v8/sentinel.go
new file mode 100644
index 0000000..ec6221d
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/sentinel.go
@@ -0,0 +1,796 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"errors"
+	"net"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/v8/internal"
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/rand"
+)
+
+//------------------------------------------------------------------------------
+
+// FailoverOptions are used to configure a failover client and should
+// be passed to NewFailoverClient.
+type FailoverOptions struct {
+	// The master name.
+	MasterName string
+	// A seed list of host:port addresses of sentinel nodes.
+	SentinelAddrs []string
+
+	// If specified with SentinelPassword, enables ACL-based authentication (via
+	// AUTH <user> <pass>).
+	SentinelUsername string
+	// Sentinel password from "requirepass <password>" (if enabled) in Sentinel
+	// configuration, or, if SentinelUsername is also supplied, used for ACL-based
+	// authentication.
+	SentinelPassword string
+
+	// Allows routing read-only commands to the closest master or slave node.
+	// This option only works with NewFailoverClusterClient.
+	RouteByLatency bool
+	// Allows routing read-only commands to a random master or slave node.
+	// This option only works with NewFailoverClusterClient.
+	RouteRandomly bool
+
+	// Route all commands to slave read-only nodes.
+	SlaveOnly bool
+
+	// Use slaves that are disconnected from the master when no connected slaves
+	// are available. Currently, this option only works in the RandomSlaveAddr function.
+	UseDisconnectedSlaves bool
+
+	// Following options are copied from Options struct.
+
+	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
+	OnConnect func(ctx context.Context, cn *Conn) error
+
+	Username string
+	Password string
+	DB       int
+
+	MaxRetries      int
+	MinRetryBackoff time.Duration
+	MaxRetryBackoff time.Duration
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+
+	TLSConfig *tls.Config
+}
+
+func (opt *FailoverOptions) clientOptions() *Options {
+	return &Options{
+		Addr: "FailoverClient",
+
+		Dialer:    opt.Dialer,
+		OnConnect: opt.OnConnect,
+
+		DB:       opt.DB,
+		Username: opt.Username,
+		Password: opt.Password,
+
+		MaxRetries:      opt.MaxRetries,
+		MinRetryBackoff: opt.MinRetryBackoff,
+		MaxRetryBackoff: opt.MaxRetryBackoff,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+
+		TLSConfig: opt.TLSConfig,
+	}
+}
+
+func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
+	return &Options{
+		Addr: addr,
+
+		Dialer:    opt.Dialer,
+		OnConnect: opt.OnConnect,
+
+		DB:       0,
+		Username: opt.SentinelUsername,
+		Password: opt.SentinelPassword,
+
+		MaxRetries:      opt.MaxRetries,
+		MinRetryBackoff: opt.MinRetryBackoff,
+		MaxRetryBackoff: opt.MaxRetryBackoff,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+
+		TLSConfig: opt.TLSConfig,
+	}
+}
+
+func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
+	return &ClusterOptions{
+		Dialer:    opt.Dialer,
+		OnConnect: opt.OnConnect,
+
+		Username: opt.Username,
+		Password: opt.Password,
+
+		MaxRedirects: opt.MaxRetries,
+
+		RouteByLatency: opt.RouteByLatency,
+		RouteRandomly:  opt.RouteRandomly,
+
+		MinRetryBackoff: opt.MinRetryBackoff,
+		MaxRetryBackoff: opt.MaxRetryBackoff,
+
+		DialTimeout:  opt.DialTimeout,
+		ReadTimeout:  opt.ReadTimeout,
+		WriteTimeout: opt.WriteTimeout,
+
+		PoolFIFO:           opt.PoolFIFO,
+		PoolSize:           opt.PoolSize,
+		PoolTimeout:        opt.PoolTimeout,
+		IdleTimeout:        opt.IdleTimeout,
+		IdleCheckFrequency: opt.IdleCheckFrequency,
+		MinIdleConns:       opt.MinIdleConns,
+		MaxConnAge:         opt.MaxConnAge,
+
+		TLSConfig: opt.TLSConfig,
+	}
+}
+
+// NewFailoverClient returns a Redis client that uses Redis Sentinel
+// for automatic failover. It's safe for concurrent use by multiple
+// goroutines.
+func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
+	if failoverOpt.RouteByLatency {
+		panic("to route commands by latency, use NewFailoverClusterClient")
+	}
+	if failoverOpt.RouteRandomly {
+		panic("to route commands randomly, use NewFailoverClusterClient")
+	}
+
+	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+	copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+	rand.Shuffle(len(sentinelAddrs), func(i, j int) {
+		sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
+	})
+
+	failover := &sentinelFailover{
+		opt:           failoverOpt,
+		sentinelAddrs: sentinelAddrs,
+	}
+
+	opt := failoverOpt.clientOptions()
+	opt.Dialer = masterSlaveDialer(failover)
+	opt.init()
+
+	connPool := newConnPool(opt)
+
+	failover.mu.Lock()
+	failover.onFailover = func(ctx context.Context, addr string) {
+		_ = connPool.Filter(func(cn *pool.Conn) bool {
+			return cn.RemoteAddr().String() != addr
+		})
+	}
+	failover.mu.Unlock()
+
+	c := Client{
+		baseClient: newBaseClient(opt, connPool),
+		ctx:        context.Background(),
+	}
+	c.cmdable = c.Process
+	c.onClose = failover.Close
+
+	return &c
+}
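A minimal usage sketch (not part of the vendored file) for NewFailoverClient; the master name and Sentinel addresses are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// Hypothetical Sentinel deployment; master name and addresses are placeholders.
	rdb := redis.NewFailoverClient(&redis.FailoverOptions{
		MasterName:    "mymaster",
		SentinelAddrs: []string{"localhost:26379", "localhost:26380", "localhost:26381"},
	})
	defer rdb.Close()

	// Commands are routed to the current master; after a failover the connection
	// pool is filtered so that connections to the old master are dropped.
	fmt.Println(rdb.Ping(ctx).Err())
}
```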
+
+func masterSlaveDialer(
+	failover *sentinelFailover,
+) func(ctx context.Context, network, addr string) (net.Conn, error) {
+	return func(ctx context.Context, network, _ string) (net.Conn, error) {
+		var addr string
+		var err error
+
+		if failover.opt.SlaveOnly {
+			addr, err = failover.RandomSlaveAddr(ctx)
+		} else {
+			addr, err = failover.MasterAddr(ctx)
+			if err == nil {
+				failover.trySwitchMaster(ctx, addr)
+			}
+		}
+		if err != nil {
+			return nil, err
+		}
+		if failover.opt.Dialer != nil {
+			return failover.opt.Dialer(ctx, network, addr)
+		}
+
+		netDialer := &net.Dialer{
+			Timeout:   failover.opt.DialTimeout,
+			KeepAlive: 5 * time.Minute,
+		}
+		if failover.opt.TLSConfig == nil {
+			return netDialer.DialContext(ctx, network, addr)
+		}
+		return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
+	}
+}
+
+//------------------------------------------------------------------------------
+
+// SentinelClient is a client for a Redis Sentinel.
+type SentinelClient struct {
+	*baseClient
+	hooks
+	ctx context.Context
+}
+
+func NewSentinelClient(opt *Options) *SentinelClient {
+	opt.init()
+	c := &SentinelClient{
+		baseClient: &baseClient{
+			opt:      opt,
+			connPool: newConnPool(opt),
+		},
+		ctx: context.Background(),
+	}
+	return c
+}
+
+func (c *SentinelClient) Context() context.Context {
+	return c.ctx
+}
+
+func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := *c
+	clone.ctx = ctx
+	return &clone
+}
+
+func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+func (c *SentinelClient) pubSub() *PubSub {
+	pubsub := &PubSub{
+		opt: c.opt,
+
+		newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
+			return c.newConn(ctx)
+		},
+		closeConn: c.connPool.CloseConn,
+	}
+	pubsub.init()
+	return pubsub
+}
+
+// Ping is used to test if a connection is still alive, or to
+// measure latency.
+func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
+	cmd := NewStringCmd(ctx, "ping")
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Subscribe subscribes the client to the specified channels.
+// Channels can be omitted to create an empty subscription.
+func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.Subscribe(ctx, channels...)
+	}
+	return pubsub
+}
+
+// PSubscribe subscribes the client to the given patterns.
+// Patterns can be omitted to create an empty subscription.
+func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
+	pubsub := c.pubSub()
+	if len(channels) > 0 {
+		_ = pubsub.PSubscribe(ctx, channels...)
+	}
+	return pubsub
+}
+
+func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
+	cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
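For context, a small sketch (not part of the vendored file) that queries a Sentinel directly for the current master address; the Sentinel address and master name are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// Hypothetical Sentinel address and master name.
	sentinel := redis.NewSentinelClient(&redis.Options{Addr: "localhost:26379"})
	defer sentinel.Close()

	// GetMasterAddrByName returns the [ip, port] pair of the current master.
	addr, err := sentinel.GetMasterAddrByName(ctx, "mymaster").Result()
	if err == nil && len(addr) == 2 {
		fmt.Printf("master is at %s:%s\n", addr[0], addr[1])
	}
}
```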
+
+func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
+	cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Failover forces a failover as if the master were not reachable, and without
+// asking the other Sentinels for agreement.
+func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Reset resets all the masters with matching name. The pattern argument is a
+// glob-style pattern. The reset process clears any previous state in a master
+// (including a failover in progress), and removes every slave and sentinel
+// already discovered and associated with the master.
+func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
+	cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// FlushConfig forces Sentinel to rewrite its configuration on disk, including
+// the current Sentinel state.
+func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
+	cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Master shows the state and info of the specified master.
+func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
+	cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Masters shows a list of monitored masters and their state.
+func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
+	cmd := NewSliceCmd(ctx, "sentinel", "masters")
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Slaves shows a list of slaves for the specified master and their state.
+func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
+	cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// CkQuorum checks if the current Sentinel configuration is able to reach the
+// quorum needed to failover a master, and the majority needed to authorize the
+// failover. This command should be used in monitoring systems to check if a
+// Sentinel deployment is ok.
+func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
+	cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Monitor tells the Sentinel to start monitoring a new master with the specified
+// name, ip, port, and quorum.
+func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
+	cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Set is used to change configuration parameters of a specific master.
+func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
+	cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Remove is used to remove the specified master: the master will no longer be
+// monitored and will be completely removed from the Sentinel's internal state.
+func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
+	cmd := NewStringCmd(ctx, "sentinel", "remove", name)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+//------------------------------------------------------------------------------
+
+type sentinelFailover struct {
+	opt *FailoverOptions
+
+	sentinelAddrs []string
+
+	onFailover func(ctx context.Context, addr string)
+	onUpdate   func(ctx context.Context)
+
+	mu          sync.RWMutex
+	_masterAddr string
+	sentinel    *SentinelClient
+	pubsub      *PubSub
+}
+
+func (c *sentinelFailover) Close() error {
+	c.mu.Lock()
+	defer c.mu.Unlock()
+	if c.sentinel != nil {
+		return c.closeSentinel()
+	}
+	return nil
+}
+
+func (c *sentinelFailover) closeSentinel() error {
+	firstErr := c.pubsub.Close()
+	c.pubsub = nil
+
+	err := c.sentinel.Close()
+	if err != nil && firstErr == nil {
+		firstErr = err
+	}
+	c.sentinel = nil
+
+	return firstErr
+}
+
+func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
+	if c.opt == nil {
+		return "", errors.New("opt is nil")
+	}
+
+	addresses, err := c.slaveAddrs(ctx, false)
+	if err != nil {
+		return "", err
+	}
+
+	if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
+		addresses, err = c.slaveAddrs(ctx, true)
+		if err != nil {
+			return "", err
+		}
+	}
+
+	if len(addresses) == 0 {
+		return c.MasterAddr(ctx)
+	}
+	return addresses[rand.Intn(len(addresses))], nil
+}
+
+func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
+	c.mu.RLock()
+	sentinel := c.sentinel
+	c.mu.RUnlock()
+
+	if sentinel != nil {
+		addr := c.getMasterAddr(ctx, sentinel)
+		if addr != "" {
+			return addr, nil
+		}
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.sentinel != nil {
+		addr := c.getMasterAddr(ctx, c.sentinel)
+		if addr != "" {
+			return addr, nil
+		}
+		_ = c.closeSentinel()
+	}
+
+	for i, sentinelAddr := range c.sentinelAddrs {
+		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+		masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+		if err != nil {
+			internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
+				c.opt.MasterName, err)
+			_ = sentinel.Close()
+			continue
+		}
+
+		// Push working sentinel to the top.
+		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+		c.setSentinel(ctx, sentinel)
+
+		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
+		return addr, nil
+	}
+
+	return "", errors.New("redis: all sentinels specified in configuration are unreachable")
+}
+
+func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
+	c.mu.RLock()
+	sentinel := c.sentinel
+	c.mu.RUnlock()
+
+	if sentinel != nil {
+		addrs := c.getSlaveAddrs(ctx, sentinel)
+		if len(addrs) > 0 {
+			return addrs, nil
+		}
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if c.sentinel != nil {
+		addrs := c.getSlaveAddrs(ctx, c.sentinel)
+		if len(addrs) > 0 {
+			return addrs, nil
+		}
+		_ = c.closeSentinel()
+	}
+
+	var sentinelReachable bool
+
+	for i, sentinelAddr := range c.sentinelAddrs {
+		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
+
+		slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+		if err != nil {
+			internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
+				c.opt.MasterName, err)
+			_ = sentinel.Close()
+			continue
+		}
+		sentinelReachable = true
+		addrs := parseSlaveAddrs(slaves, useDisconnected)
+		if len(addrs) == 0 {
+			continue
+		}
+		// Push working sentinel to the top.
+		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
+		c.setSentinel(ctx, sentinel)
+
+		return addrs, nil
+	}
+
+	if sentinelReachable {
+		return []string{}, nil
+	}
+	return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
+}
+
+func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
+	addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
+	if err != nil {
+		internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
+			c.opt.MasterName, err)
+		return ""
+	}
+	return net.JoinHostPort(addr[0], addr[1])
+}
+
+func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
+	addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
+	if err != nil {
+		internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
+			c.opt.MasterName, err)
+		return []string{}
+	}
+	return parseSlaveAddrs(addrs, false)
+}
+
+func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
+	nodes := make([]string, 0, len(addrs))
+	for _, node := range addrs {
+		ip := ""
+		port := ""
+		flags := []string{}
+		lastkey := ""
+		isDown := false
+
+		for _, key := range node.([]interface{}) {
+			switch lastkey {
+			case "ip":
+				ip = key.(string)
+			case "port":
+				port = key.(string)
+			case "flags":
+				flags = strings.Split(key.(string), ",")
+			}
+			lastkey = key.(string)
+		}
+
+		for _, flag := range flags {
+			switch flag {
+			case "s_down", "o_down":
+				isDown = true
+			case "disconnected":
+				if !keepDisconnected {
+					isDown = true
+				}
+			}
+		}
+
+		if !isDown {
+			nodes = append(nodes, net.JoinHostPort(ip, port))
+		}
+	}
+
+	return nodes
+}
+
+func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
+	c.mu.RLock()
+	currentAddr := c._masterAddr //nolint:ifshort
+	c.mu.RUnlock()
+
+	if addr == currentAddr {
+		return
+	}
+
+	c.mu.Lock()
+	defer c.mu.Unlock()
+
+	if addr == c._masterAddr {
+		return
+	}
+	c._masterAddr = addr
+
+	internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
+		c.opt.MasterName, addr)
+	if c.onFailover != nil {
+		c.onFailover(ctx, addr)
+	}
+}
+
+func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
+	if c.sentinel != nil {
+		panic("not reached")
+	}
+	c.sentinel = sentinel
+	c.discoverSentinels(ctx)
+
+	c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
+	go c.listen(c.pubsub)
+}
+
+func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
+	sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
+	if err != nil {
+		internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
+		return
+	}
+	for _, sentinel := range sentinels {
+		vals := sentinel.([]interface{})
+		var ip, port string
+		for i := 0; i < len(vals); i += 2 {
+			key := vals[i].(string)
+			switch key {
+			case "ip":
+				ip = vals[i+1].(string)
+			case "port":
+				port = vals[i+1].(string)
+			}
+		}
+		if ip != "" && port != "" {
+			sentinelAddr := net.JoinHostPort(ip, port)
+			if !contains(c.sentinelAddrs, sentinelAddr) {
+				internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
+					sentinelAddr, c.opt.MasterName)
+				c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
+			}
+		}
+	}
+}
+
+func (c *sentinelFailover) listen(pubsub *PubSub) {
+	ctx := context.TODO()
+
+	if c.onUpdate != nil {
+		c.onUpdate(ctx)
+	}
+
+	ch := pubsub.Channel()
+	for msg := range ch {
+		if msg.Channel == "+switch-master" {
+			parts := strings.Split(msg.Payload, " ")
+			if parts[0] != c.opt.MasterName {
+				internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
+				continue
+			}
+			addr := net.JoinHostPort(parts[3], parts[4])
+			c.trySwitchMaster(pubsub.getContext(), addr)
+		}
+
+		if c.onUpdate != nil {
+			c.onUpdate(ctx)
+		}
+	}
+}
+
+func contains(slice []string, str string) bool {
+	for _, s := range slice {
+		if s == str {
+			return true
+		}
+	}
+	return false
+}
+
+//------------------------------------------------------------------------------
+
+// NewFailoverClusterClient returns a client that supports routing read-only commands
+// to a slave node.
+func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
+	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
+	copy(sentinelAddrs, failoverOpt.SentinelAddrs)
+
+	failover := &sentinelFailover{
+		opt:           failoverOpt,
+		sentinelAddrs: sentinelAddrs,
+	}
+
+	opt := failoverOpt.clusterOptions()
+	opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
+		masterAddr, err := failover.MasterAddr(ctx)
+		if err != nil {
+			return nil, err
+		}
+
+		nodes := []ClusterNode{{
+			Addr: masterAddr,
+		}}
+
+		slaveAddrs, err := failover.slaveAddrs(ctx, false)
+		if err != nil {
+			return nil, err
+		}
+
+		for _, slaveAddr := range slaveAddrs {
+			nodes = append(nodes, ClusterNode{
+				Addr: slaveAddr,
+			})
+		}
+
+		slots := []ClusterSlot{
+			{
+				Start: 0,
+				End:   16383,
+				Nodes: nodes,
+			},
+		}
+		return slots, nil
+	}
+
+	c := NewClusterClient(opt)
+
+	failover.mu.Lock()
+	failover.onUpdate = func(ctx context.Context) {
+		c.ReloadState(ctx)
+	}
+	failover.mu.Unlock()
+
+	return c
+}
diff --git a/vendor/github.com/go-redis/redis/v8/tx.go b/vendor/github.com/go-redis/redis/v8/tx.go
new file mode 100644
index 0000000..8c9d872
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/tx.go
@@ -0,0 +1,149 @@
+package redis
+
+import (
+	"context"
+
+	"github.com/go-redis/redis/v8/internal/pool"
+	"github.com/go-redis/redis/v8/internal/proto"
+)
+
+// TxFailedErr is returned when a Redis transaction fails.
+const TxFailedErr = proto.RedisError("redis: transaction failed")
+
+// Tx implements Redis transactions as described in
+// http://redis.io/topics/transactions. It's NOT safe for concurrent use
+// by multiple goroutines, because Exec resets the list of watched keys.
+//
+// If you don't need WATCH, use Pipeline instead.
+type Tx struct {
+	baseClient
+	cmdable
+	statefulCmdable
+	hooks
+	ctx context.Context
+}
+
+func (c *Client) newTx(ctx context.Context) *Tx {
+	tx := Tx{
+		baseClient: baseClient{
+			opt:      c.opt,
+			connPool: pool.NewStickyConnPool(c.connPool),
+		},
+		hooks: c.hooks.clone(),
+		ctx:   ctx,
+	}
+	tx.init()
+	return &tx
+}
+
+func (c *Tx) init() {
+	c.cmdable = c.Process
+	c.statefulCmdable = c.Process
+}
+
+func (c *Tx) Context() context.Context {
+	return c.ctx
+}
+
+func (c *Tx) WithContext(ctx context.Context) *Tx {
+	if ctx == nil {
+		panic("nil context")
+	}
+	clone := *c
+	clone.init()
+	clone.hooks.lock()
+	clone.ctx = ctx
+	return &clone
+}
+
+func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
+	return c.hooks.process(ctx, cmd, c.baseClient.process)
+}
+
+// Watch prepares a transaction and marks the keys to be watched
+// for conditional execution if there are any keys.
+//
+// The transaction is automatically closed when fn exits.
+func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
+	tx := c.newTx(ctx)
+	defer tx.Close(ctx)
+	if len(keys) > 0 {
+		if err := tx.Watch(ctx, keys...).Err(); err != nil {
+			return err
+		}
+	}
+	return fn(tx)
+}
+
+// Close closes the transaction, releasing any open resources.
+func (c *Tx) Close(ctx context.Context) error {
+	_ = c.Unwatch(ctx).Err()
+	return c.baseClient.Close()
+}
+
+// Watch marks the keys to be watched for conditional execution
+// of a transaction.
+func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "watch"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Unwatch flushes all the previously watched keys for a transaction.
+func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
+	args := make([]interface{}, 1+len(keys))
+	args[0] = "unwatch"
+	for i, key := range keys {
+		args[1+i] = key
+	}
+	cmd := NewStatusCmd(ctx, args...)
+	_ = c.Process(ctx, cmd)
+	return cmd
+}
+
+// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
+func (c *Tx) Pipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx: c.ctx,
+		exec: func(ctx context.Context, cmds []Cmder) error {
+			return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
+		},
+	}
+	pipe.init()
+	return &pipe
+}
+
+// Pipelined executes commands queued in the fn outside of the transaction.
+// Use TxPipelined if you need transactional behavior.
+func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.Pipeline().Pipelined(ctx, fn)
+}
+
+// TxPipelined executes commands queued in the fn in the transaction.
+//
+// When using WATCH, EXEC will execute commands only if the watched keys
+// were not modified, allowing for a check-and-set mechanism.
+//
+// Exec always returns a list of commands. If the transaction fails,
+// TxFailedErr is returned. Otherwise Exec returns the error of the first
+// failed command, or nil.
+func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
+	return c.TxPipeline().Pipelined(ctx, fn)
+}
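A sketch (not part of the vendored file) of the optimistic check-and-set pattern described above: the key is watched and read, and the write executes only if the key was not modified in between; otherwise Watch returns TxFailedErr and the caller can retry. The key name is a placeholder.

```go
package main

import (
	"context"
	"strconv"

	"github.com/go-redis/redis/v8"
)

// increment increments a counter using WATCH/MULTI/EXEC. If the watched key
// changes between Get and Exec, Watch returns TxFailedErr.
func increment(ctx context.Context, rdb *redis.Client, key string) error {
	return rdb.Watch(ctx, func(tx *redis.Tx) error {
		n, err := tx.Get(ctx, key).Int()
		if err != nil && err != redis.Nil {
			return err
		}

		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, strconv.Itoa(n+1), 0)
			return nil
		})
		return err
	}, key)
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()
	_ = increment(context.Background(), rdb, "counter")
}
```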
+
+// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
+func (c *Tx) TxPipeline() Pipeliner {
+	pipe := Pipeline{
+		ctx: c.ctx,
+		exec: func(ctx context.Context, cmds []Cmder) error {
+			return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
+		},
+	}
+	pipe.init()
+	return &pipe
+}
diff --git a/vendor/github.com/go-redis/redis/v8/universal.go b/vendor/github.com/go-redis/redis/v8/universal.go
new file mode 100644
index 0000000..c89b3e5
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/universal.go
@@ -0,0 +1,215 @@
+package redis
+
+import (
+	"context"
+	"crypto/tls"
+	"net"
+	"time"
+)
+
+// UniversalOptions information is required by UniversalClient to establish
+// connections.
+type UniversalOptions struct {
+	// Either a single address or a seed list of host:port addresses
+	// of cluster/sentinel nodes.
+	Addrs []string
+
+	// Database to be selected after connecting to the server.
+	// Only single-node and failover clients.
+	DB int
+
+	// Common options.
+
+	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
+	OnConnect func(ctx context.Context, cn *Conn) error
+
+	Username         string
+	Password         string
+	SentinelUsername string
+	SentinelPassword string
+
+	MaxRetries      int
+	MinRetryBackoff time.Duration
+	MaxRetryBackoff time.Duration
+
+	DialTimeout  time.Duration
+	ReadTimeout  time.Duration
+	WriteTimeout time.Duration
+
+	// PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
+	PoolFIFO bool
+
+	PoolSize           int
+	MinIdleConns       int
+	MaxConnAge         time.Duration
+	PoolTimeout        time.Duration
+	IdleTimeout        time.Duration
+	IdleCheckFrequency time.Duration
+
+	TLSConfig *tls.Config
+
+	// Only cluster clients.
+
+	MaxRedirects   int
+	ReadOnly       bool
+	RouteByLatency bool
+	RouteRandomly  bool
+
+	// The sentinel master name.
+	// Only failover clients.
+
+	MasterName string
+}
+
+// Cluster returns cluster options created from the universal options.
+func (o *UniversalOptions) Cluster() *ClusterOptions {
+	if len(o.Addrs) == 0 {
+		o.Addrs = []string{"127.0.0.1:6379"}
+	}
+
+	return &ClusterOptions{
+		Addrs:     o.Addrs,
+		Dialer:    o.Dialer,
+		OnConnect: o.OnConnect,
+
+		Username: o.Username,
+		Password: o.Password,
+
+		MaxRedirects:   o.MaxRedirects,
+		ReadOnly:       o.ReadOnly,
+		RouteByLatency: o.RouteByLatency,
+		RouteRandomly:  o.RouteRandomly,
+
+		MaxRetries:      o.MaxRetries,
+		MinRetryBackoff: o.MinRetryBackoff,
+		MaxRetryBackoff: o.MaxRetryBackoff,
+
+		DialTimeout:        o.DialTimeout,
+		ReadTimeout:        o.ReadTimeout,
+		WriteTimeout:       o.WriteTimeout,
+		PoolFIFO:           o.PoolFIFO,
+		PoolSize:           o.PoolSize,
+		MinIdleConns:       o.MinIdleConns,
+		MaxConnAge:         o.MaxConnAge,
+		PoolTimeout:        o.PoolTimeout,
+		IdleTimeout:        o.IdleTimeout,
+		IdleCheckFrequency: o.IdleCheckFrequency,
+
+		TLSConfig: o.TLSConfig,
+	}
+}
+
+// Failover returns failover options created from the universal options.
+func (o *UniversalOptions) Failover() *FailoverOptions {
+	if len(o.Addrs) == 0 {
+		o.Addrs = []string{"127.0.0.1:26379"}
+	}
+
+	return &FailoverOptions{
+		SentinelAddrs: o.Addrs,
+		MasterName:    o.MasterName,
+
+		Dialer:    o.Dialer,
+		OnConnect: o.OnConnect,
+
+		DB:               o.DB,
+		Username:         o.Username,
+		Password:         o.Password,
+		SentinelUsername: o.SentinelUsername,
+		SentinelPassword: o.SentinelPassword,
+
+		MaxRetries:      o.MaxRetries,
+		MinRetryBackoff: o.MinRetryBackoff,
+		MaxRetryBackoff: o.MaxRetryBackoff,
+
+		DialTimeout:  o.DialTimeout,
+		ReadTimeout:  o.ReadTimeout,
+		WriteTimeout: o.WriteTimeout,
+
+		PoolFIFO:           o.PoolFIFO,
+		PoolSize:           o.PoolSize,
+		MinIdleConns:       o.MinIdleConns,
+		MaxConnAge:         o.MaxConnAge,
+		PoolTimeout:        o.PoolTimeout,
+		IdleTimeout:        o.IdleTimeout,
+		IdleCheckFrequency: o.IdleCheckFrequency,
+
+		TLSConfig: o.TLSConfig,
+	}
+}
+
+// Simple returns basic options created from the universal options.
+func (o *UniversalOptions) Simple() *Options {
+	addr := "127.0.0.1:6379"
+	if len(o.Addrs) > 0 {
+		addr = o.Addrs[0]
+	}
+
+	return &Options{
+		Addr:      addr,
+		Dialer:    o.Dialer,
+		OnConnect: o.OnConnect,
+
+		DB:       o.DB,
+		Username: o.Username,
+		Password: o.Password,
+
+		MaxRetries:      o.MaxRetries,
+		MinRetryBackoff: o.MinRetryBackoff,
+		MaxRetryBackoff: o.MaxRetryBackoff,
+
+		DialTimeout:  o.DialTimeout,
+		ReadTimeout:  o.ReadTimeout,
+		WriteTimeout: o.WriteTimeout,
+
+		PoolFIFO:           o.PoolFIFO,
+		PoolSize:           o.PoolSize,
+		MinIdleConns:       o.MinIdleConns,
+		MaxConnAge:         o.MaxConnAge,
+		PoolTimeout:        o.PoolTimeout,
+		IdleTimeout:        o.IdleTimeout,
+		IdleCheckFrequency: o.IdleCheckFrequency,
+
+		TLSConfig: o.TLSConfig,
+	}
+}
+
+// --------------------------------------------------------------------
+
+// UniversalClient is an abstract client which - based on the provided options -
+// represents either a ClusterClient, a FailoverClient, or a single-node Client.
+// This can be useful for testing cluster-specific applications locally, or for
+// using different clients in different environments.
+type UniversalClient interface {
+	Cmdable
+	Context() context.Context
+	AddHook(Hook)
+	Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
+	Do(ctx context.Context, args ...interface{}) *Cmd
+	Process(ctx context.Context, cmd Cmder) error
+	Subscribe(ctx context.Context, channels ...string) *PubSub
+	PSubscribe(ctx context.Context, channels ...string) *PubSub
+	Close() error
+	PoolStats() *PoolStats
+}
+
+var (
+	_ UniversalClient = (*Client)(nil)
+	_ UniversalClient = (*ClusterClient)(nil)
+	_ UniversalClient = (*Ring)(nil)
+)
+
+// NewUniversalClient returns a new multi client. The type of the returned client depends
+// on the following conditions:
+//
+// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned.
+// 2. If the number of Addrs is two or more, a ClusterClient is returned.
+// 3. Otherwise, a single-node Client is returned.
+func NewUniversalClient(opts *UniversalOptions) UniversalClient {
+	if opts.MasterName != "" {
+		return NewFailoverClient(opts.Failover())
+	} else if len(opts.Addrs) > 1 {
+		return NewClusterClient(opts.Cluster())
+	}
+	return NewClient(opts.Simple())
+}
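A short sketch (not part of the vendored file) showing how the selection rules above play out; the addresses and master name are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// One address and no MasterName: a single-node Client is returned.
	single := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs: []string{"localhost:6379"},
	})
	defer single.Close()
	fmt.Println(single.Ping(ctx).Err())

	// MasterName set: a Sentinel-backed failover client is returned and the
	// addresses are treated as Sentinel addresses (placeholders here).
	failover := redis.NewUniversalClient(&redis.UniversalOptions{
		Addrs:      []string{"localhost:26379"},
		MasterName: "mymaster",
	})
	defer failover.Close()
	fmt.Println(failover.Ping(ctx).Err())
}
```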
diff --git a/vendor/github.com/go-redis/redis/v8/version.go b/vendor/github.com/go-redis/redis/v8/version.go
new file mode 100644
index 0000000..112c9a2
--- /dev/null
+++ b/vendor/github.com/go-redis/redis/v8/version.go
@@ -0,0 +1,6 @@
+package redis
+
+// Version is the current release version.
+func Version() string {
+	return "8.11.5"
+}
diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS
new file mode 100644
index 0000000..3d97fc7
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/AUTHORS
@@ -0,0 +1,15 @@
+# This is the official list of GoGo authors for copyright purposes.
+# This file is distinct from the CONTRIBUTORS file, which
+# lists people.  For example, employees are listed in CONTRIBUTORS,
+# but not in AUTHORS, because the employer holds the copyright.
+
+# Names should be added to this file as one of
+#     Organization's name
+#     Individual's name <submission email address>
+#     Individual's name <submission email address> <email2> <emailN>
+
+# Please keep the list sorted.
+
+Sendgrid, Inc
+Vastech SA (PTY) LTD
+Walter Schulze <awalterschulze@gmail.com>
diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1b4f6c2
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS
@@ -0,0 +1,23 @@
+Anton Povarov <anton.povarov@gmail.com>
+Brian Goff <cpuguy83@gmail.com>
+Clayton Coleman <ccoleman@redhat.com>
+Denis Smirnov <denis.smirnov.91@gmail.com>
+DongYun Kang <ceram1000@gmail.com>
+Dwayne Schultz <dschultz@pivotal.io>
+Georg Apitz <gapitz@pivotal.io>
+Gustav Paul <gustav.paul@gmail.com>
+Johan Brandhorst <johan.brandhorst@gmail.com>
+John Shahid <jvshahid@gmail.com>
+John Tuley <john@tuley.org>
+Laurent <laurent@adyoulike.com>
+Patrick Lee <patrick@dropbox.com>
+Peter Edge <peter.edge@gmail.com>
+Roger Johansson <rogeralsing@gmail.com>
+Sam Nguyen <sam.nguyen@sendgrid.com>
+Sergio Arbeo <serabe@gmail.com>
+Stephen J Day <stephen.day@docker.com>
+Tamir Duberstein <tamird@gmail.com>
+Todd Eisenberger <teisenberger@dropbox.com>
+Tormod Erevik Lea <tormodlea@gmail.com>
+Vyacheslav Kim <kane@sendgrid.com>
+Walter Schulze <awalterschulze@gmail.com>
diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE
new file mode 100644
index 0000000..f57de90
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/LICENSE
@@ -0,0 +1,35 @@
+Copyright (c) 2013, The GoGo Authors. All rights reserved.
+
+Protocol Buffers for Go with Gadgets
+
+Go support for Protocol Buffers - Google's data interchange format
+
+Copyright 2010 The Go Authors.  All rights reserved.
+https://github.com/golang/protobuf
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile
new file mode 100644
index 0000000..0b4659b
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile
@@ -0,0 +1,37 @@
+# Protocol Buffers for Go with Gadgets
+#
+# Copyright (c) 2013, The GoGo Authors. All rights reserved.
+# http://github.com/gogo/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+regenerate:
+	go install github.com/gogo/protobuf/protoc-gen-gogo
+	protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto
+
+restore:
+	cp gogo.pb.golden gogo.pb.go
+
+preserve:
+	cp gogo.pb.go gogo.pb.golden
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go
new file mode 100644
index 0000000..081c86f
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go
@@ -0,0 +1,169 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package gogoproto provides extensions for protocol buffers to achieve:
+
+  - fast marshalling and unmarshalling.
+  - peace of mind by optionally generating test and benchmark code.
+  - more canonical Go structures.
+  - less typing by optionally generating extra helper code.
+  - goprotobuf compatibility
+
+More Canonical Go Structures
+
+Much of the time, working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then write a function to copy the values between the two structs.
+You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a parallel struct.
+Gogoprotobuf tries to fix these problems with the nullable, embed, customtype and customname field extensions.
+
+  - nullable, if false, a field is generated without a pointer (see warning below).
+  - embed, if true, the field is generated as an embedded field.
+  - customtype, it works with the Marshal and Unmarshal methods to allow you to have your own types in your struct, but marshal them to bytes. For example, custom.Uuid or custom.Fixed128.
+  - customname (beta), Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames.
+  - casttype (beta), Changes the generated fieldtype.  All generated code assumes that this type is castable to the protocol buffer field type.  It does not work for structs or enums.
+  - castkey (beta), Changes the generated fieldtype for a map key.  All generated code assumes that this type is castable to the protocol buffer field type.  Only supported on maps.
+  - castvalue (beta), Changes the generated fieldtype for a map value.  All generated code assumes that this type is castable to the protocol buffer field type.  Only supported on maps.
+
+Warning about nullable: According to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set. It can be seen as a layer on top of Protocol Buffers, where before and after marshalling all non-nullable fields are set and they cannot be unset.
+
+Let us look at:
+
+	github.com/gogo/protobuf/test/example/example.proto
+
+for a quicker overview.
+
+The following message:
+
+  package test;
+
+  import "github.com/gogo/protobuf/gogoproto/gogo.proto";
+
+	message A {
+		optional string Description = 1 [(gogoproto.nullable) = false];
+		optional int64 Number = 2 [(gogoproto.nullable) = false];
+		optional bytes Id = 3 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uuid", (gogoproto.nullable) = false];
+	}
+
+Will generate a go struct which looks a lot like this:
+
+	type A struct {
+		Description string
+		Number      int64
+		Id          github_com_gogo_protobuf_test_custom.Uuid
+	}
+
+You will see there are no pointers, since all fields are non-nullable.
+You will also see a custom type which marshals to a string.
+Be warned it is your responsibility to test your custom types thoroughly.
+You should think of every possible empty and nil case for your marshaling, unmarshaling and size methods.
+
+Next we will embed the message A in message B.
+
+	message B {
+		optional A A = 1 [(gogoproto.nullable) = false, (gogoproto.embed) = true];
+		repeated bytes G = 2 [(gogoproto.customtype) = "github.com/gogo/protobuf/test/custom.Uint128", (gogoproto.nullable) = false];
+	}
+
+See below that A is embedded in B.
+
+	type B struct {
+		A
+		G []github_com_gogo_protobuf_test_custom.Uint128
+	}
+
+Also see the repeated custom type.
+
+	type Uint128 [2]uint64
+
+Next we will create a custom name for one of our fields.
+
+	message C {
+		optional int64 size = 1 [(gogoproto.customname) = "MySize"];
+	}
+
+See below that the field's name is MySize and not Size.
+
+	type C struct {
+		MySize		*int64
+	}
+
+This is useful when a protocol buffer message has a field name that conflicts with a generated method.
+As an example, having a field named size and using the sizer plugin to generate a Size method will cause a go compiler error.
+Using customname you can fix this error without changing the field name.
+This is typically useful when working with a protocol buffer that was designed before these methods and/or the go language were available.
+
+Gogoprotobuf also has some more subtle changes, these could be changed back:
+
+  - the generated package names for imports do not have the extra /filename.pb,
+  but are actually the imports specified in the .proto file.
+
+Gogoprotobuf also has lost some features which should be brought back with time:
+
+  - Marshalling and unmarshalling with reflect and without the unsafe package,
+  this requires work in pointer_reflect.go
+
+Why does nullable break protocol buffer specifications:
+
+The protocol buffer specification states, somewhere, that you should be able to tell whether a
+field is set or unset.  With the option nullable=false this feature is lost,
+since your non-nullable fields will always be set.  It can be seen as a layer on top of
+protocol buffers, where before and after marshalling all non-nullable fields are set
+and they cannot be unset.
+
+Goprotobuf Compatibility:
+
+Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers.
+Gogoprotobuf generates the same code as goprotobuf if no extensions are used.
+The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf:
+
+  - gogoproto_import, if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto.
+  - goproto_enum_prefix, if false, generates the enum constant names without the messagetype prefix
+  - goproto_enum_stringer (experimental), if false, the enum is generated without the default string method; this is useful if you would rather use enum_stringer, or write your own string method.
+  - goproto_getters, if false, the message is generated without get methods; this is useful if you would rather use face.
+  - goproto_stringer, if false, the message is generated without the default string method; this is useful if you would rather use stringer, or write your own string method.
+  - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension
+  - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
+  - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway).
+
+Less Typing and Peace of Mind are explained in the godoc of their specific plugin folders:
+
+	- github.com/gogo/protobuf/plugin/<extension_name>
+
+If you do not use any of these extensions, the code that is generated
+will be the same as if goprotobuf had generated it.
+
+The most complete way to see examples is to look at
+
+	github.com/gogo/protobuf/test/thetest.proto
+
+Gogoprototest is a separate project,
+because we want to keep gogoprotobuf independent of goprotobuf,
+but we still want to test it thoroughly.
+
+*/
+package gogoproto
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
new file mode 100644
index 0000000..1e91766
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
@@ -0,0 +1,874 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: gogo.proto
+
+package gogoproto
+
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+var E_GoprotoEnumPrefix = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.EnumOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         62001,
+	Name:          "gogoproto.goproto_enum_prefix",
+	Tag:           "varint,62001,opt,name=goproto_enum_prefix",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoEnumStringer = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.EnumOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         62021,
+	Name:          "gogoproto.goproto_enum_stringer",
+	Tag:           "varint,62021,opt,name=goproto_enum_stringer",
+	Filename:      "gogo.proto",
+}
+
+var E_EnumStringer = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.EnumOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         62022,
+	Name:          "gogoproto.enum_stringer",
+	Tag:           "varint,62022,opt,name=enum_stringer",
+	Filename:      "gogo.proto",
+}
+
+var E_EnumCustomname = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.EnumOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         62023,
+	Name:          "gogoproto.enum_customname",
+	Tag:           "bytes,62023,opt,name=enum_customname",
+	Filename:      "gogo.proto",
+}
+
+var E_Enumdecl = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.EnumOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         62024,
+	Name:          "gogoproto.enumdecl",
+	Tag:           "varint,62024,opt,name=enumdecl",
+	Filename:      "gogo.proto",
+}
+
+var E_EnumvalueCustomname = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.EnumValueOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         66001,
+	Name:          "gogoproto.enumvalue_customname",
+	Tag:           "bytes,66001,opt,name=enumvalue_customname",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoGettersAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63001,
+	Name:          "gogoproto.goproto_getters_all",
+	Tag:           "varint,63001,opt,name=goproto_getters_all",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63002,
+	Name:          "gogoproto.goproto_enum_prefix_all",
+	Tag:           "varint,63002,opt,name=goproto_enum_prefix_all",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoStringerAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63003,
+	Name:          "gogoproto.goproto_stringer_all",
+	Tag:           "varint,63003,opt,name=goproto_stringer_all",
+	Filename:      "gogo.proto",
+}
+
+var E_VerboseEqualAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63004,
+	Name:          "gogoproto.verbose_equal_all",
+	Tag:           "varint,63004,opt,name=verbose_equal_all",
+	Filename:      "gogo.proto",
+}
+
+var E_FaceAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63005,
+	Name:          "gogoproto.face_all",
+	Tag:           "varint,63005,opt,name=face_all",
+	Filename:      "gogo.proto",
+}
+
+var E_GostringAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63006,
+	Name:          "gogoproto.gostring_all",
+	Tag:           "varint,63006,opt,name=gostring_all",
+	Filename:      "gogo.proto",
+}
+
+var E_PopulateAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63007,
+	Name:          "gogoproto.populate_all",
+	Tag:           "varint,63007,opt,name=populate_all",
+	Filename:      "gogo.proto",
+}
+
+var E_StringerAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63008,
+	Name:          "gogoproto.stringer_all",
+	Tag:           "varint,63008,opt,name=stringer_all",
+	Filename:      "gogo.proto",
+}
+
+var E_OnlyoneAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63009,
+	Name:          "gogoproto.onlyone_all",
+	Tag:           "varint,63009,opt,name=onlyone_all",
+	Filename:      "gogo.proto",
+}
+
+var E_EqualAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63013,
+	Name:          "gogoproto.equal_all",
+	Tag:           "varint,63013,opt,name=equal_all",
+	Filename:      "gogo.proto",
+}
+
+var E_DescriptionAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63014,
+	Name:          "gogoproto.description_all",
+	Tag:           "varint,63014,opt,name=description_all",
+	Filename:      "gogo.proto",
+}
+
+var E_TestgenAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63015,
+	Name:          "gogoproto.testgen_all",
+	Tag:           "varint,63015,opt,name=testgen_all",
+	Filename:      "gogo.proto",
+}
+
+var E_BenchgenAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63016,
+	Name:          "gogoproto.benchgen_all",
+	Tag:           "varint,63016,opt,name=benchgen_all",
+	Filename:      "gogo.proto",
+}
+
+var E_MarshalerAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63017,
+	Name:          "gogoproto.marshaler_all",
+	Tag:           "varint,63017,opt,name=marshaler_all",
+	Filename:      "gogo.proto",
+}
+
+var E_UnmarshalerAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63018,
+	Name:          "gogoproto.unmarshaler_all",
+	Tag:           "varint,63018,opt,name=unmarshaler_all",
+	Filename:      "gogo.proto",
+}
+
+var E_StableMarshalerAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63019,
+	Name:          "gogoproto.stable_marshaler_all",
+	Tag:           "varint,63019,opt,name=stable_marshaler_all",
+	Filename:      "gogo.proto",
+}
+
+var E_SizerAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63020,
+	Name:          "gogoproto.sizer_all",
+	Tag:           "varint,63020,opt,name=sizer_all",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63021,
+	Name:          "gogoproto.goproto_enum_stringer_all",
+	Tag:           "varint,63021,opt,name=goproto_enum_stringer_all",
+	Filename:      "gogo.proto",
+}
+
+var E_EnumStringerAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63022,
+	Name:          "gogoproto.enum_stringer_all",
+	Tag:           "varint,63022,opt,name=enum_stringer_all",
+	Filename:      "gogo.proto",
+}
+
+var E_UnsafeMarshalerAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63023,
+	Name:          "gogoproto.unsafe_marshaler_all",
+	Tag:           "varint,63023,opt,name=unsafe_marshaler_all",
+	Filename:      "gogo.proto",
+}
+
+var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63024,
+	Name:          "gogoproto.unsafe_unmarshaler_all",
+	Tag:           "varint,63024,opt,name=unsafe_unmarshaler_all",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63025,
+	Name:          "gogoproto.goproto_extensions_map_all",
+	Tag:           "varint,63025,opt,name=goproto_extensions_map_all",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63026,
+	Name:          "gogoproto.goproto_unrecognized_all",
+	Tag:           "varint,63026,opt,name=goproto_unrecognized_all",
+	Filename:      "gogo.proto",
+}
+
+var E_GogoprotoImport = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63027,
+	Name:          "gogoproto.gogoproto_import",
+	Tag:           "varint,63027,opt,name=gogoproto_import",
+	Filename:      "gogo.proto",
+}
+
+var E_ProtosizerAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63028,
+	Name:          "gogoproto.protosizer_all",
+	Tag:           "varint,63028,opt,name=protosizer_all",
+	Filename:      "gogo.proto",
+}
+
+var E_CompareAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63029,
+	Name:          "gogoproto.compare_all",
+	Tag:           "varint,63029,opt,name=compare_all",
+	Filename:      "gogo.proto",
+}
+
+var E_TypedeclAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63030,
+	Name:          "gogoproto.typedecl_all",
+	Tag:           "varint,63030,opt,name=typedecl_all",
+	Filename:      "gogo.proto",
+}
+
+var E_EnumdeclAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63031,
+	Name:          "gogoproto.enumdecl_all",
+	Tag:           "varint,63031,opt,name=enumdecl_all",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoRegistration = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63032,
+	Name:          "gogoproto.goproto_registration",
+	Tag:           "varint,63032,opt,name=goproto_registration",
+	Filename:      "gogo.proto",
+}
+
+var E_MessagenameAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63033,
+	Name:          "gogoproto.messagename_all",
+	Tag:           "varint,63033,opt,name=messagename_all",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoSizecacheAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63034,
+	Name:          "gogoproto.goproto_sizecache_all",
+	Tag:           "varint,63034,opt,name=goproto_sizecache_all",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoUnkeyedAll = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FileOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         63035,
+	Name:          "gogoproto.goproto_unkeyed_all",
+	Tag:           "varint,63035,opt,name=goproto_unkeyed_all",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoGetters = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64001,
+	Name:          "gogoproto.goproto_getters",
+	Tag:           "varint,64001,opt,name=goproto_getters",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoStringer = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64003,
+	Name:          "gogoproto.goproto_stringer",
+	Tag:           "varint,64003,opt,name=goproto_stringer",
+	Filename:      "gogo.proto",
+}
+
+var E_VerboseEqual = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64004,
+	Name:          "gogoproto.verbose_equal",
+	Tag:           "varint,64004,opt,name=verbose_equal",
+	Filename:      "gogo.proto",
+}
+
+var E_Face = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64005,
+	Name:          "gogoproto.face",
+	Tag:           "varint,64005,opt,name=face",
+	Filename:      "gogo.proto",
+}
+
+var E_Gostring = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64006,
+	Name:          "gogoproto.gostring",
+	Tag:           "varint,64006,opt,name=gostring",
+	Filename:      "gogo.proto",
+}
+
+var E_Populate = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64007,
+	Name:          "gogoproto.populate",
+	Tag:           "varint,64007,opt,name=populate",
+	Filename:      "gogo.proto",
+}
+
+var E_Stringer = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         67008,
+	Name:          "gogoproto.stringer",
+	Tag:           "varint,67008,opt,name=stringer",
+	Filename:      "gogo.proto",
+}
+
+var E_Onlyone = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64009,
+	Name:          "gogoproto.onlyone",
+	Tag:           "varint,64009,opt,name=onlyone",
+	Filename:      "gogo.proto",
+}
+
+var E_Equal = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64013,
+	Name:          "gogoproto.equal",
+	Tag:           "varint,64013,opt,name=equal",
+	Filename:      "gogo.proto",
+}
+
+var E_Description = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64014,
+	Name:          "gogoproto.description",
+	Tag:           "varint,64014,opt,name=description",
+	Filename:      "gogo.proto",
+}
+
+var E_Testgen = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64015,
+	Name:          "gogoproto.testgen",
+	Tag:           "varint,64015,opt,name=testgen",
+	Filename:      "gogo.proto",
+}
+
+var E_Benchgen = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64016,
+	Name:          "gogoproto.benchgen",
+	Tag:           "varint,64016,opt,name=benchgen",
+	Filename:      "gogo.proto",
+}
+
+var E_Marshaler = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64017,
+	Name:          "gogoproto.marshaler",
+	Tag:           "varint,64017,opt,name=marshaler",
+	Filename:      "gogo.proto",
+}
+
+var E_Unmarshaler = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64018,
+	Name:          "gogoproto.unmarshaler",
+	Tag:           "varint,64018,opt,name=unmarshaler",
+	Filename:      "gogo.proto",
+}
+
+var E_StableMarshaler = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64019,
+	Name:          "gogoproto.stable_marshaler",
+	Tag:           "varint,64019,opt,name=stable_marshaler",
+	Filename:      "gogo.proto",
+}
+
+var E_Sizer = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64020,
+	Name:          "gogoproto.sizer",
+	Tag:           "varint,64020,opt,name=sizer",
+	Filename:      "gogo.proto",
+}
+
+var E_UnsafeMarshaler = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64023,
+	Name:          "gogoproto.unsafe_marshaler",
+	Tag:           "varint,64023,opt,name=unsafe_marshaler",
+	Filename:      "gogo.proto",
+}
+
+var E_UnsafeUnmarshaler = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64024,
+	Name:          "gogoproto.unsafe_unmarshaler",
+	Tag:           "varint,64024,opt,name=unsafe_unmarshaler",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoExtensionsMap = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64025,
+	Name:          "gogoproto.goproto_extensions_map",
+	Tag:           "varint,64025,opt,name=goproto_extensions_map",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoUnrecognized = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64026,
+	Name:          "gogoproto.goproto_unrecognized",
+	Tag:           "varint,64026,opt,name=goproto_unrecognized",
+	Filename:      "gogo.proto",
+}
+
+var E_Protosizer = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64028,
+	Name:          "gogoproto.protosizer",
+	Tag:           "varint,64028,opt,name=protosizer",
+	Filename:      "gogo.proto",
+}
+
+var E_Compare = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64029,
+	Name:          "gogoproto.compare",
+	Tag:           "varint,64029,opt,name=compare",
+	Filename:      "gogo.proto",
+}
+
+var E_Typedecl = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64030,
+	Name:          "gogoproto.typedecl",
+	Tag:           "varint,64030,opt,name=typedecl",
+	Filename:      "gogo.proto",
+}
+
+var E_Messagename = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64033,
+	Name:          "gogoproto.messagename",
+	Tag:           "varint,64033,opt,name=messagename",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoSizecache = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64034,
+	Name:          "gogoproto.goproto_sizecache",
+	Tag:           "varint,64034,opt,name=goproto_sizecache",
+	Filename:      "gogo.proto",
+}
+
+var E_GoprotoUnkeyed = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.MessageOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         64035,
+	Name:          "gogoproto.goproto_unkeyed",
+	Tag:           "varint,64035,opt,name=goproto_unkeyed",
+	Filename:      "gogo.proto",
+}
+
+var E_Nullable = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         65001,
+	Name:          "gogoproto.nullable",
+	Tag:           "varint,65001,opt,name=nullable",
+	Filename:      "gogo.proto",
+}
+
+var E_Embed = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         65002,
+	Name:          "gogoproto.embed",
+	Tag:           "varint,65002,opt,name=embed",
+	Filename:      "gogo.proto",
+}
+
+var E_Customtype = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         65003,
+	Name:          "gogoproto.customtype",
+	Tag:           "bytes,65003,opt,name=customtype",
+	Filename:      "gogo.proto",
+}
+
+var E_Customname = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         65004,
+	Name:          "gogoproto.customname",
+	Tag:           "bytes,65004,opt,name=customname",
+	Filename:      "gogo.proto",
+}
+
+var E_Jsontag = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         65005,
+	Name:          "gogoproto.jsontag",
+	Tag:           "bytes,65005,opt,name=jsontag",
+	Filename:      "gogo.proto",
+}
+
+var E_Moretags = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         65006,
+	Name:          "gogoproto.moretags",
+	Tag:           "bytes,65006,opt,name=moretags",
+	Filename:      "gogo.proto",
+}
+
+var E_Casttype = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         65007,
+	Name:          "gogoproto.casttype",
+	Tag:           "bytes,65007,opt,name=casttype",
+	Filename:      "gogo.proto",
+}
+
+var E_Castkey = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         65008,
+	Name:          "gogoproto.castkey",
+	Tag:           "bytes,65008,opt,name=castkey",
+	Filename:      "gogo.proto",
+}
+
+var E_Castvalue = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         65009,
+	Name:          "gogoproto.castvalue",
+	Tag:           "bytes,65009,opt,name=castvalue",
+	Filename:      "gogo.proto",
+}
+
+var E_Stdtime = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         65010,
+	Name:          "gogoproto.stdtime",
+	Tag:           "varint,65010,opt,name=stdtime",
+	Filename:      "gogo.proto",
+}
+
+var E_Stdduration = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         65011,
+	Name:          "gogoproto.stdduration",
+	Tag:           "varint,65011,opt,name=stdduration",
+	Filename:      "gogo.proto",
+}
+
+var E_Wktpointer = &proto.ExtensionDesc{
+	ExtendedType:  (*descriptor.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         65012,
+	Name:          "gogoproto.wktpointer",
+	Tag:           "varint,65012,opt,name=wktpointer",
+	Filename:      "gogo.proto",
+}
+
+func init() {
+	proto.RegisterExtension(E_GoprotoEnumPrefix)
+	proto.RegisterExtension(E_GoprotoEnumStringer)
+	proto.RegisterExtension(E_EnumStringer)
+	proto.RegisterExtension(E_EnumCustomname)
+	proto.RegisterExtension(E_Enumdecl)
+	proto.RegisterExtension(E_EnumvalueCustomname)
+	proto.RegisterExtension(E_GoprotoGettersAll)
+	proto.RegisterExtension(E_GoprotoEnumPrefixAll)
+	proto.RegisterExtension(E_GoprotoStringerAll)
+	proto.RegisterExtension(E_VerboseEqualAll)
+	proto.RegisterExtension(E_FaceAll)
+	proto.RegisterExtension(E_GostringAll)
+	proto.RegisterExtension(E_PopulateAll)
+	proto.RegisterExtension(E_StringerAll)
+	proto.RegisterExtension(E_OnlyoneAll)
+	proto.RegisterExtension(E_EqualAll)
+	proto.RegisterExtension(E_DescriptionAll)
+	proto.RegisterExtension(E_TestgenAll)
+	proto.RegisterExtension(E_BenchgenAll)
+	proto.RegisterExtension(E_MarshalerAll)
+	proto.RegisterExtension(E_UnmarshalerAll)
+	proto.RegisterExtension(E_StableMarshalerAll)
+	proto.RegisterExtension(E_SizerAll)
+	proto.RegisterExtension(E_GoprotoEnumStringerAll)
+	proto.RegisterExtension(E_EnumStringerAll)
+	proto.RegisterExtension(E_UnsafeMarshalerAll)
+	proto.RegisterExtension(E_UnsafeUnmarshalerAll)
+	proto.RegisterExtension(E_GoprotoExtensionsMapAll)
+	proto.RegisterExtension(E_GoprotoUnrecognizedAll)
+	proto.RegisterExtension(E_GogoprotoImport)
+	proto.RegisterExtension(E_ProtosizerAll)
+	proto.RegisterExtension(E_CompareAll)
+	proto.RegisterExtension(E_TypedeclAll)
+	proto.RegisterExtension(E_EnumdeclAll)
+	proto.RegisterExtension(E_GoprotoRegistration)
+	proto.RegisterExtension(E_MessagenameAll)
+	proto.RegisterExtension(E_GoprotoSizecacheAll)
+	proto.RegisterExtension(E_GoprotoUnkeyedAll)
+	proto.RegisterExtension(E_GoprotoGetters)
+	proto.RegisterExtension(E_GoprotoStringer)
+	proto.RegisterExtension(E_VerboseEqual)
+	proto.RegisterExtension(E_Face)
+	proto.RegisterExtension(E_Gostring)
+	proto.RegisterExtension(E_Populate)
+	proto.RegisterExtension(E_Stringer)
+	proto.RegisterExtension(E_Onlyone)
+	proto.RegisterExtension(E_Equal)
+	proto.RegisterExtension(E_Description)
+	proto.RegisterExtension(E_Testgen)
+	proto.RegisterExtension(E_Benchgen)
+	proto.RegisterExtension(E_Marshaler)
+	proto.RegisterExtension(E_Unmarshaler)
+	proto.RegisterExtension(E_StableMarshaler)
+	proto.RegisterExtension(E_Sizer)
+	proto.RegisterExtension(E_UnsafeMarshaler)
+	proto.RegisterExtension(E_UnsafeUnmarshaler)
+	proto.RegisterExtension(E_GoprotoExtensionsMap)
+	proto.RegisterExtension(E_GoprotoUnrecognized)
+	proto.RegisterExtension(E_Protosizer)
+	proto.RegisterExtension(E_Compare)
+	proto.RegisterExtension(E_Typedecl)
+	proto.RegisterExtension(E_Messagename)
+	proto.RegisterExtension(E_GoprotoSizecache)
+	proto.RegisterExtension(E_GoprotoUnkeyed)
+	proto.RegisterExtension(E_Nullable)
+	proto.RegisterExtension(E_Embed)
+	proto.RegisterExtension(E_Customtype)
+	proto.RegisterExtension(E_Customname)
+	proto.RegisterExtension(E_Jsontag)
+	proto.RegisterExtension(E_Moretags)
+	proto.RegisterExtension(E_Casttype)
+	proto.RegisterExtension(E_Castkey)
+	proto.RegisterExtension(E_Castvalue)
+	proto.RegisterExtension(E_Stdtime)
+	proto.RegisterExtension(E_Stdduration)
+	proto.RegisterExtension(E_Wktpointer)
+}
+
+func init() { proto.RegisterFile("gogo.proto", fileDescriptor_592445b5231bc2b9) }
+
+var fileDescriptor_592445b5231bc2b9 = []byte{
+	// 1328 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x49, 0x6f, 0x1c, 0x45,
+	0x14, 0x80, 0x85, 0x48, 0x64, 0x4f, 0x79, 0x8b, 0xc7, 0xc6, 0x84, 0x08, 0x44, 0xe0, 0xc4, 0xc9,
+	0x3e, 0x45, 0x28, 0x65, 0x45, 0x96, 0x63, 0x39, 0x56, 0x10, 0x0e, 0xc6, 0x89, 0xc3, 0x76, 0x18,
+	0xf5, 0xf4, 0x94, 0xdb, 0x8d, 0xbb, 0xbb, 0x9a, 0xee, 0xea, 0x10, 0xe7, 0x86, 0xc2, 0x22, 0x84,
+	0xd8, 0x91, 0x20, 0x21, 0x09, 0x04, 0xc4, 0xbe, 0x86, 0x7d, 0xb9, 0x70, 0x61, 0xb9, 0xf2, 0x1f,
+	0xb8, 0x00, 0x66, 0xf7, 0xcd, 0x17, 0xf4, 0xba, 0xdf, 0xeb, 0xa9, 0x69, 0x8f, 0x54, 0x35, 0xb7,
+	0xf6, 0xb8, 0xbe, 0x6f, 0xaa, 0xdf, 0xeb, 0x7a, 0xef, 0x4d, 0x33, 0xe6, 0x49, 0x4f, 0x4e, 0xc6,
+	0x89, 0x54, 0xb2, 0x5e, 0x83, 0xeb, 0xfc, 0x72, 0xdf, 0x7e, 0x4f, 0x4a, 0x2f, 0x10, 0x53, 0xf9,
+	0x5f, 0xcd, 0x6c, 0x75, 0xaa, 0x25, 0x52, 0x37, 0xf1, 0x63, 0x25, 0x93, 0x62, 0x31, 0x3f, 0xc6,
+	0xc6, 0x70, 0x71, 0x43, 0x44, 0x59, 0xd8, 0x88, 0x13, 0xb1, 0xea, 0x9f, 0xae, 0x5f, 0x3f, 0x59,
+	0x90, 0x93, 0x44, 0x4e, 0xce, 0x47, 0x59, 0x78, 0x47, 0xac, 0x7c, 0x19, 0xa5, 0x7b, 0xaf, 0xfc,
+	0x72, 0xf5, 0xfe, 0xab, 0x6e, 0xe9, 0x5f, 0x1e, 0x45, 0x14, 0xfe, 0xb7, 0x94, 0x83, 0x7c, 0x99,
+	0x5d, 0xd3, 0xe1, 0x4b, 0x55, 0xe2, 0x47, 0x9e, 0x48, 0x0c, 0xc6, 0xef, 0xd1, 0x38, 0xa6, 0x19,
+	0x8f, 0x23, 0xca, 0xe7, 0xd8, 0x50, 0x2f, 0xae, 0x1f, 0xd0, 0x35, 0x28, 0x74, 0xc9, 0x02, 0x1b,
+	0xc9, 0x25, 0x6e, 0x96, 0x2a, 0x19, 0x46, 0x4e, 0x28, 0x0c, 0x9a, 0x1f, 0x73, 0x4d, 0x6d, 0x79,
+	0x18, 0xb0, 0xb9, 0x92, 0xe2, 0x9c, 0xf5, 0xc3, 0x27, 0x2d, 0xe1, 0x06, 0x06, 0xc3, 0x4f, 0xb8,
+	0x91, 0x72, 0x3d, 0x3f, 0xc9, 0xc6, 0xe1, 0xfa, 0x94, 0x13, 0x64, 0x42, 0xdf, 0xc9, 0x4d, 0x5d,
+	0x3d, 0x27, 0x61, 0x19, 0xc9, 0x7e, 0x3e, 0xbb, 0x2b, 0xdf, 0xce, 0x58, 0x29, 0xd0, 0xf6, 0xa4,
+	0x65, 0xd1, 0x13, 0x4a, 0x89, 0x24, 0x6d, 0x38, 0x41, 0xb7, 0xed, 0x1d, 0xf1, 0x83, 0xd2, 0x78,
+	0x6e, 0xb3, 0x33, 0x8b, 0x0b, 0x05, 0x39, 0x1b, 0x04, 0x7c, 0x85, 0x5d, 0xdb, 0xe5, 0xa9, 0xb0,
+	0x70, 0x9e, 0x47, 0xe7, 0xf8, 0x8e, 0x27, 0x03, 0xb4, 0x4b, 0x8c, 0x3e, 0x2f, 0x73, 0x69, 0xe1,
+	0x7c, 0x19, 0x9d, 0x75, 0x64, 0x29, 0xa5, 0x60, 0xbc, 0x8d, 0x8d, 0x9e, 0x12, 0x49, 0x53, 0xa6,
+	0xa2, 0x21, 0x1e, 0xc8, 0x9c, 0xc0, 0x42, 0x77, 0x01, 0x75, 0x23, 0x08, 0xce, 0x03, 0x07, 0xae,
+	0x83, 0xac, 0x7f, 0xd5, 0x71, 0x85, 0x85, 0xe2, 0x22, 0x2a, 0xfa, 0x60, 0x3d, 0xa0, 0xb3, 0x6c,
+	0xd0, 0x93, 0xc5, 0x2d, 0x59, 0xe0, 0x97, 0x10, 0x1f, 0x20, 0x06, 0x15, 0xb1, 0x8c, 0xb3, 0xc0,
+	0x51, 0x36, 0x3b, 0x78, 0x85, 0x14, 0xc4, 0xa0, 0xa2, 0x87, 0xb0, 0xbe, 0x4a, 0x8a, 0x54, 0x8b,
+	0xe7, 0x0c, 0x1b, 0x90, 0x51, 0xb0, 0x21, 0x23, 0x9b, 0x4d, 0x5c, 0x46, 0x03, 0x43, 0x04, 0x04,
+	0xd3, 0xac, 0x66, 0x9b, 0x88, 0x37, 0x36, 0xe9, 0x78, 0x50, 0x06, 0x16, 0xd8, 0x08, 0x15, 0x28,
+	0x5f, 0x46, 0x16, 0x8a, 0x37, 0x51, 0x31, 0xac, 0x61, 0x78, 0x1b, 0x4a, 0xa4, 0xca, 0x13, 0x36,
+	0x92, 0xb7, 0xe8, 0x36, 0x10, 0xc1, 0x50, 0x36, 0x45, 0xe4, 0xae, 0xd9, 0x19, 0xde, 0xa6, 0x50,
+	0x12, 0x03, 0x8a, 0x39, 0x36, 0x14, 0x3a, 0x49, 0xba, 0xe6, 0x04, 0x56, 0xe9, 0x78, 0x07, 0x1d,
+	0x83, 0x25, 0x84, 0x11, 0xc9, 0xa2, 0x5e, 0x34, 0xef, 0x52, 0x44, 0x34, 0x0c, 0x8f, 0x5e, 0xaa,
+	0x9c, 0x66, 0x20, 0x1a, 0xbd, 0xd8, 0xde, 0xa3, 0xa3, 0x57, 0xb0, 0x8b, 0xba, 0x71, 0x9a, 0xd5,
+	0x52, 0xff, 0x8c, 0x95, 0xe6, 0x7d, 0xca, 0x74, 0x0e, 0x00, 0x7c, 0x0f, 0xbb, 0xae, 0x6b, 0x9b,
+	0xb0, 0x90, 0x7d, 0x80, 0xb2, 0x89, 0x2e, 0xad, 0x02, 0x4b, 0x42, 0xaf, 0xca, 0x0f, 0xa9, 0x24,
+	0x88, 0x8a, 0x6b, 0x89, 0x8d, 0x67, 0x51, 0xea, 0xac, 0xf6, 0x16, 0xb5, 0x8f, 0x28, 0x6a, 0x05,
+	0xdb, 0x11, 0xb5, 0x13, 0x6c, 0x02, 0x8d, 0xbd, 0xe5, 0xf5, 0x63, 0x2a, 0xac, 0x05, 0xbd, 0xd2,
+	0x99, 0xdd, 0xfb, 0xd8, 0xbe, 0x32, 0x9c, 0xa7, 0x95, 0x88, 0x52, 0x60, 0x1a, 0xa1, 0x13, 0x5b,
+	0x98, 0xaf, 0xa0, 0x99, 0x2a, 0xfe, 0x7c, 0x29, 0x58, 0x74, 0x62, 0x90, 0xdf, 0xcd, 0xf6, 0x92,
+	0x3c, 0x8b, 0x12, 0xe1, 0x4a, 0x2f, 0xf2, 0xcf, 0x88, 0x96, 0x85, 0xfa, 0x93, 0x4a, 0xaa, 0x56,
+	0x34, 0x1c, 0xcc, 0x47, 0xd9, 0x9e, 0x72, 0x56, 0x69, 0xf8, 0x61, 0x2c, 0x13, 0x65, 0x30, 0x7e,
+	0x4a, 0x99, 0x2a, 0xb9, 0xa3, 0x39, 0xc6, 0xe7, 0xd9, 0x70, 0xfe, 0xa7, 0xed, 0x23, 0xf9, 0x19,
+	0x8a, 0x86, 0xda, 0x14, 0x16, 0x0e, 0x57, 0x86, 0xb1, 0x93, 0xd8, 0xd4, 0xbf, 0xcf, 0xa9, 0x70,
+	0x20, 0x82, 0x85, 0x43, 0x6d, 0xc4, 0x02, 0xba, 0xbd, 0x85, 0xe1, 0x0b, 0x2a, 0x1c, 0xc4, 0xa0,
+	0x82, 0x06, 0x06, 0x0b, 0xc5, 0x97, 0xa4, 0x20, 0x06, 0x14, 0x77, 0xb6, 0x1b, 0x6d, 0x22, 0x3c,
+	0x3f, 0x55, 0x89, 0x03, 0xab, 0x0d, 0xaa, 0xaf, 0x36, 0x3b, 0x87, 0xb0, 0x65, 0x0d, 0x85, 0x4a,
+	0x14, 0x8a, 0x34, 0x75, 0x3c, 0x01, 0x13, 0x87, 0xc5, 0xc6, 0xbe, 0xa6, 0x4a, 0xa4, 0x61, 0xb0,
+	0x37, 0x6d, 0x42, 0x84, 0xb0, 0xbb, 0x8e, 0xbb, 0x66, 0xa3, 0xfb, 0xa6, 0xb2, 0xb9, 0xe3, 0xc4,
+	0x82, 0x53, 0x9b, 0x7f, 0xb2, 0x68, 0x5d, 0x6c, 0x58, 0x3d, 0x9d, 0xdf, 0x56, 0xe6, 0x9f, 0x95,
+	0x82, 0x2c, 0x6a, 0xc8, 0x48, 0x65, 0x9e, 0xaa, 0xdf, 0xb8, 0xc3, 0xb5, 0x58, 0xdc, 0x17, 0xe9,
+	0x1e, 0xda, 0xc2, 0xfb, 0xed, 0x1c, 0xa7, 0xf8, 0xed, 0xf0, 0x90, 0x77, 0x0e, 0x3d, 0x66, 0xd9,
+	0xd9, 0xad, 0xf2, 0x39, 0xef, 0x98, 0x79, 0xf8, 0x11, 0x36, 0xd4, 0x31, 0xf0, 0x98, 0x55, 0x0f,
+	0xa3, 0x6a, 0x50, 0x9f, 0x77, 0xf8, 0x01, 0xb6, 0x0b, 0x86, 0x17, 0x33, 0xfe, 0x08, 0xe2, 0xf9,
+	0x72, 0x7e, 0x88, 0xf5, 0xd3, 0xd0, 0x62, 0x46, 0x1f, 0x45, 0xb4, 0x44, 0x00, 0xa7, 0x81, 0xc5,
+	0x8c, 0x3f, 0x46, 0x38, 0x21, 0x80, 0xdb, 0x87, 0xf0, 0xbb, 0x27, 0x76, 0x61, 0xd3, 0xa1, 0xd8,
+	0x4d, 0xb3, 0x3e, 0x9c, 0x54, 0xcc, 0xf4, 0xe3, 0xf8, 0xe5, 0x44, 0xf0, 0x5b, 0xd9, 0x6e, 0xcb,
+	0x80, 0x3f, 0x89, 0x68, 0xb1, 0x9e, 0xcf, 0xb1, 0x01, 0x6d, 0x3a, 0x31, 0xe3, 0x4f, 0x21, 0xae,
+	0x53, 0xb0, 0x75, 0x9c, 0x4e, 0xcc, 0x82, 0xa7, 0x69, 0xeb, 0x48, 0x40, 0xd8, 0x68, 0x30, 0x31,
+	0xd3, 0xcf, 0x50, 0xd4, 0x09, 0xe1, 0x33, 0xac, 0x56, 0x36, 0x1b, 0x33, 0xff, 0x2c, 0xf2, 0x6d,
+	0x06, 0x22, 0xa0, 0x35, 0x3b, 0xb3, 0xe2, 0x39, 0x8a, 0x80, 0x46, 0xc1, 0x31, 0xaa, 0x0e, 0x30,
+	0x66, 0xd3, 0xf3, 0x74, 0x8c, 0x2a, 0xf3, 0x0b, 0x64, 0x33, 0xaf, 0xf9, 0x66, 0xc5, 0x0b, 0x94,
+	0xcd, 0x7c, 0x3d, 0x6c, 0xa3, 0x3a, 0x11, 0x98, 0x1d, 0x2f, 0xd2, 0x36, 0x2a, 0x03, 0x01, 0x5f,
+	0x62, 0xf5, 0x9d, 0xd3, 0x80, 0xd9, 0xf7, 0x12, 0xfa, 0x46, 0x77, 0x0c, 0x03, 0xfc, 0x2e, 0x36,
+	0xd1, 0x7d, 0x12, 0x30, 0x5b, 0xcf, 0x6d, 0x55, 0x7e, 0xbb, 0xe9, 0x83, 0x00, 0x3f, 0xd1, 0x6e,
+	0x29, 0xfa, 0x14, 0x60, 0xd6, 0x9e, 0xdf, 0xea, 0x2c, 0xdc, 0xfa, 0x10, 0xc0, 0x67, 0x19, 0x6b,
+	0x37, 0x60, 0xb3, 0xeb, 0x02, 0xba, 0x34, 0x08, 0x8e, 0x06, 0xf6, 0x5f, 0x33, 0x7f, 0x91, 0x8e,
+	0x06, 0x12, 0x70, 0x34, 0xa8, 0xf5, 0x9a, 0xe9, 0x4b, 0x74, 0x34, 0x08, 0x81, 0x27, 0x5b, 0xeb,
+	0x6e, 0x66, 0xc3, 0x65, 0x7a, 0xb2, 0x35, 0x8a, 0x1f, 0x63, 0xa3, 0x3b, 0x1a, 0xa2, 0x59, 0xf5,
+	0x1a, 0xaa, 0xf6, 0x54, 0xfb, 0xa1, 0xde, 0xbc, 0xb0, 0x19, 0x9a, 0x6d, 0xaf, 0x57, 0x9a, 0x17,
+	0xf6, 0x42, 0x3e, 0xcd, 0xfa, 0xa3, 0x2c, 0x08, 0xe0, 0xf0, 0xd4, 0x6f, 0xe8, 0xd2, 0x4d, 0x45,
+	0xd0, 0x22, 0xc5, 0xaf, 0xdb, 0x18, 0x1d, 0x02, 0xf8, 0x01, 0xb6, 0x5b, 0x84, 0x4d, 0xd1, 0x32,
+	0x91, 0xbf, 0x6d, 0x53, 0xc1, 0x84, 0xd5, 0x7c, 0x86, 0xb1, 0xe2, 0xd5, 0x08, 0x84, 0xd9, 0xc4,
+	0xfe, 0xbe, 0x5d, 0xbc, 0xa5, 0xd1, 0x90, 0xb6, 0x20, 0x4f, 0x8a, 0x41, 0xb0, 0xd9, 0x29, 0xc8,
+	0x33, 0x72, 0x90, 0xf5, 0xdd, 0x9f, 0xca, 0x48, 0x39, 0x9e, 0x89, 0xfe, 0x03, 0x69, 0x5a, 0x0f,
+	0x01, 0x0b, 0x65, 0x22, 0x94, 0xe3, 0xa5, 0x26, 0xf6, 0x4f, 0x64, 0x4b, 0x00, 0x60, 0xd7, 0x49,
+	0x95, 0xcd, 0x7d, 0xff, 0x45, 0x30, 0x01, 0xb0, 0x69, 0xb8, 0x5e, 0x17, 0x1b, 0x26, 0xf6, 0x6f,
+	0xda, 0x34, 0xae, 0xe7, 0x87, 0x58, 0x0d, 0x2e, 0xf3, 0xb7, 0x4a, 0x26, 0xf8, 0x1f, 0x84, 0xdb,
+	0x04, 0x7c, 0x73, 0xaa, 0x5a, 0xca, 0x37, 0x07, 0xfb, 0x5f, 0xcc, 0x34, 0xad, 0xe7, 0xb3, 0x6c,
+	0x20, 0x55, 0xad, 0x56, 0x86, 0xf3, 0xa9, 0x01, 0xff, 0x6f, 0xbb, 0x7c, 0x65, 0x51, 0x32, 0x90,
+	0xed, 0x07, 0xd7, 0x55, 0x2c, 0xfd, 0x48, 0x89, 0xc4, 0x64, 0xd8, 0x42, 0x83, 0x86, 0x1c, 0x9e,
+	0x67, 0x63, 0xae, 0x0c, 0xab, 0xdc, 0x61, 0xb6, 0x20, 0x17, 0xe4, 0x52, 0x5e, 0x67, 0xee, 0xbd,
+	0xd9, 0xf3, 0xd5, 0x5a, 0xd6, 0x9c, 0x74, 0x65, 0x38, 0x05, 0xbf, 0x3c, 0xda, 0x2f, 0x54, 0xcb,
+	0xdf, 0x21, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xaf, 0x70, 0x4e, 0x83, 0x15, 0x00, 0x00,
+}
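Editorial note (not part of the vendored file): the `E_*` values above are `proto.ExtensionDesc` handles that generator plugins read off descriptor options via the extension accessors in the vendored `proto` package. A minimal, illustrative sketch — the descriptor here is hand-built, whereas a real plugin would get it from the `CodeGeneratorRequest` protoc supplies:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Hypothetical message descriptor standing in for one parsed by protoc.
	msg := &descriptor.DescriptorProto{Options: &descriptor.MessageOptions{}}

	// Mark the message as if it carried option (gogoproto.marshaler) = true.
	if err := proto.SetExtension(msg.Options, gogoproto.E_Marshaler, proto.Bool(true)); err != nil {
		panic(err)
	}

	// GetBoolExtension returns the option value when set, otherwise the
	// supplied default (false here).
	fmt.Println(proto.GetBoolExtension(msg.GetOptions(), gogoproto.E_Marshaler, false)) // true
}
```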
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden
new file mode 100644
index 0000000..f6502e4
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.golden
@@ -0,0 +1,45 @@
+// Code generated by protoc-gen-go.
+// source: gogo.proto
+// DO NOT EDIT!
+
+package gogoproto
+
+import proto "github.com/gogo/protobuf/proto"
+import json "encoding/json"
+import math "math"
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+
+// Reference proto, json, and math imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = &json.SyntaxError{}
+var _ = math.Inf
+
+var E_Nullable = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         51235,
+	Name:          "gogoproto.nullable",
+	Tag:           "varint,51235,opt,name=nullable",
+}
+
+var E_Embed = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtensionType: (*bool)(nil),
+	Field:         51236,
+	Name:          "gogoproto.embed",
+	Tag:           "varint,51236,opt,name=embed",
+}
+
+var E_Customtype = &proto.ExtensionDesc{
+	ExtendedType:  (*google_protobuf.FieldOptions)(nil),
+	ExtensionType: (*string)(nil),
+	Field:         51237,
+	Name:          "gogoproto.customtype",
+	Tag:           "bytes,51237,opt,name=customtype",
+}
+
+func init() {
+	proto.RegisterExtension(E_Nullable)
+	proto.RegisterExtension(E_Embed)
+	proto.RegisterExtension(E_Customtype)
+}
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto
new file mode 100644
index 0000000..b80c856
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto
@@ -0,0 +1,144 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto2";
+package gogoproto;
+
+import "google/protobuf/descriptor.proto";
+
+option java_package = "com.google.protobuf";
+option java_outer_classname = "GoGoProtos";
+option go_package = "github.com/gogo/protobuf/gogoproto";
+
+extend google.protobuf.EnumOptions {
+	optional bool goproto_enum_prefix = 62001;
+	optional bool goproto_enum_stringer = 62021;
+	optional bool enum_stringer = 62022;
+	optional string enum_customname = 62023;
+	optional bool enumdecl = 62024;
+}
+
+extend google.protobuf.EnumValueOptions {
+	optional string enumvalue_customname = 66001;
+}
+
+extend google.protobuf.FileOptions {
+	optional bool goproto_getters_all = 63001;
+	optional bool goproto_enum_prefix_all = 63002;
+	optional bool goproto_stringer_all = 63003;
+	optional bool verbose_equal_all = 63004;
+	optional bool face_all = 63005;
+	optional bool gostring_all = 63006;
+	optional bool populate_all = 63007;
+	optional bool stringer_all = 63008;
+	optional bool onlyone_all = 63009;
+
+	optional bool equal_all = 63013;
+	optional bool description_all = 63014;
+	optional bool testgen_all = 63015;
+	optional bool benchgen_all = 63016;
+	optional bool marshaler_all = 63017;
+	optional bool unmarshaler_all = 63018;
+	optional bool stable_marshaler_all = 63019;
+
+	optional bool sizer_all = 63020;
+
+	optional bool goproto_enum_stringer_all = 63021;
+	optional bool enum_stringer_all = 63022;
+
+	optional bool unsafe_marshaler_all = 63023;
+	optional bool unsafe_unmarshaler_all = 63024;
+
+	optional bool goproto_extensions_map_all = 63025;
+	optional bool goproto_unrecognized_all = 63026;
+	optional bool gogoproto_import = 63027;
+	optional bool protosizer_all = 63028;
+	optional bool compare_all = 63029;
+	optional bool typedecl_all = 63030;
+	optional bool enumdecl_all = 63031;
+
+	optional bool goproto_registration = 63032;
+	optional bool messagename_all = 63033;
+
+	optional bool goproto_sizecache_all = 63034;
+	optional bool goproto_unkeyed_all = 63035;
+}
+
+extend google.protobuf.MessageOptions {
+	optional bool goproto_getters = 64001;
+	optional bool goproto_stringer = 64003;
+	optional bool verbose_equal = 64004;
+	optional bool face = 64005;
+	optional bool gostring = 64006;
+	optional bool populate = 64007;
+	optional bool stringer = 67008;
+	optional bool onlyone = 64009;
+
+	optional bool equal = 64013;
+	optional bool description = 64014;
+	optional bool testgen = 64015;
+	optional bool benchgen = 64016;
+	optional bool marshaler = 64017;
+	optional bool unmarshaler = 64018;
+	optional bool stable_marshaler = 64019;
+
+	optional bool sizer = 64020;
+
+	optional bool unsafe_marshaler = 64023;
+	optional bool unsafe_unmarshaler = 64024;
+
+	optional bool goproto_extensions_map = 64025;
+	optional bool goproto_unrecognized = 64026;
+
+	optional bool protosizer = 64028;
+	optional bool compare = 64029;
+
+	optional bool typedecl = 64030;
+
+	optional bool messagename = 64033;
+
+	optional bool goproto_sizecache = 64034;
+	optional bool goproto_unkeyed = 64035;
+}
+
+extend google.protobuf.FieldOptions {
+	optional bool nullable = 65001;
+	optional bool embed = 65002;
+	optional string customtype = 65003;
+	optional string customname = 65004;
+	optional string jsontag = 65005;
+	optional string moretags = 65006;
+	optional string casttype = 65007;
+	optional string castkey = 65008;
+	optional string castvalue = 65009;
+
+	optional bool stdtime = 65010;
+	optional bool stdduration = 65011;
+	optional bool wktpointer = 65012;
+
+}
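Editorial note (not part of the vendored file): the field numbers declared in gogo.proto above are the same ones carried by the generated `ExtensionDesc` values in gogo.pb.go, which is what lets the `proto` package locate an option on a descriptor. A quick sketch of that correspondence:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
)

func main() {
	// Each declaration such as "optional bool nullable = 65001;" surfaces in
	// Go as an ExtensionDesc whose Field matches the declared number.
	fmt.Println(gogoproto.E_Nullable.Field)     // 65001
	fmt.Println(gogoproto.E_MarshalerAll.Field) // 63017
	fmt.Println(gogoproto.E_Stringer.Field)     // 67008
}
```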
diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go
new file mode 100644
index 0000000..390d4e4
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go
@@ -0,0 +1,415 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package gogoproto
+
+import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
+import proto "github.com/gogo/protobuf/proto"
+
+func IsEmbed(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Embed, false)
+}
+
+func IsNullable(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Nullable, true)
+}
+
+func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Stdtime, false)
+}
+
+func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Stdduration, false)
+}
+
+func IsStdDouble(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.DoubleValue"
+}
+
+func IsStdFloat(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.FloatValue"
+}
+
+func IsStdInt64(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int64Value"
+}
+
+func IsStdUInt64(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt64Value"
+}
+
+func IsStdInt32(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.Int32Value"
+}
+
+func IsStdUInt32(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.UInt32Value"
+}
+
+func IsStdBool(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BoolValue"
+}
+
+func IsStdString(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.StringValue"
+}
+
+func IsStdBytes(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false) && *field.TypeName == ".google.protobuf.BytesValue"
+}
+
+func IsStdType(field *google_protobuf.FieldDescriptorProto) bool {
+	return (IsStdTime(field) || IsStdDuration(field) ||
+		IsStdDouble(field) || IsStdFloat(field) ||
+		IsStdInt64(field) || IsStdUInt64(field) ||
+		IsStdInt32(field) || IsStdUInt32(field) ||
+		IsStdBool(field) ||
+		IsStdString(field) || IsStdBytes(field))
+}
+
+func IsWktPtr(field *google_protobuf.FieldDescriptorProto) bool {
+	return proto.GetBoolExtension(field.Options, E_Wktpointer, false)
+}
+
+func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool {
+	nullable := IsNullable(field)
+	if field.IsMessage() || IsCustomType(field) {
+		return nullable
+	}
+	if proto3 {
+		return false
+	}
+	return nullable || *field.Type == google_protobuf.FieldDescriptorProto_TYPE_BYTES
+}
+
+func IsCustomType(field *google_protobuf.FieldDescriptorProto) bool {
+	typ := GetCustomType(field)
+	if len(typ) > 0 {
+		return true
+	}
+	return false
+}
+
+func IsCastType(field *google_protobuf.FieldDescriptorProto) bool {
+	typ := GetCastType(field)
+	if len(typ) > 0 {
+		return true
+	}
+	return false
+}
+
+func IsCastKey(field *google_protobuf.FieldDescriptorProto) bool {
+	typ := GetCastKey(field)
+	if len(typ) > 0 {
+		return true
+	}
+	return false
+}
+
+func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool {
+	typ := GetCastValue(field)
+	if len(typ) > 0 {
+		return true
+	}
+	return false
+}
+
+func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+	return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true))
+}
+
+func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true))
+}
+
+func GetCustomType(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Customtype)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func GetCastType(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Casttype)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func GetCastKey(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Castkey)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func GetCastValue(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Castvalue)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func IsCustomName(field *google_protobuf.FieldDescriptorProto) bool {
+	name := GetCustomName(field)
+	if len(name) > 0 {
+		return true
+	}
+	return false
+}
+
+func IsEnumCustomName(field *google_protobuf.EnumDescriptorProto) bool {
+	name := GetEnumCustomName(field)
+	if len(name) > 0 {
+		return true
+	}
+	return false
+}
+
+func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool {
+	name := GetEnumValueCustomName(field)
+	if len(name) > 0 {
+		return true
+	}
+	return false
+}
+
+func GetCustomName(field *google_protobuf.FieldDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Customname)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_EnumCustomname)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string {
+	if field == nil {
+		return ""
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname)
+		if err == nil && v.(*string) != nil {
+			return *(v.(*string))
+		}
+	}
+	return ""
+}
+
+func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string {
+	if field == nil {
+		return nil
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Jsontag)
+		if err == nil && v.(*string) != nil {
+			return (v.(*string))
+		}
+	}
+	return nil
+}
+
+func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string {
+	if field == nil {
+		return nil
+	}
+	if field.Options != nil {
+		v, err := proto.GetExtension(field.Options, E_Moretags)
+		if err == nil && v.(*string) != nil {
+			return (v.(*string))
+		}
+	}
+	return nil
+}
+
+type EnableFunc func(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool
+
+func EnabledGoEnumPrefix(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+	return proto.GetBoolExtension(enum.Options, E_GoprotoEnumPrefix, proto.GetBoolExtension(file.Options, E_GoprotoEnumPrefixAll, true))
+}
+
+func EnabledGoStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoStringer, proto.GetBoolExtension(file.Options, E_GoprotoStringerAll, true))
+}
+
+func HasGoGetters(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoGetters, proto.GetBoolExtension(file.Options, E_GoprotoGettersAll, true))
+}
+
+func IsUnion(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Onlyone, proto.GetBoolExtension(file.Options, E_OnlyoneAll, false))
+}
+
+func HasGoString(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Gostring, proto.GetBoolExtension(file.Options, E_GostringAll, false))
+}
+
+func HasEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Equal, proto.GetBoolExtension(file.Options, E_EqualAll, false))
+}
+
+func HasVerboseEqual(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_VerboseEqual, proto.GetBoolExtension(file.Options, E_VerboseEqualAll, false))
+}
+
+func IsStringer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Stringer, proto.GetBoolExtension(file.Options, E_StringerAll, false))
+}
+
+func IsFace(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Face, proto.GetBoolExtension(file.Options, E_FaceAll, false))
+}
+
+func HasDescription(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Description, proto.GetBoolExtension(file.Options, E_DescriptionAll, false))
+}
+
+func HasPopulate(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Populate, proto.GetBoolExtension(file.Options, E_PopulateAll, false))
+}
+
+func HasTestGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Testgen, proto.GetBoolExtension(file.Options, E_TestgenAll, false))
+}
+
+func HasBenchGen(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Benchgen, proto.GetBoolExtension(file.Options, E_BenchgenAll, false))
+}
+
+func IsMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Marshaler, proto.GetBoolExtension(file.Options, E_MarshalerAll, false))
+}
+
+func IsUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Unmarshaler, proto.GetBoolExtension(file.Options, E_UnmarshalerAll, false))
+}
+
+func IsStableMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_StableMarshaler, proto.GetBoolExtension(file.Options, E_StableMarshalerAll, false))
+}
+
+func IsSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Sizer, proto.GetBoolExtension(file.Options, E_SizerAll, false))
+}
+
+func IsProtoSizer(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Protosizer, proto.GetBoolExtension(file.Options, E_ProtosizerAll, false))
+}
+
+func IsGoEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+	return proto.GetBoolExtension(enum.Options, E_GoprotoEnumStringer, proto.GetBoolExtension(file.Options, E_GoprotoEnumStringerAll, true))
+}
+
+func IsEnumStringer(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool {
+	return proto.GetBoolExtension(enum.Options, E_EnumStringer, proto.GetBoolExtension(file.Options, E_EnumStringerAll, false))
+}
+
+func IsUnsafeMarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_UnsafeMarshaler, proto.GetBoolExtension(file.Options, E_UnsafeMarshalerAll, false))
+}
+
+func IsUnsafeUnmarshaler(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_UnsafeUnmarshaler, proto.GetBoolExtension(file.Options, E_UnsafeUnmarshalerAll, false))
+}
+
+func HasExtensionsMap(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoExtensionsMap, proto.GetBoolExtension(file.Options, E_GoprotoExtensionsMapAll, true))
+}
+
+func HasUnrecognized(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoUnrecognized, proto.GetBoolExtension(file.Options, E_GoprotoUnrecognizedAll, true))
+}
+
+func IsProto3(file *google_protobuf.FileDescriptorProto) bool {
+	return file.GetSyntax() == "proto3"
+}
+
+func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool {
+	return proto.GetBoolExtension(file.Options, E_GogoprotoImport, true)
+}
+
+func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false))
+}
+
+func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool {
+	return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false)
+}
+
+func HasMessageName(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_Messagename, proto.GetBoolExtension(file.Options, E_MessagenameAll, false))
+}
+
+func HasSizecache(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoSizecache, proto.GetBoolExtension(file.Options, E_GoprotoSizecacheAll, true))
+}
+
+func HasUnkeyed(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool {
+	return proto.GetBoolExtension(message.Options, E_GoprotoUnkeyed, proto.GetBoolExtension(file.Options, E_GoprotoUnkeyedAll, true))
+}
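Editorial sketch (not part of the vendored file): the helpers above implement the usual gogo resolution order — a message- or field-level option first, then the corresponding `*_all` file-level default. A plugin might call them as below; the empty descriptors are placeholders for what protoc would supply:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/gogoproto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Placeholder descriptors; a real plugin gets these from the
	// CodeGeneratorRequest.
	file := &descriptor.FileDescriptorProto{}
	msg := &descriptor.DescriptorProto{}

	// With no options set anywhere, each helper falls back to its default:
	// marshaler and sizer default to false, getters default to true.
	fmt.Println(gogoproto.IsMarshaler(file, msg))  // false
	fmt.Println(gogoproto.IsSizer(file, msg))      // false
	fmt.Println(gogoproto.HasGoGetters(file, msg)) // true
	fmt.Println(gogoproto.IsProto3(file))          // false ("" != "proto3")
}
```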
diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile
new file mode 100644
index 0000000..00d65f3
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors.  All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+	go install
+
+test: install generate-test-pbs
+	go test
+
+
+generate-test-pbs:
+	make install
+	make -C test_proto
+	make -C proto3_proto
+	make
diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go
new file mode 100644
index 0000000..a26b046
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/clone.go
@@ -0,0 +1,258 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy and merge.
+// TODO: RawMessage.
+
+package proto
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(src Message) Message {
+	in := reflect.ValueOf(src)
+	if in.IsNil() {
+		return src
+	}
+	out := reflect.New(in.Type().Elem())
+	dst := out.Interface().(Message)
+	Merge(dst, src)
+	return dst
+}
+
+// Merger is the interface representing objects that can merge messages of the same type.
+type Merger interface {
+	// Merge merges src into this message.
+	// Required and optional fields that are set in src will be set to that value in dst.
+	// Elements of repeated fields will be appended.
+	//
+	// Merge may panic if called with a different argument type than the receiver.
+	Merge(src Message)
+}
+
+// generatedMerger is the custom merge method that generated protos will have.
+// We must add this method since a generated Merge method will conflict with
+// many existing protos that have a Merge data field already defined.
+type generatedMerger interface {
+	XXX_Merge(src Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+	if m, ok := dst.(Merger); ok {
+		m.Merge(src)
+		return
+	}
+
+	in := reflect.ValueOf(src)
+	out := reflect.ValueOf(dst)
+	if out.IsNil() {
+		panic("proto: nil destination")
+	}
+	if in.Type() != out.Type() {
+		panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src))
+	}
+	if in.IsNil() {
+		return // Merge from nil src is a noop
+	}
+	if m, ok := dst.(generatedMerger); ok {
+		m.XXX_Merge(src)
+		return
+	}
+	mergeStruct(out.Elem(), in.Elem())
+}
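+
+// A minimal usage sketch, assuming a hypothetical generated message type
+// pb.Example with an optional string field Name (not part of this package):
+//
+//	src := &pb.Example{Name: proto.String("alice")}
+//	dst := proto.Clone(src).(*pb.Example) // independent deep copy of src
+//	proto.Merge(dst, src)                 // fields set in src are merged into dst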
+
+func mergeStruct(out, in reflect.Value) {
+	sprop := GetProperties(in.Type())
+	for i := 0; i < in.NumField(); i++ {
+		f := in.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i])
+	}
+
+	if emIn, ok := in.Addr().Interface().(extensionsBytes); ok {
+		emOut := out.Addr().Interface().(extensionsBytes)
+		bIn := emIn.GetExtensions()
+		bOut := emOut.GetExtensions()
+		*bOut = append(*bOut, *bIn...)
+	} else if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	uf := in.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return
+	}
+	uin := uf.Bytes()
+	if len(uin) > 0 {
+		out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+	}
+}
+
+// mergeAny performs a merge between two values of the same type.
+// viaPtr indicates whether the values were indirected through a pointer (implying proto2).
+// prop is set if this is a struct field (it may be nil).
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) {
+	if in.Type() == protoMessageType {
+		if !in.IsNil() {
+			if out.IsNil() {
+				out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+			} else {
+				Merge(out.Interface().(Message), in.Interface().(Message))
+			}
+		}
+		return
+	}
+	switch in.Kind() {
+	case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+		reflect.String, reflect.Uint32, reflect.Uint64:
+		if !viaPtr && isProto3Zero(in) {
+			return
+		}
+		out.Set(in)
+	case reflect.Interface:
+		// Probably a oneof field; copy non-nil values.
+		if in.IsNil() {
+			return
+		}
+		// Allocate destination if it is not set, or set to a different type.
+		// Otherwise we will merge as normal.
+		if out.IsNil() || out.Elem().Type() != in.Elem().Type() {
+			out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T)
+		}
+		mergeAny(out.Elem(), in.Elem(), false, nil)
+	case reflect.Map:
+		if in.Len() == 0 {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.MakeMap(in.Type()))
+		}
+		// For maps with value types of *T or []byte we need to deep copy each value.
+		elemKind := in.Type().Elem().Kind()
+		for _, key := range in.MapKeys() {
+			var val reflect.Value
+			switch elemKind {
+			case reflect.Ptr:
+				val = reflect.New(in.Type().Elem().Elem())
+				mergeAny(val, in.MapIndex(key), false, nil)
+			case reflect.Slice:
+				val = in.MapIndex(key)
+				val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+			default:
+				val = in.MapIndex(key)
+			}
+			out.SetMapIndex(key, val)
+		}
+	case reflect.Ptr:
+		if in.IsNil() {
+			return
+		}
+		if out.IsNil() {
+			out.Set(reflect.New(in.Elem().Type()))
+		}
+		mergeAny(out.Elem(), in.Elem(), true, nil)
+	case reflect.Slice:
+		if in.IsNil() {
+			return
+		}
+		if in.Type().Elem().Kind() == reflect.Uint8 {
+			// []byte is a scalar bytes field, not a repeated field.
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value, and should not
+			// be merged.
+			if prop != nil && prop.proto3 && in.Len() == 0 {
+				return
+			}
+
+			// Make a deep copy.
+			// Append to []byte{} instead of []byte(nil) so that we never end up
+			// with a nil result.
+			out.SetBytes(append([]byte{}, in.Bytes()...))
+			return
+		}
+		n := in.Len()
+		if out.IsNil() {
+			out.Set(reflect.MakeSlice(in.Type(), 0, n))
+		}
+		switch in.Type().Elem().Kind() {
+		case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+			reflect.String, reflect.Uint32, reflect.Uint64:
+			out.Set(reflect.AppendSlice(out, in))
+		default:
+			for i := 0; i < n; i++ {
+				x := reflect.Indirect(reflect.New(in.Type().Elem()))
+				mergeAny(x, in.Index(i), false, nil)
+				out.Set(reflect.Append(out, x))
+			}
+		}
+	case reflect.Struct:
+		mergeStruct(out, in)
+	default:
+		// unknown type, so not a protocol buffer
+		log.Printf("proto: don't know how to copy %v", in)
+	}
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+	for extNum, eIn := range in {
+		eOut := Extension{desc: eIn.desc}
+		if eIn.value != nil {
+			v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+			mergeAny(v, reflect.ValueOf(eIn.value), false, nil)
+			eOut.value = v.Interface()
+		}
+		if eIn.enc != nil {
+			eOut.enc = make([]byte, len(eIn.enc))
+			copy(eOut.enc, eIn.enc)
+		}
+
+		out[extNum] = eOut
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/custom_gogo.go b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go
new file mode 100644
index 0000000..2455248
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/custom_gogo.go
@@ -0,0 +1,39 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "reflect"
+
+type custom interface {
+	Marshal() ([]byte, error)
+	Unmarshal(data []byte) error
+	Size() int
+}
+
+var customType = reflect.TypeOf((*custom)(nil)).Elem()
diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go
new file mode 100644
index 0000000..63b0f08
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/decode.go
@@ -0,0 +1,427 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// ErrInternalBadWireType is returned by generated code when an incorrect
+// wire type is encountered. It does not get returned to user code.
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough data.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+	for shift := uint(0); shift < 64; shift += 7 {
+		if n >= len(buf) {
+			return 0, 0
+		}
+		b := uint64(buf[n])
+		n++
+		x |= (b & 0x7F) << shift
+		if (b & 0x80) == 0 {
+			return x, n
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	return 0, 0
+}
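+
+// Worked example (an illustrative sketch, not from the upstream docs): the
+// value 150 is encoded as the two varint bytes 0x96 0x01, so DecodeVarint
+// returns it with a consumed-byte count of 2.
+//
+//	x, n := proto.DecodeVarint([]byte{0x96, 0x01})
+//	// x == 150, n == 2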
+
+func (p *Buffer) decodeVarintSlow() (x uint64, err error) {
+	i := p.index
+	l := len(p.buf)
+
+	for shift := uint(0); shift < 64; shift += 7 {
+		if i >= l {
+			err = io.ErrUnexpectedEOF
+			return
+		}
+		b := p.buf[i]
+		i++
+		x |= (uint64(b) & 0x7F) << shift
+		if b < 0x80 {
+			p.index = i
+			return
+		}
+	}
+
+	// The number is too large to represent in a 64-bit value.
+	err = errOverflow
+	return
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+	i := p.index
+	buf := p.buf
+
+	if i >= len(buf) {
+		return 0, io.ErrUnexpectedEOF
+	} else if buf[i] < 0x80 {
+		p.index++
+		return uint64(buf[i]), nil
+	} else if len(buf)-i < 10 {
+		return p.decodeVarintSlow()
+	}
+
+	var b uint64
+	// we already checked the first byte
+	x = uint64(buf[i]) - 0x80
+	i++
+
+	b = uint64(buf[i])
+	i++
+	x += b << 7
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 7
+
+	b = uint64(buf[i])
+	i++
+	x += b << 14
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 14
+
+	b = uint64(buf[i])
+	i++
+	x += b << 21
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 21
+
+	b = uint64(buf[i])
+	i++
+	x += b << 28
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 28
+
+	b = uint64(buf[i])
+	i++
+	x += b << 35
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 35
+
+	b = uint64(buf[i])
+	i++
+	x += b << 42
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 42
+
+	b = uint64(buf[i])
+	i++
+	x += b << 49
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 49
+
+	b = uint64(buf[i])
+	i++
+	x += b << 56
+	if b&0x80 == 0 {
+		goto done
+	}
+	x -= 0x80 << 56
+
+	b = uint64(buf[i])
+	i++
+	x += b << 63
+	if b&0x80 == 0 {
+		goto done
+	}
+
+	return 0, errOverflow
+
+done:
+	p.index = i
+	return x, nil
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 8
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-8])
+	x |= uint64(p.buf[i-7]) << 8
+	x |= uint64(p.buf[i-6]) << 16
+	x |= uint64(p.buf[i-5]) << 24
+	x |= uint64(p.buf[i-4]) << 32
+	x |= uint64(p.buf[i-3]) << 40
+	x |= uint64(p.buf[i-2]) << 48
+	x |= uint64(p.buf[i-1]) << 56
+	return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+	// x, err already 0
+	i := p.index + 4
+	if i < 0 || i > len(p.buf) {
+		err = io.ErrUnexpectedEOF
+		return
+	}
+	p.index = i
+
+	x = uint64(p.buf[i-4])
+	x |= uint64(p.buf[i-3]) << 8
+	x |= uint64(p.buf[i-2]) << 16
+	x |= uint64(p.buf[i-1]) << 24
+	return
+}
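+
+// Worked example (an illustrative sketch): fixed32 values are little-endian,
+// so the bytes 0x01 0x00 0x00 0x00 decode to 1.
+//
+//	p := proto.NewBuffer([]byte{0x01, 0x00, 0x00, 0x00})
+//	x, _ := p.DecodeFixed32() // x == 1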
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+	return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+	x, err = p.DecodeVarint()
+	if err != nil {
+		return
+	}
+	x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+	return
+}
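+
+// Zigzag sketch (illustrative, not from the upstream docs): sint32/sint64
+// fields map signed values to unsigned ones so small magnitudes stay small
+// on the wire: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, and so on.
+//
+//	p := proto.NewBuffer([]byte{0x03}) // zigzag encoding of -2
+//	x, _ := p.DecodeZigzag64()         // int64(x) == -2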
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+	n, err := p.DecodeVarint()
+	if err != nil {
+		return nil, err
+	}
+
+	nb := int(n)
+	if nb < 0 {
+		return nil, fmt.Errorf("proto: bad byte length %d", nb)
+	}
+	end := p.index + nb
+	if end < p.index || end > len(p.buf) {
+		return nil, io.ErrUnexpectedEOF
+	}
+
+	if !alloc {
+	// TODO: check if we can get more uses of alloc=false
+		buf = p.buf[p.index:end]
+		p.index += nb
+		return
+	}
+
+	buf = make([]byte, nb)
+	copy(buf, p.buf[p.index:])
+	p.index += nb
+	return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+	buf, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return
+	}
+	return string(buf), nil
+}
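+
+// Worked example (an illustrative sketch): length-delimited fields carry a
+// varint length followed by that many bytes, so 0x03 'f' 'o' 'o' decodes to
+// the string "foo".
+//
+//	p := proto.NewBuffer([]byte{0x03, 'f', 'o', 'o'})
+//	s, _ := p.DecodeStringBytes() // s == "foo"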
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves.  The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+// Unmarshal implementations should not clear the receiver.
+// Any unmarshaled data should be merged into the receiver.
+// Callers of Unmarshal that do not want to retain existing data
+// should Reset the receiver before calling Unmarshal.
+type Unmarshaler interface {
+	Unmarshal([]byte) error
+}
+
+// newUnmarshaler is the interface representing objects that can
+// unmarshal themselves. The semantics are identical to Unmarshaler.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newUnmarshaler interface {
+	XXX_Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+	pb.Reset()
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
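+
+// Caller-side sketch, assuming a hypothetical generated type pb.Example and
+// wire-format bytes in data: Unmarshal resets the destination first, while
+// UnmarshalMerge below keeps any data already present in it.
+//
+//	msg := &pb.Example{}
+//	if err := proto.Unmarshal(data, msg); err != nil {
+//		// handle malformed input
+//	}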
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb.  If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+	if u, ok := pb.(newUnmarshaler); ok {
+		return u.XXX_Unmarshal(buf)
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent about
+		// whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		return u.Unmarshal(buf)
+	}
+	return NewBuffer(buf).Unmarshal(pb)
+}
+
+// DecodeMessage reads a count-delimited message from the Buffer.
+func (p *Buffer) DecodeMessage(pb Message) error {
+	enc, err := p.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return NewBuffer(enc).Unmarshal(pb)
+}
+
+// DecodeGroup reads a tag-delimited group from the Buffer.
+// The StartGroup tag is assumed to have already been consumed; this
+// function consumes the EndGroup tag.
+func (p *Buffer) DecodeGroup(pb Message) error {
+	b := p.buf[p.index:]
+	x, y := findEndGroup(b)
+	if x < 0 {
+		return io.ErrUnexpectedEOF
+	}
+	err := Unmarshal(b[:x], pb)
+	p.index += y
+	return err
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb.  If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+//
+// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal.
+func (p *Buffer) Unmarshal(pb Message) error {
+	// If the object can unmarshal itself, let it.
+	if u, ok := pb.(newUnmarshaler); ok {
+		err := u.XXX_Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+	if u, ok := pb.(Unmarshaler); ok {
+		// NOTE: The history of proto has unfortunately been inconsistent about
+		// whether Unmarshaler should or should not implicitly clear itself.
+		// Some implementations do, most do not.
+		// Thus, calling this here may or may not do what people want.
+		//
+		// See https://github.com/golang/protobuf/issues/424
+		err := u.Unmarshal(p.buf[p.index:])
+		p.index = len(p.buf)
+		return err
+	}
+
+	// Slow workaround for messages that aren't Unmarshalers.
+	// This includes some hand-coded .pb.go files and
+	// bootstrap protos.
+	// TODO: fix all of those and then add Unmarshal to
+	// the Message interface. Then:
+	// The cast above and code below can be deleted.
+	// The old unmarshaler can be deleted.
+	// Clients can call Unmarshal directly (can already do that, actually).
+	var info InternalMessageInfo
+	err := info.Unmarshal(pb, p.buf[p.index:])
+	p.index = len(p.buf)
+	return err
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/deprecated.go b/vendor/github.com/gogo/protobuf/proto/deprecated.go
new file mode 100644
index 0000000..35b882c
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/deprecated.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2018 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import "errors"
+
+// Deprecated: do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: do not use.
+func RegisterMessageSetType(Message, int32, string) {}
diff --git a/vendor/github.com/gogo/protobuf/proto/discard.go b/vendor/github.com/gogo/protobuf/proto/discard.go
new file mode 100644
index 0000000..fe1bd7d
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/discard.go
@@ -0,0 +1,350 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2017 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+type generatedDiscarder interface {
+	XXX_DiscardUnknown()
+}
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+//
+// For proto2 messages, the unknown fields of message extensions are only
+// discarded from messages that have been accessed via GetExtension.
+func DiscardUnknown(m Message) {
+	if m, ok := m.(generatedDiscarder); ok {
+		m.XXX_DiscardUnknown()
+		return
+	}
+	// TODO: Dynamically populate an InternalMessageInfo for legacy messages,
+	// but the master branch has no implementation for InternalMessageInfo,
+	// so it would be more work to replicate that approach.
+	discardLegacy(m)
+}
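+
+// Usage sketch, assuming a hypothetical generated type pb.Example and
+// wire-format bytes in data: unknown fields preserved during Unmarshal can
+// be dropped so a later Marshal does not re-emit them.
+//
+//	msg := &pb.Example{}
+//	_ = proto.Unmarshal(data, msg)
+//	proto.DiscardUnknown(msg)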
+
+// DiscardUnknown recursively discards all unknown fields.
+func (a *InternalMessageInfo) DiscardUnknown(m Message) {
+	di := atomicLoadDiscardInfo(&a.discard)
+	if di == nil {
+		di = getDiscardInfo(reflect.TypeOf(m).Elem())
+		atomicStoreDiscardInfo(&a.discard, di)
+	}
+	di.discard(toPointer(&m))
+}
+
+type discardInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []discardFieldInfo
+	unrecognized field
+}
+
+type discardFieldInfo struct {
+	field   field // Offset of field, guaranteed to be valid
+	discard func(src pointer)
+}
+
+var (
+	discardInfoMap  = map[reflect.Type]*discardInfo{}
+	discardInfoLock sync.Mutex
+)
+
+func getDiscardInfo(t reflect.Type) *discardInfo {
+	discardInfoLock.Lock()
+	defer discardInfoLock.Unlock()
+	di := discardInfoMap[t]
+	if di == nil {
+		di = &discardInfo{typ: t}
+		discardInfoMap[t] = di
+	}
+	return di
+}
+
+func (di *discardInfo) discard(src pointer) {
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&di.initialized) == 0 {
+		di.computeDiscardInfo()
+	}
+
+	for _, fi := range di.fields {
+		sfp := src.offset(fi.field)
+		fi.discard(sfp)
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil {
+		// Ignore lock since DiscardUnknown is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				DiscardUnknown(m)
+			}
+		}
+	}
+
+	if di.unrecognized.IsValid() {
+		*src.offset(di.unrecognized).toBytes() = nil
+	}
+}
+
+func (di *discardInfo) computeDiscardInfo() {
+	di.lock.Lock()
+	defer di.lock.Unlock()
+	if di.initialized != 0 {
+		return
+	}
+	t := di.typ
+	n := t.NumField()
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		dfi := discardFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name))
+			case isSlice: // E.g., []*pb.T
+				discardInfo := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sps := src.getPointerSlice()
+					for _, sp := range sps {
+						if !sp.isNil() {
+							discardInfo.discard(sp)
+						}
+					}
+				}
+			default: // E.g., *pb.T
+				discardInfo := getDiscardInfo(tf)
+				dfi.discard = func(src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						discardInfo.discard(sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name))
+			default: // E.g., map[K]V
+				if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T)
+					dfi.discard = func(src pointer) {
+						sm := src.asPointerTo(tf).Elem()
+						if sm.Len() == 0 {
+							return
+						}
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							DiscardUnknown(val.Interface().(Message))
+						}
+					}
+				} else {
+					dfi.discard = func(pointer) {} // Noop
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%v.%s cannot be a pointer to an interface or a slice of interface values", t, f.Name))
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				dfi.discard = func(src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							DiscardUnknown(sv.Interface().(Message))
+						}
+					}
+				}
+			}
+		default:
+			continue
+		}
+		di.fields = append(di.fields, dfi)
+	}
+
+	di.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		di.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&di.initialized, 1)
+}
+
+func discardLegacy(m Message) {
+	v := reflect.ValueOf(m)
+	if v.Kind() != reflect.Ptr || v.IsNil() {
+		return
+	}
+	v = v.Elem()
+	if v.Kind() != reflect.Struct {
+		return
+	}
+	t := v.Type()
+
+	for i := 0; i < v.NumField(); i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		vf := v.Field(i)
+		tf := f.Type
+
+		// Unwrap tf to get its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
+		}
+
+		switch tf.Kind() {
+		case reflect.Struct:
+			switch {
+			case !isPointer:
+				panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
+			case isSlice: // E.g., []*pb.T
+				for j := 0; j < vf.Len(); j++ {
+					discardLegacy(vf.Index(j).Interface().(Message))
+				}
+			default: // E.g., *pb.T
+				discardLegacy(vf.Interface().(Message))
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
+			default: // E.g., map[K]V
+				tv := vf.Type().Elem()
+				if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
+					for _, key := range vf.MapKeys() {
+						val := vf.MapIndex(key)
+						discardLegacy(val.Interface().(Message))
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic(fmt.Sprintf("%T.%s cannot be a pointer to an interface or a slice of interface values", m, f.Name))
+			default: // E.g., test_proto.isCommunique_Union interface
+				if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
+					vf = vf.Elem() // E.g., *test_proto.Communique_Msg
+					if !vf.IsNil() {
+						vf = vf.Elem()   // E.g., test_proto.Communique_Msg
+						vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
+						if vf.Kind() == reflect.Ptr {
+							discardLegacy(vf.Interface().(Message))
+						}
+					}
+				}
+			}
+		}
+	}
+
+	if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
+		if vf.Type() != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		vf.Set(reflect.ValueOf([]byte(nil)))
+	}
+
+	// For proto2 messages, only discard unknown fields in message extensions
+	// that have been accessed via GetExtension.
+	if em, err := extendable(m); err == nil {
+		// Ignore lock since discardLegacy is not concurrency safe.
+		emm, _ := em.extensionsRead()
+		for _, mx := range emm {
+			if m, ok := mx.value.(Message); ok {
+				discardLegacy(m)
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go
new file mode 100644
index 0000000..93464c9
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/duration.go
@@ -0,0 +1,100 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// This file implements conversions between google.protobuf.Duration
+// and time.Duration.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const (
+	// Range of a Duration in seconds, as specified in
+	// google/protobuf/duration.proto. This is about 10,000 years in seconds.
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// validateDuration determines whether the Duration is valid according to the
+// definition in google/protobuf/duration.proto. A valid Duration
+// may still be too large to fit into a time.Duration (the range of Duration
+// is about 10,000 years, and the range of time.Duration is about 290 years).
+func validateDuration(d *duration) error {
+	if d == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if d.Seconds < minSeconds || d.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %#v: seconds out of range", d)
+	}
+	if d.Nanos <= -1e9 || d.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %#v: nanos out of range", d)
+	}
+	// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
+	if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) {
+		return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d)
+	}
+	return nil
+}
+
+// durationFromProto converts a Duration to a time.Duration. It
+// returns an error if the Duration is invalid or is too large to be
+// represented in a time.Duration.
+func durationFromProto(p *duration) (time.Duration, error) {
+	if err := validateDuration(p); err != nil {
+		return 0, err
+	}
+	d := time.Duration(p.Seconds) * time.Second
+	if int64(d/time.Second) != p.Seconds {
+		return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+	}
+	if p.Nanos != 0 {
+		d += time.Duration(p.Nanos)
+		if (d < 0) != (p.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p)
+		}
+	}
+	return d, nil
+}
+
+// durationProto converts a time.Duration to a Duration.
+func durationProto(d time.Duration) *duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &duration{
+		Seconds: secs,
+		Nanos:   int32(nanos),
+	}
+}
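+
+// Round-trip sketch (these helpers are unexported, so the example is
+// package-internal): 1.5s converts to {Seconds: 1, Nanos: 5e8} and back.
+//
+//	d := durationProto(1500 * time.Millisecond)
+//	got, _ := durationFromProto(d) // got == 1500*time.Millisecond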
diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
new file mode 100644
index 0000000..e748e17
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem()
+
+type duration struct {
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *duration) Reset()       { *m = duration{} }
+func (*duration) ProtoMessage()  {}
+func (*duration) String() string { return "duration<string>" }
+
+func init() {
+	RegisterType((*duration)(nil), "gogo.protobuf.proto.duration")
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go
new file mode 100644
index 0000000..9581ccd
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/encode.go
@@ -0,0 +1,205 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"errors"
+	"reflect"
+)
+
+var (
+	// errRepeatedHasNil is the error returned if Marshal is called with
+	// a struct with a repeated field containing a nil element.
+	errRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+	// errOneofHasNil is the error returned if Marshal is called with
+	// a struct with a oneof field containing a nil element.
+	errOneofHasNil = errors.New("proto: oneof field has nil value")
+
+	// ErrNil is the error returned if Marshal is called with nil.
+	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// ErrTooLarge is the error returned if Marshal is called with a
+	// message that encodes to >2GB.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+	var buf [maxVarintBytes]byte
+	var n int
+	for n = 0; x > 127; n++ {
+		buf[n] = 0x80 | uint8(x&0x7F)
+		x >>= 7
+	}
+	buf[n] = uint8(x)
+	n++
+	return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+	for x >= 1<<7 {
+		p.buf = append(p.buf, uint8(x&0x7f|0x80))
+		x >>= 7
+	}
+	p.buf = append(p.buf, uint8(x))
+	return nil
+}
+
+// SizeVarint returns the varint encoding size of an integer.
+func SizeVarint(x uint64) int {
+	switch {
+	case x < 1<<7:
+		return 1
+	case x < 1<<14:
+		return 2
+	case x < 1<<21:
+		return 3
+	case x < 1<<28:
+		return 4
+	case x < 1<<35:
+		return 5
+	case x < 1<<42:
+		return 6
+	case x < 1<<49:
+		return 7
+	case x < 1<<56:
+		return 8
+	case x < 1<<63:
+		return 9
+	}
+	return 10
+}
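+
+// Worked example (an illustrative sketch): 300 needs two varint bytes.
+//
+//	b := proto.EncodeVarint(300) // []byte{0xac, 0x02}
+//	n := proto.SizeVarint(300)   // n == 2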
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24),
+		uint8(x>>32),
+		uint8(x>>40),
+		uint8(x>>48),
+		uint8(x>>56))
+	return nil
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+	p.buf = append(p.buf,
+		uint8(x),
+		uint8(x>>8),
+		uint8(x>>16),
+		uint8(x>>24))
+	return nil
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+	// use signed number to get arithmetic right shift.
+	return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+	p.EncodeVarint(uint64(len(b)))
+	p.buf = append(p.buf, b...)
+	return nil
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+	p.EncodeVarint(uint64(len(s)))
+	p.buf = append(p.buf, s...)
+	return nil
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+	Marshal() ([]byte, error)
+}
+
+// EncodeMessage writes the protocol buffer to the Buffer,
+// prefixed by a varint-encoded length.
+func (p *Buffer) EncodeMessage(pb Message) error {
+	siz := Size(pb)
+	sizVar := SizeVarint(uint64(siz))
+	p.grow(siz + sizVar)
+	p.EncodeVarint(uint64(siz))
+	return p.Marshal(pb)
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return v.IsNil()
+	}
+	return false
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
new file mode 100644
index 0000000..0f5fb17
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go
@@ -0,0 +1,33 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
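+// NewRequiredNotSetError returns a RequiredNotSetError naming the required
+// field that was not set.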
+func NewRequiredNotSetError(field string) *RequiredNotSetError {
+	return &RequiredNotSetError{field}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go
new file mode 100644
index 0000000..d4db5a1
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/equal.go
@@ -0,0 +1,300 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+	"bytes"
+	"log"
+	"reflect"
+	"strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+  - Two messages are equal iff they are the same type,
+    corresponding fields are equal, unknown field sets
+//    are equal, and extension sets are equal.
+  - Two set scalar fields are equal iff their values are equal.
+    If the fields are of a floating-point type, remember that
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
+  - Two repeated fields are equal iff their lengths are the same,
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
+  - Two unset fields are equal.
+  - Two unknown field sets are equal if their current
+    encoded state is equal.
+  - Two extension sets are equal iff they have corresponding
+    elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
+//  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+	if a == nil || b == nil {
+		return a == b
+	}
+	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if v1.Kind() == reflect.Ptr {
+		if v1.IsNil() {
+			return v2.IsNil()
+		}
+		if v2.IsNil() {
+			return false
+		}
+		v1, v2 = v1.Elem(), v2.Elem()
+	}
+	if v1.Kind() != reflect.Struct {
+		return false
+	}
+	return equalStruct(v1, v2)
+}
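+
+// Usage sketch, assuming a hypothetical generated type pb.Example: two
+// freshly built messages with the same set fields compare equal.
+//
+//	a := &pb.Example{Name: proto.String("x")}
+//	b := &pb.Example{Name: proto.String("x")}
+//	_ = proto.Equal(a, b) // true: same type, same set fields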
+
+// v1 and v2 are known to have the same type.
+func equalStruct(v1, v2 reflect.Value) bool {
+	sprop := GetProperties(v1.Type())
+	for i := 0; i < v1.NumField(); i++ {
+		f := v1.Type().Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		f1, f2 := v1.Field(i), v2.Field(i)
+		if f.Type.Kind() == reflect.Ptr {
+			if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+				// both unset
+				continue
+			} else if n1 != n2 {
+				// set/unset mismatch
+				return false
+			}
+			f1, f2 = f1.Elem(), f2.Elem()
+		}
+		if !equalAny(f1, f2, sprop.Prop[i]) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_InternalExtensions")
+		if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) {
+			return false
+		}
+	}
+
+	if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+		em2 := v2.FieldByName("XXX_extensions")
+		if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+			return false
+		}
+	}
+
+	uf := v1.FieldByName("XXX_unrecognized")
+	if !uf.IsValid() {
+		return true
+	}
+
+	u1 := uf.Bytes()
+	u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+	return bytes.Equal(u1, u2)
+}
+
+// v1 and v2 are known to have the same type.
+// prop may be nil.
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool {
+	if v1.Type() == protoMessageType {
+		m1, _ := v1.Interface().(Message)
+		m2, _ := v2.Interface().(Message)
+		return Equal(m1, m2)
+	}
+	switch v1.Kind() {
+	case reflect.Bool:
+		return v1.Bool() == v2.Bool()
+	case reflect.Float32, reflect.Float64:
+		return v1.Float() == v2.Float()
+	case reflect.Int32, reflect.Int64:
+		return v1.Int() == v2.Int()
+	case reflect.Interface:
+		// Probably a oneof field; compare the inner values.
+		n1, n2 := v1.IsNil(), v2.IsNil()
+		if n1 || n2 {
+			return n1 == n2
+		}
+		e1, e2 := v1.Elem(), v2.Elem()
+		if e1.Type() != e2.Type() {
+			return false
+		}
+		return equalAny(e1, e2, nil)
+	case reflect.Map:
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for _, key := range v1.MapKeys() {
+			val2 := v2.MapIndex(key)
+			if !val2.IsValid() {
+				// This key was not found in the second map.
+				return false
+			}
+			if !equalAny(v1.MapIndex(key), val2, nil) {
+				return false
+			}
+		}
+		return true
+	case reflect.Ptr:
+		// Maps may have nil values in them, so check for nil.
+		if v1.IsNil() && v2.IsNil() {
+			return true
+		}
+		if v1.IsNil() != v2.IsNil() {
+			return false
+		}
+		return equalAny(v1.Elem(), v2.Elem(), prop)
+	case reflect.Slice:
+		if v1.Type().Elem().Kind() == reflect.Uint8 {
+			// short circuit: []byte
+
+			// Edge case: if this is in a proto3 message, a zero length
+			// bytes field is considered the zero value.
+			if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 {
+				return true
+			}
+			if v1.IsNil() != v2.IsNil() {
+				return false
+			}
+			return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+		}
+
+		if v1.Len() != v2.Len() {
+			return false
+		}
+		for i := 0; i < v1.Len(); i++ {
+			if !equalAny(v1.Index(i), v2.Index(i), prop) {
+				return false
+			}
+		}
+		return true
+	case reflect.String:
+		return v1.Interface().(string) == v2.Interface().(string)
+	case reflect.Struct:
+		return equalStruct(v1, v2)
+	case reflect.Uint32, reflect.Uint64:
+		return v1.Uint() == v2.Uint()
+	}
+
+	// unknown type, so not a protocol buffer
+	log.Printf("proto: don't know how to compare %v", v1)
+	return false
+}
+
+// base is the struct type that the extensions are based on.
+// x1 and x2 are InternalExtensions.
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool {
+	em1, _ := x1.extensionsRead()
+	em2, _ := x2.extensionsRead()
+	return equalExtMap(base, em1, em2)
+}
+
+func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool {
+	if len(em1) != len(em2) {
+		return false
+	}
+
+	for extNum, e1 := range em1 {
+		e2, ok := em2[extNum]
+		if !ok {
+			return false
+		}
+
+		m1, m2 := e1.value, e2.value
+
+		if m1 == nil && m2 == nil {
+			// Both have only encoded form.
+			if bytes.Equal(e1.enc, e2.enc) {
+				continue
+			}
+			// The bytes are different, but the extensions might still be
+			// equal. We need to decode them to compare.
+		}
+
+		if m1 != nil && m2 != nil {
+			// Both are unencoded.
+			if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+				return false
+			}
+			continue
+		}
+
+		// At least one is encoded. To do a semantically correct comparison
+		// we need to unmarshal them first.
+		var desc *ExtensionDesc
+		if m := extensionMaps[base]; m != nil {
+			desc = m[extNum]
+		}
+		if desc == nil {
+			// We don't know how to decode this extension. If both sides had
+			// only the encoded form and the bytes matched, that was handled
+			// above; here either the bytes differ or only one side is decoded,
+			// so the best we can do is report the extensions as unequal.
+			log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+			return false
+		}
+		var err error
+		if m1 == nil {
+			m1, err = decodeExtension(e1.enc, desc)
+		}
+		if m2 == nil && err == nil {
+			m2, err = decodeExtension(e2.enc, desc)
+		}
+		if err != nil {
+			// The encoded form is invalid.
+			log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+			return false
+		}
+		if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) {
+			return false
+		}
+	}
+
+	return true
+}
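For illustration, a minimal sketch of how this equality path is normally reached from application code; `pb.MyMessage` and its fields are hypothetical generated types, not part of this change:

```go
// Equal (the entry point earlier in this file) walks exported fields,
// extensions and unknown bytes via equalStruct, equalAny, equalExtensions
// and equalExtMap.
m1 := &pb.MyMessage{Name: proto.String("a")} // hypothetical generated message
m2 := &pb.MyMessage{Name: proto.String("a")}
same := proto.Equal(m1, m2) // true when the messages are semantically equal
_ = same
```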
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go
new file mode 100644
index 0000000..341c6f5
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/extensions.go
@@ -0,0 +1,605 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"strconv"
+	"sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+	Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer generated by the current
+// proto compiler that may be extended.
+type extendableProto interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	extensionsWrite() map[int32]Extension
+	extensionsRead() (map[int32]Extension, sync.Locker)
+}
+
+// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous
+// version of the proto compiler that may be extended.
+type extendableProtoV1 interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	ExtensionMap() map[int32]Extension
+}
+
+// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto.
+type extensionAdapter struct {
+	extendableProtoV1
+}
+
+func (e extensionAdapter) extensionsWrite() map[int32]Extension {
+	return e.ExtensionMap()
+}
+
+func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	return e.ExtensionMap(), notLocker{}
+}
+
+// notLocker is a sync.Locker whose Lock and Unlock methods are nops.
+type notLocker struct{}
+
+func (n notLocker) Lock()   {}
+func (n notLocker) Unlock() {}
+
+// extendable returns the extendableProto interface for the given generated proto message.
+// If the proto message has the old extension format, it returns a wrapper that implements
+// the extendableProto interface.
+func extendable(p interface{}) (extendableProto, error) {
+	switch p := p.(type) {
+	case extendableProto:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return p, nil
+	case extendableProtoV1:
+		if isNilPtr(p) {
+			return nil, fmt.Errorf("proto: nil %T is not extendable", p)
+		}
+		return extensionAdapter{p}, nil
+	case extensionsBytes:
+		return slowExtensionAdapter{p}, nil
+	}
+	// Don't allocate a specific error containing %T:
+	// this is the hot path for Clone and MarshalText.
+	return nil, errNotExtendable
+}
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+func isNilPtr(x interface{}) bool {
+	v := reflect.ValueOf(x)
+	return v.Kind() == reflect.Ptr && v.IsNil()
+}
+
+// XXX_InternalExtensions is an internal representation of proto extensions.
+//
+// Each generated message struct type embeds an anonymous XXX_InternalExtensions field,
+// thus gaining the unexported 'extensions' method, which can be called only from the proto package.
+//
+// The methods of XXX_InternalExtensions are not concurrency safe in general,
+// but calls to logically read-only methods such as has and get may be executed concurrently.
+type XXX_InternalExtensions struct {
+	// The struct must be indirect so that if a user inadvertently copies a
+	// generated message and its embedded XXX_InternalExtensions, they
+	// avoid the mayhem of a copied mutex.
+	//
+	// The mutex serializes all logically read-only operations to p.extensionMap.
+	// It is up to the client to ensure that write operations to p.extensionMap are
+	// mutually exclusive with other accesses.
+	p *struct {
+		mu           sync.Mutex
+		extensionMap map[int32]Extension
+	}
+}
+
+// extensionsWrite returns the extension map, creating it on first use.
+func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension {
+	if e.p == nil {
+		e.p = new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		})
+		e.p.extensionMap = make(map[int32]Extension)
+	}
+	return e.p.extensionMap
+}
+
+// extensionsRead returns the extensions map for read-only use.  It may be nil.
+// The caller must hold the returned mutex's lock when accessing elements within the map.
+func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) {
+	if e.p == nil {
+		return nil, nil
+	}
+	return e.p.extensionMap, &e.p.mu
+}
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+	ExtendedType  Message     // nil pointer to the type that is being extended
+	ExtensionType interface{} // nil pointer to the extension type
+	Field         int32       // field number
+	Name          string      // fully-qualified name of extension, for text formatting
+	Tag           string      // protobuf tag style
+	Filename      string      // name of the file in which the extension is defined
+}
+
+func (ed *ExtensionDesc) repeated() bool {
+	t := reflect.TypeOf(ed.ExtensionType)
+	return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+	// When an extension is stored in a message using SetExtension
+	// only desc and value are set. When the message is marshaled
+	// enc will be set to the encoded form of the message.
+	//
+	// When a message is unmarshaled and contains extensions, each
+	// extension will have only enc set. When such an extension is
+	// accessed using GetExtension (or GetExtensions) desc and value
+	// will be set.
+	desc  *ExtensionDesc
+	value interface{}
+	enc   []byte
+}
+
+// SetRawExtension is for testing only.
+func SetRawExtension(base Message, id int32, b []byte) {
+	if ebase, ok := base.(extensionsBytes); ok {
+		clearExtension(base, id)
+		ext := ebase.GetExtensions()
+		*ext = append(*ext, b...)
+		return
+	}
+	epb, err := extendable(base)
+	if err != nil {
+		return
+	}
+	extmap := epb.extensionsWrite()
+	extmap[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+	for _, er := range pb.ExtensionRangeArray() {
+		if er.Start <= field && field <= er.End {
+			return true
+		}
+	}
+	return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+	var pbi interface{} = pb
+	// Check the extended type.
+	if ea, ok := pbi.(extensionAdapter); ok {
+		pbi = ea.extendableProtoV1
+	}
+	if ea, ok := pbi.(slowExtensionAdapter); ok {
+		pbi = ea.extensionsBytes
+	}
+	if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b {
+		return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a)
+	}
+	// Check the range.
+	if !isExtensionField(pb, extension.Field) {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+	base  reflect.Type
+	field int32
+}
+
+var extProp = struct {
+	sync.RWMutex
+	m map[extPropKey]*Properties
+}{
+	m: make(map[extPropKey]*Properties),
+}
+
+func extensionProperties(ed *ExtensionDesc) *Properties {
+	key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+	extProp.RLock()
+	if prop, ok := extProp.m[key]; ok {
+		extProp.RUnlock()
+		return prop
+	}
+	extProp.RUnlock()
+
+	extProp.Lock()
+	defer extProp.Unlock()
+	// Check again.
+	if prop, ok := extProp.m[key]; ok {
+		return prop
+	}
+
+	prop := new(Properties)
+	prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+	extProp.m[key] = prop
+	return prop
+}
+
+// HasExtension returns whether the given extension is present in pb.
+func HasExtension(pb Message, extension *ExtensionDesc) bool {
+	if epb, doki := pb.(extensionsBytes); doki {
+		ext := epb.GetExtensions()
+		buf := *ext
+		o := 0
+		for o < len(buf) {
+			tag, n := DecodeVarint(buf[o:])
+			fieldNum := int32(tag >> 3)
+			if int32(fieldNum) == extension.Field {
+				return true
+			}
+			wireType := int(tag & 0x7)
+			o += n
+			l, err := size(buf[o:], wireType)
+			if err != nil {
+				return false
+			}
+			o += l
+		}
+		return false
+	}
+	// TODO: Check types, field numbers, etc.?
+	epb, err := extendable(pb)
+	if err != nil {
+		return false
+	}
+	extmap, mu := epb.extensionsRead()
+	if extmap == nil {
+		return false
+	}
+	mu.Lock()
+	_, ok := extmap[extension.Field]
+	mu.Unlock()
+	return ok
+}
+
+// ClearExtension removes the given extension from pb.
+func ClearExtension(pb Message, extension *ExtensionDesc) {
+	clearExtension(pb, extension.Field)
+}
+
+func clearExtension(pb Message, fieldNum int32) {
+	if epb, ok := pb.(extensionsBytes); ok {
+		offset := 0
+		for offset != -1 {
+			offset = deleteExtension(epb, fieldNum, offset)
+		}
+		return
+	}
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	// TODO: Check types, field numbers, etc.?
+	extmap := epb.extensionsWrite()
+	delete(extmap, fieldNum)
+}
+
+// GetExtension retrieves a proto2 extended field from pb.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes of the field extension.
+func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) {
+	if epb, doki := pb.(extensionsBytes); doki {
+		ext := epb.GetExtensions()
+		return decodeExtensionFromBytes(extension, *ext)
+	}
+
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+
+	if extension.ExtendedType != nil {
+		// can only check type if this is a complete descriptor
+		if cerr := checkExtensionTypes(epb, extension); cerr != nil {
+			return nil, cerr
+		}
+	}
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return defaultExtensionValue(extension)
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	e, ok := emap[extension.Field]
+	if !ok {
+		// defaultExtensionValue returns the default value or
+		// ErrMissingExtension if there is no default.
+		return defaultExtensionValue(extension)
+	}
+
+	if e.value != nil {
+		// Already decoded. Check the descriptor, though.
+		if e.desc != extension {
+			// This shouldn't happen. If it does, it means that
+			// GetExtension was called twice with two different
+			// descriptors with the same field number.
+			return nil, errors.New("proto: descriptor conflict")
+		}
+		return e.value, nil
+	}
+
+	if extension.ExtensionType == nil {
+		// incomplete descriptor
+		return e.enc, nil
+	}
+
+	v, err := decodeExtension(e.enc, extension)
+	if err != nil {
+		return nil, err
+	}
+
+	// Remember the decoded version and drop the encoded version.
+	// That way it is safe to mutate what we return.
+	e.value = v
+	e.desc = extension
+	e.enc = nil
+	emap[extension.Field] = e
+	return e.value, nil
+}
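As a hedged usage sketch of the read path (the descriptor `pb.E_MyExt` and its `*string` extension type are hypothetical):

```go
val, err := proto.GetExtension(msg, pb.E_MyExt)
switch {
case err == proto.ErrMissingExtension:
	// Extension not set and no default declared in the descriptor.
case err != nil:
	// Non-extendable message, type mismatch, or badly encoded data.
default:
	s := *(val.(*string)) // val has the descriptor's ExtensionType, here *string
	_ = s
}
```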
+
+// defaultExtensionValue returns the default value for extension.
+// If no default for an extension is defined ErrMissingExtension is returned.
+func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) {
+	if extension.ExtensionType == nil {
+		// incomplete descriptor, so no default
+		return nil, ErrMissingExtension
+	}
+
+	t := reflect.TypeOf(extension.ExtensionType)
+	props := extensionProperties(extension)
+
+	sf, _, err := fieldDefault(t, props)
+	if err != nil {
+		return nil, err
+	}
+
+	if sf == nil || sf.value == nil {
+		// There is no default value.
+		return nil, ErrMissingExtension
+	}
+
+	if t.Kind() != reflect.Ptr {
+		// We do not need to return a Ptr, we can directly return sf.value.
+		return sf.value, nil
+	}
+
+	// We need to return an interface{} that is a pointer to sf.value.
+	value := reflect.New(t).Elem()
+	value.Set(reflect.New(value.Type().Elem()))
+	if sf.kind == reflect.Int32 {
+		// We may have an int32 or an enum, but the underlying data is int32.
+		// Since we can't set an int32 into a non-int32 reflect.Value directly,
+		// set it as an int32.
+		value.Elem().SetInt(int64(sf.value.(int32)))
+	} else {
+		value.Elem().Set(reflect.ValueOf(sf.value))
+	}
+	return value.Interface(), nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+	t := reflect.TypeOf(extension.ExtensionType)
+	unmarshal := typeUnmarshaler(t, extension.Tag)
+
+	// t is a pointer to a struct, pointer to basic type or a slice.
+	// Allocate space to store the pointer/slice.
+	value := reflect.New(t).Elem()
+
+	var err error
+	for {
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		wire := int(x) & 7
+
+		b, err = unmarshal(b, valToPointer(value.Addr()), wire)
+		if err != nil {
+			return nil, err
+		}
+
+		if len(b) == 0 {
+			break
+		}
+	}
+	return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	extensions = make([]interface{}, len(es))
+	for i, e := range es {
+		extensions[i], err = GetExtension(epb, e)
+		if err == ErrMissingExtension {
+			err = nil
+		}
+		if err != nil {
+			return
+		}
+	}
+	return
+}
+
+// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order.
+// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing
+// just the Field field, which defines the extension's field number.
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) {
+	epb, err := extendable(pb)
+	if err != nil {
+		return nil, err
+	}
+	registeredExtensions := RegisteredExtensions(pb)
+
+	emap, mu := epb.extensionsRead()
+	if emap == nil {
+		return nil, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+	extensions := make([]*ExtensionDesc, 0, len(emap))
+	for extid, e := range emap {
+		desc := e.desc
+		if desc == nil {
+			desc = registeredExtensions[extid]
+			if desc == nil {
+				desc = &ExtensionDesc{Field: extid}
+			}
+		}
+
+		extensions = append(extensions, desc)
+	}
+	return extensions, nil
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error {
+	if epb, ok := pb.(extensionsBytes); ok {
+		ClearExtension(pb, extension)
+		newb, err := encodeExtension(extension, value)
+		if err != nil {
+			return err
+		}
+		bb := epb.GetExtensions()
+		*bb = append(*bb, newb...)
+		return nil
+	}
+	epb, err := extendable(pb)
+	if err != nil {
+		return err
+	}
+	if err := checkExtensionTypes(epb, extension); err != nil {
+		return err
+	}
+	typ := reflect.TypeOf(extension.ExtensionType)
+	if typ != reflect.TypeOf(value) {
+		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType)
+	}
+	// nil extension values need to be caught early, because the
+	// encoder can't distinguish an ErrNil due to a nil extension
+	// from an ErrNil due to a missing field. Extensions are
+	// always optional, so the encoder would just swallow the error
+	// and drop all the extensions from the encoded message.
+	if reflect.ValueOf(value).IsNil() {
+		return fmt.Errorf("proto: SetExtension called with nil value of type %T", value)
+	}
+
+	extmap := epb.extensionsWrite()
+	extmap[extension.Field] = Extension{desc: extension, value: value}
+	return nil
+}
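A short sketch of the corresponding write path, again with a hypothetical `pb.E_MyExt` descriptor whose ExtensionType is `(*string)(nil)`:

```go
if err := proto.SetExtension(msg, pb.E_MyExt, proto.String("value")); err != nil {
	// Wrong value type, nil value, or field number outside the declared ranges.
	panic(err)
}
if proto.HasExtension(msg, pb.E_MyExt) {
	proto.ClearExtension(msg, pb.E_MyExt) // removes it again
}
```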
+
+// ClearAllExtensions clears all extensions from pb.
+func ClearAllExtensions(pb Message) {
+	if epb, doki := pb.(extensionsBytes); doki {
+		ext := epb.GetExtensions()
+		*ext = []byte{}
+		return
+	}
+	epb, err := extendable(pb)
+	if err != nil {
+		return
+	}
+	m := epb.extensionsWrite()
+	for k := range m {
+		delete(m, k)
+	}
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+func RegisterExtension(desc *ExtensionDesc) {
+	st := reflect.TypeOf(desc.ExtendedType).Elem()
+	m := extensionMaps[st]
+	if m == nil {
+		m = make(map[int32]*ExtensionDesc)
+		extensionMaps[st] = m
+	}
+	if _, ok := m[desc.Field]; ok {
+		panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+	}
+	m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+	return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
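Generated code registers its descriptors from an init function; a hedged sketch with illustrative names and field number:

```go
// E_MyExt extends the hypothetical message example.MyBase with an optional
// string field numbered 12345.
var E_MyExt = &proto.ExtensionDesc{
	ExtendedType:  (*MyBase)(nil),
	ExtensionType: (*string)(nil),
	Field:         12345,
	Name:          "example.my_ext",
	Tag:           "bytes,12345,opt,name=my_ext",
}

func init() {
	proto.RegisterExtension(E_MyExt) // panics if the field number is already registered
}
```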
diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
new file mode 100644
index 0000000..6f1ae12
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go
@@ -0,0 +1,389 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+)
+
+type extensionsBytes interface {
+	Message
+	ExtensionRangeArray() []ExtensionRange
+	GetExtensions() *[]byte
+}
+
+type slowExtensionAdapter struct {
+	extensionsBytes
+}
+
+func (s slowExtensionAdapter) extensionsWrite() map[int32]Extension {
+	panic("Please report a bug to github.com/gogo/protobuf if you see this message: Writing extensions is not supported for extensions stored in a byte slice field.")
+}
+
+func (s slowExtensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) {
+	b := s.GetExtensions()
+	m, err := BytesToExtensionsMap(*b)
+	if err != nil {
+		panic(err)
+	}
+	return m, notLocker{}
+}
+
+func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool {
+	if reflect.ValueOf(pb).IsNil() {
+		return ifnotset
+	}
+	value, err := GetExtension(pb, extension)
+	if err != nil {
+		return ifnotset
+	}
+	if value == nil {
+		return ifnotset
+	}
+	if value.(*bool) == nil {
+		return ifnotset
+	}
+	return *(value.(*bool))
+}
+
+func (this *Extension) Equal(that *Extension) bool {
+	if err := this.Encode(); err != nil {
+		return false
+	}
+	if err := that.Encode(); err != nil {
+		return false
+	}
+	return bytes.Equal(this.enc, that.enc)
+}
+
+func (this *Extension) Compare(that *Extension) int {
+	if err := this.Encode(); err != nil {
+		return 1
+	}
+	if err := that.Encode(); err != nil {
+		return -1
+	}
+	return bytes.Compare(this.enc, that.enc)
+}
+
+func SizeOfInternalExtension(m extendableProto) (n int) {
+	info := getMarshalInfo(reflect.TypeOf(m))
+	return info.sizeV1Extensions(m.extensionsWrite())
+}
+
+type sortableMapElem struct {
+	field int32
+	ext   Extension
+}
+
+func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions {
+	s := make(sortableExtensions, 0, len(m))
+	for k, v := range m {
+		s = append(s, &sortableMapElem{field: k, ext: v})
+	}
+	return s
+}
+
+type sortableExtensions []*sortableMapElem
+
+func (this sortableExtensions) Len() int { return len(this) }
+
+func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] }
+
+func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field }
+
+func (this sortableExtensions) String() string {
+	sort.Sort(this)
+	ss := make([]string, len(this))
+	for i := range this {
+		ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext)
+	}
+	return "map[" + strings.Join(ss, ",") + "]"
+}
+
+func StringFromInternalExtension(m extendableProto) string {
+	return StringFromExtensionsMap(m.extensionsWrite())
+}
+
+func StringFromExtensionsMap(m map[int32]Extension) string {
+	return newSortableExtensionsFromMap(m).String()
+}
+
+func StringFromExtensionsBytes(ext []byte) string {
+	m, err := BytesToExtensionsMap(ext)
+	if err != nil {
+		panic(err)
+	}
+	return StringFromExtensionsMap(m)
+}
+
+func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) {
+	return EncodeExtensionMap(m.extensionsWrite(), data)
+}
+
+func EncodeInternalExtensionBackwards(m extendableProto, data []byte) (n int, err error) {
+	return EncodeExtensionMapBackwards(m.extensionsWrite(), data)
+}
+
+func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) {
+	o := 0
+	for _, e := range m {
+		if err := e.Encode(); err != nil {
+			return 0, err
+		}
+		n := copy(data[o:], e.enc)
+		if n != len(e.enc) {
+			return 0, io.ErrShortBuffer
+		}
+		o += n
+	}
+	return o, nil
+}
+
+func EncodeExtensionMapBackwards(m map[int32]Extension, data []byte) (n int, err error) {
+	o := 0
+	end := len(data)
+	for _, e := range m {
+		if err := e.Encode(); err != nil {
+			return 0, err
+		}
+		n := copy(data[end-len(e.enc):], e.enc)
+		if n != len(e.enc) {
+			return 0, io.ErrShortBuffer
+		}
+		end -= n
+		o += n
+	}
+	return o, nil
+}
+
+func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) {
+	e := m[id]
+	if err := e.Encode(); err != nil {
+		return nil, err
+	}
+	return e.enc, nil
+}
+
+func size(buf []byte, wire int) (int, error) {
+	switch wire {
+	case WireVarint:
+		_, n := DecodeVarint(buf)
+		return n, nil
+	case WireFixed64:
+		return 8, nil
+	case WireBytes:
+		v, n := DecodeVarint(buf)
+		return int(v) + n, nil
+	case WireFixed32:
+		return 4, nil
+	case WireStartGroup:
+		offset := 0
+		for {
+			u, n := DecodeVarint(buf[offset:])
+			fwire := int(u & 0x7)
+			offset += n
+			if fwire == WireEndGroup {
+				return offset, nil
+			}
+			s, err := size(buf[offset:], fwire)
+			if err != nil {
+				return 0, err
+			}
+			offset += s
+		}
+	}
+	return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire)
+}
+
+func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) {
+	m := make(map[int32]Extension)
+	i := 0
+	for i < len(buf) {
+		tag, n := DecodeVarint(buf[i:])
+		if n <= 0 {
+			return nil, fmt.Errorf("unable to decode varint")
+		}
+		fieldNum := int32(tag >> 3)
+		wireType := int(tag & 0x7)
+		l, err := size(buf[i+n:], wireType)
+		if err != nil {
+			return nil, err
+		}
+		end := i + int(l) + n
+		m[int32(fieldNum)] = Extension{enc: buf[i:end]}
+		i = end
+	}
+	return m, nil
+}
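A brief sketch of inspecting byte-slice extension storage; `raw` is assumed to hold already-encoded extension fields:

```go
m, err := proto.BytesToExtensionsMap(raw)
if err != nil {
	panic(err)
}
for fieldNum, ext := range m {
	_ = ext // still in encoded form (enc set, value nil)
	fmt.Println("found extension field", fieldNum)
}
```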
+
+func NewExtension(e []byte) Extension {
+	ee := Extension{enc: make([]byte, len(e))}
+	copy(ee.enc, e)
+	return ee
+}
+
+func AppendExtension(e Message, tag int32, buf []byte) {
+	if ee, eok := e.(extensionsBytes); eok {
+		ext := ee.GetExtensions()
+		*ext = append(*ext, buf...)
+		return
+	}
+	if ee, eok := e.(extendableProto); eok {
+		m := ee.extensionsWrite()
+		ext := m[int32(tag)] // may be missing
+		ext.enc = append(ext.enc, buf...)
+		m[int32(tag)] = ext
+	}
+}
+
+func encodeExtension(extension *ExtensionDesc, value interface{}) ([]byte, error) {
+	u := getMarshalInfo(reflect.TypeOf(extension.ExtendedType))
+	ei := u.getExtElemInfo(extension)
+	v := value
+	p := toAddrPointer(&v, ei.isptr)
+	siz := ei.sizer(p, SizeVarint(ei.wiretag))
+	buf := make([]byte, 0, siz)
+	return ei.marshaler(buf, p, ei.wiretag, false)
+}
+
+func decodeExtensionFromBytes(extension *ExtensionDesc, buf []byte) (interface{}, error) {
+	o := 0
+	for o < len(buf) {
+		tag, n := DecodeVarint((buf)[o:])
+		fieldNum := int32(tag >> 3)
+		wireType := int(tag & 0x7)
+		if o+n > len(buf) {
+			return nil, fmt.Errorf("unable to decode extension")
+		}
+		l, err := size((buf)[o+n:], wireType)
+		if err != nil {
+			return nil, err
+		}
+		if int32(fieldNum) == extension.Field {
+			if o+n+l > len(buf) {
+				return nil, fmt.Errorf("unable to decode extension")
+			}
+			v, err := decodeExtension((buf)[o:o+n+l], extension)
+			if err != nil {
+				return nil, err
+			}
+			return v, nil
+		}
+		o += n + l
+	}
+	return defaultExtensionValue(extension)
+}
+
+func (this *Extension) Encode() error {
+	if this.enc == nil {
+		var err error
+		this.enc, err = encodeExtension(this.desc, this.value)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (this Extension) GoString() string {
+	if err := this.Encode(); err != nil {
+		return fmt.Sprintf("error encoding extension: %v", err)
+	}
+	return fmt.Sprintf("proto.NewExtension(%#v)", this.enc)
+}
+
+func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error {
+	typ := reflect.TypeOf(pb).Elem()
+	ext, ok := extensionMaps[typ]
+	if !ok {
+		return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+	}
+	desc, ok := ext[fieldNum]
+	if !ok {
+		return errors.New("proto: bad extension number; not in declared ranges")
+	}
+	return SetExtension(pb, desc, value)
+}
+
+func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) {
+	typ := reflect.TypeOf(pb).Elem()
+	ext, ok := extensionMaps[typ]
+	if !ok {
+		return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String())
+	}
+	desc, ok := ext[fieldNum]
+	if !ok {
+		return nil, fmt.Errorf("unregistered field number %d", fieldNum)
+	}
+	return GetExtension(pb, desc)
+}
+
+func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions {
+	x := &XXX_InternalExtensions{
+		p: new(struct {
+			mu           sync.Mutex
+			extensionMap map[int32]Extension
+		}),
+	}
+	x.p.extensionMap = m
+	return *x
+}
+
+func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension {
+	pb := extendable.(extendableProto)
+	return pb.extensionsWrite()
+}
+
+func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int {
+	ext := pb.GetExtensions()
+	for offset < len(*ext) {
+		tag, n1 := DecodeVarint((*ext)[offset:])
+		fieldNum := int32(tag >> 3)
+		wireType := int(tag & 0x7)
+		n2, err := size((*ext)[offset+n1:], wireType)
+		if err != nil {
+			panic(err)
+		}
+		newOffset := offset + n1 + n2
+		if fieldNum == theFieldNum {
+			*ext = append((*ext)[:offset], (*ext)[newOffset:]...)
+			return offset
+		}
+		offset = newOffset
+	}
+	return -1
+}
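GetBoolExtension is a convenience wrapper used heavily by gogo plugins; a minimal hedged example with a hypothetical bool-valued descriptor:

```go
// Returns the stored value, or false when msg is nil, the extension is
// absent, or it cannot be decoded.
enabled := proto.GetBoolExtension(msg, pb.E_Enabled, false)
_ = enabled
```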
diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go
new file mode 100644
index 0000000..80db1c1
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/lib.go
@@ -0,0 +1,973 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+Package proto converts data structures to and from the wire format of
+protocol buffers.  It works in concert with the Go source code generated
+for .proto files by the protocol compiler.
+
+A summary of the properties of the protocol buffer interface
+for a protocol buffer variable v:
+
+  - Names are turned from camel_case to CamelCase for export.
+  - There are no methods on v to set fields; just treat
+	them as structure fields.
+  - There are getters that return a field's value if set,
+	and return the field's default value if unset.
+	The getters work even if the receiver is a nil message.
+  - The zero value for a struct is its correct initialization state.
+	All desired fields must be set before marshaling.
+  - A Reset() method will restore a protobuf struct to its zero state.
+  - Non-repeated fields are pointers to the values; nil means unset.
+	That is, optional or required field int32 f becomes F *int32.
+  - Repeated fields are slices.
+  - Helper functions are available to aid the setting of fields.
+	msg.Foo = proto.String("hello") // set field
+  - Constants are defined to hold the default values of all fields that
+	have them.  They have the form Default_StructName_FieldName.
+	Because the getter methods handle defaulted values,
+	direct use of these constants should be rare.
+  - Enums are given type names and maps from names to values.
+	Enum values are prefixed by the enclosing message's name, or by the
+	enum's type name if it is a top-level enum. Enum types have a String
+	method, and an Enum method to assist in message construction.
+  - Nested messages, groups and enums have type names prefixed with the name of
+	the surrounding message type.
+  - Extensions are given descriptor names that start with E_,
+	followed by an underscore-delimited list of the nested messages
+	that contain it (if any) followed by the CamelCased name of the
+	extension field itself.  HasExtension, ClearExtension, GetExtension
+	and SetExtension are functions for manipulating extensions.
+  - Oneof field sets are given a single field in their message,
+	with distinguished wrapper types for each possible field value.
+  - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+When the .proto file specifies `syntax="proto3"`, there are some differences:
+
+  - Non-repeated fields of non-message type are values instead of pointers.
+  - Enum types do not get an Enum method.
+
+The simplest way to describe this is to see an example.
+Given file test.proto, containing
+
+	package example;
+
+	enum FOO { X = 17; }
+
+	message Test {
+	  required string label = 1;
+	  optional int32 type = 2 [default=77];
+	  repeated int64 reps = 3;
+	  optional group OptionalGroup = 4 {
+	    required string RequiredField = 5;
+	  }
+	  oneof union {
+	    int32 number = 6;
+	    string name = 7;
+	  }
+	}
+
+The resulting file, test.pb.go, is:
+
+	package example
+
+	import proto "github.com/gogo/protobuf/proto"
+	import math "math"
+
+	type FOO int32
+	const (
+		FOO_X FOO = 17
+	)
+	var FOO_name = map[int32]string{
+		17: "X",
+	}
+	var FOO_value = map[string]int32{
+		"X": 17,
+	}
+
+	func (x FOO) Enum() *FOO {
+		p := new(FOO)
+		*p = x
+		return p
+	}
+	func (x FOO) String() string {
+		return proto.EnumName(FOO_name, int32(x))
+	}
+	func (x *FOO) UnmarshalJSON(data []byte) error {
+		value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+		if err != nil {
+			return err
+		}
+		*x = FOO(value)
+		return nil
+	}
+
+	type Test struct {
+		Label         *string             `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+		Type          *int32              `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+		Reps          []int64             `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+		Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+		// Types that are valid to be assigned to Union:
+		//	*Test_Number
+		//	*Test_Name
+		Union            isTest_Union `protobuf_oneof:"union"`
+		XXX_unrecognized []byte       `json:"-"`
+	}
+	func (m *Test) Reset()         { *m = Test{} }
+	func (m *Test) String() string { return proto.CompactTextString(m) }
+	func (*Test) ProtoMessage() {}
+
+	type isTest_Union interface {
+		isTest_Union()
+	}
+
+	type Test_Number struct {
+		Number int32 `protobuf:"varint,6,opt,name=number"`
+	}
+	type Test_Name struct {
+		Name string `protobuf:"bytes,7,opt,name=name"`
+	}
+
+	func (*Test_Number) isTest_Union() {}
+	func (*Test_Name) isTest_Union()   {}
+
+	func (m *Test) GetUnion() isTest_Union {
+		if m != nil {
+			return m.Union
+		}
+		return nil
+	}
+	const Default_Test_Type int32 = 77
+
+	func (m *Test) GetLabel() string {
+		if m != nil && m.Label != nil {
+			return *m.Label
+		}
+		return ""
+	}
+
+	func (m *Test) GetType() int32 {
+		if m != nil && m.Type != nil {
+			return *m.Type
+		}
+		return Default_Test_Type
+	}
+
+	func (m *Test) GetOptionalgroup() *Test_OptionalGroup {
+		if m != nil {
+			return m.Optionalgroup
+		}
+		return nil
+	}
+
+	type Test_OptionalGroup struct {
+		RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+	}
+	func (m *Test_OptionalGroup) Reset()         { *m = Test_OptionalGroup{} }
+	func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) }
+
+	func (m *Test_OptionalGroup) GetRequiredField() string {
+		if m != nil && m.RequiredField != nil {
+			return *m.RequiredField
+		}
+		return ""
+	}
+
+	func (m *Test) GetNumber() int32 {
+		if x, ok := m.GetUnion().(*Test_Number); ok {
+			return x.Number
+		}
+		return 0
+	}
+
+	func (m *Test) GetName() string {
+		if x, ok := m.GetUnion().(*Test_Name); ok {
+			return x.Name
+		}
+		return ""
+	}
+
+	func init() {
+		proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+	}
+
+To create and play with a Test object:
+
+	package main
+
+	import (
+		"log"
+
+		"github.com/gogo/protobuf/proto"
+		pb "./example.pb"
+	)
+
+	func main() {
+		test := &pb.Test{
+			Label: proto.String("hello"),
+			Type:  proto.Int32(17),
+			Reps:  []int64{1, 2, 3},
+			Optionalgroup: &pb.Test_OptionalGroup{
+				RequiredField: proto.String("good bye"),
+			},
+			Union: &pb.Test_Name{"fred"},
+		}
+		data, err := proto.Marshal(test)
+		if err != nil {
+			log.Fatal("marshaling error: ", err)
+		}
+		newTest := &pb.Test{}
+		err = proto.Unmarshal(data, newTest)
+		if err != nil {
+			log.Fatal("unmarshaling error: ", err)
+		}
+		// Now test and newTest contain the same data.
+		if test.GetLabel() != newTest.GetLabel() {
+			log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+		}
+		// Use a type switch to determine which oneof was set.
+		switch u := test.Union.(type) {
+		case *pb.Test_Number: // u.Number contains the number.
+		case *pb.Test_Name: // u.Name contains the string.
+		}
+		// etc.
+	}
+*/
+package proto
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"sync"
+)
+
+// RequiredNotSetError is an error type returned by either Marshal or Unmarshal.
+// Marshal reports this when a required field is not initialized.
+// Unmarshal reports this when a required field is missing from the wire data.
+type RequiredNotSetError struct{ field string }
+
+func (e *RequiredNotSetError) Error() string {
+	if e.field == "" {
+		return "proto: required field not set"
+	}
+	return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+	return true
+}
+
+type invalidUTF8Error struct{ field string }
+
+func (e *invalidUTF8Error) Error() string {
+	if e.field == "" {
+		return "proto: invalid UTF-8 detected"
+	}
+	return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field)
+}
+func (e *invalidUTF8Error) InvalidUTF8() bool {
+	return true
+}
+
+// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8.
+// This error should not be exposed to the external API as such errors should
+// be recreated with the field information.
+var errInvalidUTF8 = &invalidUTF8Error{}
+
+// isNonFatal reports whether the error is either a RequiredNotSet error
+// or an InvalidUTF8 error.
+func isNonFatal(err error) bool {
+	if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() {
+		return true
+	}
+	if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() {
+		return true
+	}
+	return false
+}
+
+type nonFatal struct{ E error }
+
+// Merge merges err into nf and reports whether it could be absorbed:
+// it returns true for nil or non-fatal errors and false for fatal ones.
+func (nf *nonFatal) Merge(err error) (ok bool) {
+	if err == nil {
+		return true // not an error
+	}
+	if !isNonFatal(err) {
+		return false // fatal error
+	}
+	if nf.E == nil {
+		nf.E = err // store first instance of non-fatal error
+	}
+	return true
+}
+
+// Message is implemented by generated protocol buffer messages.
+type Message interface {
+	Reset()
+	String() string
+	ProtoMessage()
+}
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers.  It may be reused between invocations to
+// reduce memory usage.  It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf   []byte // encode/decode byte stream
+	index int    // read point
+
+	deterministic bool
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+// SetDeterministic sets whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (p *Buffer) SetDeterministic(deterministic bool) {
+	p.deterministic = deterministic
+}
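A small sketch of deterministic marshaling with a reusable Buffer; it assumes the Buffer.Marshal method defined elsewhere in this package:

```go
var b proto.Buffer
b.SetDeterministic(true) // map entries will be emitted in sorted key order
if err := b.Marshal(msg); err != nil {
	panic(err)
}
data := b.Bytes()
_ = data
```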
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+	return &v
+}
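These helpers exist because proto2 optional scalars are represented as pointers; a typical construction, reusing the Test message from the package comment above, looks roughly like:

```go
msg := &pb.Test{
	Label: proto.String("hello"), // required string
	Type:  proto.Int32(17),       // optional int32 (default 77)
	Reps:  []int64{1, 2, 3},
}
_ = msg
```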
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name.  Given an enum map and a value, it returns a useful string.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+func (p *Buffer) DebugPrint(s string, b []byte) {
+	var u uint64
+
+	obuf := p.buf
+	sindex := p.index
+	p.buf = b
+	p.index = 0
+	depth := 0
+
+	fmt.Printf("\n--- %s ---\n", s)
+
+out:
+	for {
+		for i := 0; i < depth; i++ {
+			fmt.Print("  ")
+		}
+
+		index := p.index
+		if index == len(p.buf) {
+			break
+		}
+
+		op, err := p.DecodeVarint()
+		if err != nil {
+			fmt.Printf("%3d: fetching op err %v\n", index, err)
+			break out
+		}
+		tag := op >> 3
+		wire := op & 7
+
+		switch wire {
+		default:
+			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+				index, tag, wire)
+			break out
+
+		case WireBytes:
+			var r []byte
+
+			r, err = p.DecodeRawBytes(false)
+			if err != nil {
+				break out
+			}
+			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+			if len(r) <= 6 {
+				for i := 0; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			} else {
+				for i := 0; i < 3; i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+				fmt.Printf(" ..")
+				for i := len(r) - 3; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			}
+			fmt.Printf("\n")
+
+		case WireFixed32:
+			u, err = p.DecodeFixed32()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+		case WireFixed64:
+			u, err = p.DecodeFixed64()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+
+		case WireVarint:
+			u, err = p.DecodeVarint()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+		case WireStartGroup:
+			fmt.Printf("%3d: t=%3d start\n", index, tag)
+			depth++
+
+		case WireEndGroup:
+			depth--
+			fmt.Printf("%3d: t=%3d end\n", index, tag)
+		}
+	}
+
+	if depth != 0 {
+		fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth)
+	}
+	fmt.Printf("\n")
+
+	p.buf = obuf
+	p.index = sindex
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+func SetDefaults(pb Message) {
+	setDefaults(reflect.ValueOf(pb), true, false)
+}
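A brief sketch using the Test message from the package comment, whose type field declares default=77:

```go
msg := &pb.Test{Label: proto.String("x")}
proto.SetDefaults(msg)
// Type was unset and has a declared default, so msg.Type now points at 77;
// unset fields without defaults remain nil.
fmt.Println(msg.GetType()) // 77
```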
+
+// v is a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+	if v.Kind() == reflect.Ptr {
+		v = v.Elem()
+	}
+
+	defaultMu.RLock()
+	dm, ok := defaults[v.Type()]
+	defaultMu.RUnlock()
+	if !ok {
+		dm = buildDefaultMessage(v.Type())
+		defaultMu.Lock()
+		defaults[v.Type()] = dm
+		defaultMu.Unlock()
+	}
+
+	for _, sf := range dm.scalars {
+		f := v.Field(sf.index)
+		if !f.IsNil() {
+			// field already set
+			continue
+		}
+		dv := sf.value
+		if dv == nil && !zeros {
+			// no explicit default, and don't want to set zeros
+			continue
+		}
+		fptr := f.Addr().Interface() // **T
+		// TODO: Consider batching the allocations we do here.
+		switch sf.kind {
+		case reflect.Bool:
+			b := new(bool)
+			if dv != nil {
+				*b = dv.(bool)
+			}
+			*(fptr.(**bool)) = b
+		case reflect.Float32:
+			f := new(float32)
+			if dv != nil {
+				*f = dv.(float32)
+			}
+			*(fptr.(**float32)) = f
+		case reflect.Float64:
+			f := new(float64)
+			if dv != nil {
+				*f = dv.(float64)
+			}
+			*(fptr.(**float64)) = f
+		case reflect.Int32:
+			// might be an enum
+			if ft := f.Type(); ft != int32PtrType {
+				// enum
+				f.Set(reflect.New(ft.Elem()))
+				if dv != nil {
+					f.Elem().SetInt(int64(dv.(int32)))
+				}
+			} else {
+				// int32 field
+				i := new(int32)
+				if dv != nil {
+					*i = dv.(int32)
+				}
+				*(fptr.(**int32)) = i
+			}
+		case reflect.Int64:
+			i := new(int64)
+			if dv != nil {
+				*i = dv.(int64)
+			}
+			*(fptr.(**int64)) = i
+		case reflect.String:
+			s := new(string)
+			if dv != nil {
+				*s = dv.(string)
+			}
+			*(fptr.(**string)) = s
+		case reflect.Uint8:
+			// exceptional case: []byte
+			var b []byte
+			if dv != nil {
+				db := dv.([]byte)
+				b = make([]byte, len(db))
+				copy(b, db)
+			} else {
+				b = []byte{}
+			}
+			*(fptr.(*[]byte)) = b
+		case reflect.Uint32:
+			u := new(uint32)
+			if dv != nil {
+				*u = dv.(uint32)
+			}
+			*(fptr.(**uint32)) = u
+		case reflect.Uint64:
+			u := new(uint64)
+			if dv != nil {
+				*u = dv.(uint64)
+			}
+			*(fptr.(**uint64)) = u
+		default:
+			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+		}
+	}
+
+	for _, ni := range dm.nested {
+		f := v.Field(ni)
+		// f is *T or T or []*T or []T
+		switch f.Kind() {
+		case reflect.Struct:
+			setDefaults(f, recur, zeros)
+
+		case reflect.Ptr:
+			if f.IsNil() {
+				continue
+			}
+			setDefaults(f, recur, zeros)
+
+		case reflect.Slice:
+			for i := 0; i < f.Len(); i++ {
+				e := f.Index(i)
+				if e.Kind() == reflect.Ptr && e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+
+		case reflect.Map:
+			for _, k := range f.MapKeys() {
+				e := f.MapIndex(k)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+		}
+	}
+}
+
+var (
+	// defaults maps a protocol buffer struct type to a slice of the fields,
+	// with its scalar fields set to their proto-declared non-zero default values.
+	defaultMu sync.RWMutex
+	defaults  = make(map[reflect.Type]defaultMessage)
+
+	int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+	scalars []scalarField
+	nested  []int // struct field index of nested messages
+}
+
+type scalarField struct {
+	index int          // struct field index
+	kind  reflect.Kind // element type (the T in *T or []T)
+	value interface{}  // the proto-declared default value, or nil
+}
+
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+	sprop := GetProperties(t)
+	for _, prop := range sprop.Prop {
+		fi, ok := sprop.decoderTags.get(prop.Tag)
+		if !ok {
+			// XXX_unrecognized
+			continue
+		}
+		ft := t.Field(fi).Type
+
+		sf, nested, err := fieldDefault(ft, prop)
+		switch {
+		case err != nil:
+			log.Print(err)
+		case nested:
+			dm.nested = append(dm.nested, fi)
+		case sf != nil:
+			sf.index = fi
+			dm.scalars = append(dm.scalars, *sf)
+		}
+	}
+
+	return dm
+}
+
+// fieldDefault returns the scalarField for field type ft.
+// sf will be nil if the field cannot have a default.
+// nestedMessage will be true if this is a nested message.
+// Note that sf.index is not set on return.
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) {
+	var canHaveDefault bool
+	switch ft.Kind() {
+	case reflect.Struct:
+		nestedMessage = true // non-nullable
+
+	case reflect.Ptr:
+		if ft.Elem().Kind() == reflect.Struct {
+			nestedMessage = true
+		} else {
+			canHaveDefault = true // proto2 scalar field
+		}
+
+	case reflect.Slice:
+		switch ft.Elem().Kind() {
+		case reflect.Ptr, reflect.Struct:
+			nestedMessage = true // repeated message
+		case reflect.Uint8:
+			canHaveDefault = true // bytes field
+		}
+
+	case reflect.Map:
+		if ft.Elem().Kind() == reflect.Ptr {
+			nestedMessage = true // map with message values
+		}
+	}
+
+	if !canHaveDefault {
+		if nestedMessage {
+			return nil, true, nil
+		}
+		return nil, false, nil
+	}
+
+	// We now know that ft is a pointer or slice.
+	sf = &scalarField{kind: ft.Elem().Kind()}
+
+	// scalar fields without defaults
+	if !prop.HasDefault {
+		return sf, false, nil
+	}
+
+	// a scalar field: either *T or []byte
+	switch ft.Elem().Kind() {
+	case reflect.Bool:
+		x, err := strconv.ParseBool(prop.Default)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Float32:
+		x, err := strconv.ParseFloat(prop.Default, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err)
+		}
+		sf.value = float32(x)
+	case reflect.Float64:
+		x, err := strconv.ParseFloat(prop.Default, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.Int32:
+		x, err := strconv.ParseInt(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err)
+		}
+		sf.value = int32(x)
+	case reflect.Int64:
+		x, err := strconv.ParseInt(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	case reflect.String:
+		sf.value = prop.Default
+	case reflect.Uint8:
+		// []byte (not *uint8)
+		sf.value = []byte(prop.Default)
+	case reflect.Uint32:
+		x, err := strconv.ParseUint(prop.Default, 10, 32)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err)
+		}
+		sf.value = uint32(x)
+	case reflect.Uint64:
+		x, err := strconv.ParseUint(prop.Default, 10, 64)
+		if err != nil {
+			return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err)
+		}
+		sf.value = x
+	default:
+		return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind())
+	}
+
+	return sf, false, nil
+}
+
+// mapKeys returns a sort.Interface to be used for sorting the map keys.
+// Map fields may have key types of non-float scalars, strings and enums.
+func mapKeys(vs []reflect.Value) sort.Interface {
+	s := mapKeySorter{vs: vs}
+
+	// Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps.
+	if len(vs) == 0 {
+		return s
+	}
+	switch vs[0].Kind() {
+	case reflect.Int32, reflect.Int64:
+		s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() }
+	case reflect.Uint32, reflect.Uint64:
+		s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() }
+	case reflect.Bool:
+		s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true
+	case reflect.String:
+		s.less = func(a, b reflect.Value) bool { return a.String() < b.String() }
+	default:
+		panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind()))
+	}
+
+	return s
+}
+
+type mapKeySorter struct {
+	vs   []reflect.Value
+	less func(a, b reflect.Value) bool
+}
+
+func (s mapKeySorter) Len() int      { return len(s.vs) }
+func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] }
+func (s mapKeySorter) Less(i, j int) bool {
+	return s.less(s.vs[i], s.vs[j])
+}
+
+// isProto3Zero reports whether v is a zero proto3 value.
+func isProto3Zero(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint32, reflect.Uint64:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.String:
+		return v.String() == ""
+	}
+	return false
+}
+
+const (
+	// GoGoProtoPackageIsVersion3 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion3 = true
+
+	// GoGoProtoPackageIsVersion2 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion2 = true
+
+	// GoGoProtoPackageIsVersion1 is referenced from generated protocol buffer files
+	// to assert that the generated code is compatible with this version of the proto package.
+	GoGoProtoPackageIsVersion1 = true
+)
+
+// InternalMessageInfo is a type used internally by generated .pb.go files.
+// This type is not intended to be used by non-generated code.
+// This type is not subject to any compatibility guarantee.
+type InternalMessageInfo struct {
+	marshal   *marshalInfo
+	unmarshal *unmarshalInfo
+	merge     *mergeInfo
+	discard   *discardInfo
+}
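
As an aside, mapKeys and mapKeySorter above give map fields a deterministic key order during encoding. A minimal standalone sketch of the same idea for string keys (sortedKeys is a hypothetical helper, not part of this package):

```go
package main

import (
	"fmt"
	"reflect"
	"sort"
)

// sortedKeys mirrors what mapKeys/mapKeySorter do for string-keyed maps:
// collect reflect.Values for the keys and order them with a kind-specific
// comparison so map entries are walked deterministically.
func sortedKeys(m interface{}) []reflect.Value {
	keys := reflect.ValueOf(m).MapKeys()
	sort.Slice(keys, func(i, j int) bool { return keys[i].String() < keys[j].String() })
	return keys
}

func main() {
	for _, k := range sortedKeys(map[string]int{"b": 2, "a": 1, "c": 3}) {
		fmt.Println(k.String()) // a, b, c
	}
}
```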
diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
new file mode 100644
index 0000000..b3aa391
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go
@@ -0,0 +1,50 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"encoding/json"
+	"strconv"
+)
+
+type Sizer interface {
+	Size() int
+}
+
+type ProtoSizer interface {
+	ProtoSize() int
+}
+
+func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
+	s, ok := m[value]
+	if !ok {
+		s = strconv.Itoa(int(value))
+	}
+	return json.Marshal(s)
+}
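
MarshalJSONEnum above falls back to the numeric value when an enum name is not in the map. A small usage sketch, assuming the vendored import path github.com/gogo/protobuf/proto resolves in the caller's module:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	names := map[int32]string{0: "UNKNOWN", 1: "ACTIVE"}

	b, _ := proto.MarshalJSONEnum(names, 1)
	fmt.Println(string(b)) // "ACTIVE" (known values marshal as their name)

	b, _ = proto.MarshalJSONEnum(names, 5)
	fmt.Println(string(b)) // "5" (unknown values marshal as the number, still a JSON string)
}
```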
diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go
new file mode 100644
index 0000000..f48a756
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/message_set.go
@@ -0,0 +1,181 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+	"errors"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+	MessageTypeId() int32
+}
+
+func (ms *messageSet) find(pb Message) *_MessageSet_Item {
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return nil
+	}
+	id := mti.MessageTypeId()
+	for _, item := range ms.Item {
+		if *item.TypeId == id {
+			return item
+		}
+	}
+	return nil
+}
+
+func (ms *messageSet) Has(pb Message) bool {
+	return ms.find(pb) != nil
+}
+
+func (ms *messageSet) Unmarshal(pb Message) error {
+	if item := ms.find(pb); item != nil {
+		return Unmarshal(item.Message, pb)
+	}
+	if _, ok := pb.(messageTypeIder); !ok {
+		return errNoMessageTypeID
+	}
+	return nil // TODO: return error instead?
+}
+
+func (ms *messageSet) Marshal(pb Message) error {
+	msg, err := Marshal(pb)
+	if err != nil {
+		return err
+	}
+	if item := ms.find(pb); item != nil {
+		// reuse existing item
+		item.Message = msg
+		return nil
+	}
+
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return errNoMessageTypeID
+	}
+
+	mtid := mti.MessageTypeId()
+	ms.Item = append(ms.Item, &_MessageSet_Item{
+		TypeId:  &mtid,
+		Message: msg,
+	})
+	return nil
+}
+
+func (ms *messageSet) Reset()         { *ms = messageSet{} }
+func (ms *messageSet) String() string { return CompactTextString(ms) }
+func (*messageSet) ProtoMessage()     {}
+
+// Support for the message_set_wire_format message option.
+
+func skipVarint(buf []byte) []byte {
+	i := 0
+	for ; buf[i]&0x80 != 0; i++ {
+	}
+	return buf[i+1:]
+}
+
+// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+func unmarshalMessageSet(buf []byte, exts interface{}) error {
+	var m map[int32]Extension
+	switch exts := exts.(type) {
+	case *XXX_InternalExtensions:
+		m = exts.extensionsWrite()
+	case map[int32]Extension:
+		m = exts
+	default:
+		return errors.New("proto: not an extension map")
+	}
+
+	ms := new(messageSet)
+	if err := Unmarshal(buf, ms); err != nil {
+		return err
+	}
+	for _, item := range ms.Item {
+		id := *item.TypeId
+		msg := item.Message
+
+		// Restore wire type and field number varint, plus length varint.
+		// Be careful to preserve duplicate items.
+		b := EncodeVarint(uint64(id)<<3 | WireBytes)
+		if ext, ok := m[id]; ok {
+			// Existing data; rip off the tag and length varint
+			// so we join the new data correctly.
+			// We can assume that ext.enc is set because we are unmarshaling.
+			o := ext.enc[len(b):]   // skip wire type and field number
+			_, n := DecodeVarint(o) // calculate length of length varint
+			o = o[n:]               // skip length varint
+			msg = append(o, msg...) // join old data and new data
+		}
+		b = append(b, EncodeVarint(uint64(len(msg)))...)
+		b = append(b, msg...)
+
+		m[id] = Extension{enc: b}
+	}
+	return nil
+}
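
Each message-set item that unmarshalMessageSet stores back into the extension map is framed as a tag varint (type_id as the field number, bytes wire type), a length varint, then the message bytes. A standalone sketch of that framing, with Go 1.19's binary.AppendUvarint standing in for the package's EncodeVarint and frame as a hypothetical helper:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// frame rebuilds the per-item encoding kept in the extension map:
// tag varint (field number = type_id, wire type = bytes), length varint,
// then the raw message bytes.
func frame(typeID int32, msg []byte) []byte {
	const wireBytes = 2
	b := binary.AppendUvarint(nil, uint64(typeID)<<3|wireBytes)
	b = binary.AppendUvarint(b, uint64(len(msg)))
	return append(b, msg...)
}

func main() {
	fmt.Printf("% x\n", frame(100, []byte{0x0a, 0x01, 0x41}))
	// a2 06 03 0a 01 41: tag for field 100 with bytes wire type, length 3, payload
}
```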
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
new file mode 100644
index 0000000..b6cad90
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,357 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+	"reflect"
+	"sync"
+)
+
+const unsafeAllowed = false
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by the sequence of field indices
+// passed to reflect's FieldByIndex.
+type field []int
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return f.Index
+}
+
+// invalidField is an invalid field identifier.
+var invalidField = field(nil)
+
+// zeroField is a noop when calling pointer.offset.
+var zeroField = field([]int{})
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool { return f != nil }
+
+// The pointer type is for the table-driven decoder.
+// The implementation here uses a reflect.Value of pointer type to
+// create a generic pointer. In pointer_unsafe.go we use unsafe
+// instead of reflect to implement the same (but faster) interface.
+type pointer struct {
+	v reflect.Value
+}
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	return pointer{v: reflect.ValueOf(*i)}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+	v := reflect.ValueOf(*i)
+	u := reflect.New(v.Type())
+	u.Elem().Set(v)
+	return pointer{v: u}
+}
+
+// valToPointer converts v to a pointer.  v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{v: v}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	return pointer{v: p.v.Elem().FieldByIndex(f).Addr()}
+}
+
+func (p pointer) isNil() bool {
+	return p.v.IsNil()
+}
+
+// grow updates the slice s in place to make it one element longer.
+// s must be addressable.
+// Returns the (addressable) new element.
+func grow(s reflect.Value) reflect.Value {
+	n, m := s.Len(), s.Cap()
+	if n < m {
+		s.SetLen(n + 1)
+	} else {
+		s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem())))
+	}
+	return s.Index(n)
+}
+
+func (p pointer) toInt64() *int64 {
+	return p.v.Interface().(*int64)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return p.v.Interface().(**int64)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return p.v.Interface().(*[]int64)
+}
+
+var int32ptr = reflect.TypeOf((*int32)(nil))
+
+func (p pointer) toInt32() *int32 {
+	return p.v.Convert(int32ptr).Interface().(*int32)
+}
+
+// The toInt32Ptr/Slice methods don't work because of enums.
+// Instead, we must use set/get methods for the int32ptr/slice case.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return p.v.Interface().(**int32)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return p.v.Interface().(*[]int32)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().(*int32)
+	}
+	// an enum
+	return p.v.Elem().Convert(int32PtrType).Interface().(*int32)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	// Allocate value in a *int32. Possibly convert that to a *enum.
+	// Then assign it to a **int32 or **enum.
+	// Note: we can convert *int32 to *enum, but we can't convert
+	// **int32 to **enum!
+	p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem()))
+}
+
+// getInt32Slice copies []int32 from p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getInt32Slice() []int32 {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		return p.v.Elem().Interface().([]int32)
+	}
+	// an enum
+	// Allocate a []int32, then assign []enum's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := p.v.Elem()
+	s := make([]int32, slice.Len())
+	for i := 0; i < slice.Len(); i++ {
+		s[i] = int32(slice.Index(i).Int())
+	}
+	return s
+}
+
+// setInt32Slice copies []int32 into p as a new slice.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setInt32Slice(v []int32) {
+	if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) {
+		// raw int32 type
+		p.v.Elem().Set(reflect.ValueOf(v))
+		return
+	}
+	// an enum
+	// Allocate a []enum, then assign []int32's values into it.
+	// Note: we can't convert []enum to []int32.
+	slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v))
+	for i, x := range v {
+		slice.Index(i).SetInt(int64(x))
+	}
+	p.v.Elem().Set(slice)
+}
+func (p pointer) appendInt32Slice(v int32) {
+	grow(p.v.Elem()).SetInt(int64(v))
+}
+
+func (p pointer) toUint64() *uint64 {
+	return p.v.Interface().(*uint64)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return p.v.Interface().(**uint64)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return p.v.Interface().(*[]uint64)
+}
+func (p pointer) toUint32() *uint32 {
+	return p.v.Interface().(*uint32)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return p.v.Interface().(**uint32)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return p.v.Interface().(*[]uint32)
+}
+func (p pointer) toBool() *bool {
+	return p.v.Interface().(*bool)
+}
+func (p pointer) toBoolPtr() **bool {
+	return p.v.Interface().(**bool)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return p.v.Interface().(*[]bool)
+}
+func (p pointer) toFloat64() *float64 {
+	return p.v.Interface().(*float64)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return p.v.Interface().(**float64)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return p.v.Interface().(*[]float64)
+}
+func (p pointer) toFloat32() *float32 {
+	return p.v.Interface().(*float32)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return p.v.Interface().(**float32)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return p.v.Interface().(*[]float32)
+}
+func (p pointer) toString() *string {
+	return p.v.Interface().(*string)
+}
+func (p pointer) toStringPtr() **string {
+	return p.v.Interface().(**string)
+}
+func (p pointer) toStringSlice() *[]string {
+	return p.v.Interface().(*[]string)
+}
+func (p pointer) toBytes() *[]byte {
+	return p.v.Interface().(*[]byte)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return p.v.Interface().(*[][]byte)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return p.v.Interface().(*XXX_InternalExtensions)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return p.v.Interface().(*map[int32]Extension)
+}
+func (p pointer) getPointer() pointer {
+	return pointer{v: p.v.Elem()}
+}
+func (p pointer) setPointer(q pointer) {
+	p.v.Elem().Set(q.v)
+}
+func (p pointer) appendPointer(q pointer) {
+	grow(p.v.Elem()).Set(q.v)
+}
+
+// getPointerSlice copies []*T from p as a new []pointer.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) getPointerSlice() []pointer {
+	if p.v.IsNil() {
+		return nil
+	}
+	n := p.v.Elem().Len()
+	s := make([]pointer, n)
+	for i := 0; i < n; i++ {
+		s[i] = pointer{v: p.v.Elem().Index(i)}
+	}
+	return s
+}
+
+// setPointerSlice copies []pointer into p as a new []*T.
+// This behavior differs from the implementation in pointer_unsafe.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	if v == nil {
+		p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem())
+		return
+	}
+	s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v))
+	for _, p := range v {
+		s = reflect.Append(s, p.v)
+	}
+	p.v.Elem().Set(s)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+	if p.v.Elem().IsNil() {
+		return pointer{v: p.v.Elem()}
+	}
+	return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct
+}
+
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	// TODO: check that p.v.Type().Elem() == t?
+	return p.v
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	return *p
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomicLock.Lock()
+	defer atomicLock.Unlock()
+	*p = v
+}
+
+var atomicLock sync.Mutex
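
The reflect-based pointer above reaches a struct field by index path and takes its address with FieldByIndex(...).Addr(). A minimal standalone sketch of the same access pattern (the example type is hypothetical):

```go
package main

import (
	"fmt"
	"reflect"
)

type example struct {
	A int32
	B *string
}

func main() {
	m := &example{A: 7}

	// A field is an index path ([]int); the field pointer is
	// FieldByIndex(path).Addr(), obtained without package unsafe.
	v := reflect.ValueOf(m).Elem()
	bPtr := v.FieldByIndex([]int{1}).Addr().Interface().(**string)

	s := "hello"
	*bPtr = &s
	fmt.Println(*m.B) // hello
}
```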
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
new file mode 100644
index 0000000..7ffd3c2
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
@@ -0,0 +1,59 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build purego appengine js
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+	"reflect"
+)
+
+// TODO: untested, so probably incorrect.
+
+func (p pointer) getRef() pointer {
+	return pointer{v: p.v.Addr()}
+}
+
+func (p pointer) appendRef(v pointer, typ reflect.Type) {
+	slice := p.getSlice(typ)
+	elem := v.asPointerTo(typ).Elem()
+	newSlice := reflect.Append(slice, elem)
+	slice.Set(newSlice)
+}
+
+func (p pointer) getSlice(typ reflect.Type) reflect.Value {
+	sliceTyp := reflect.SliceOf(typ)
+	slice := p.asPointerTo(sliceTyp)
+	slice = slice.Elem()
+	return slice
+}
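
getSlice and appendRef above reach a slice through a pointer and append a dereferenced element to it. The same shape with plain reflect, as a minimal sketch:

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	s := []int32{1, 2}
	// Take the slice through its pointer, append, and write it back,
	// the way appendRef does via getSlice.
	slice := reflect.ValueOf(&s).Elem()
	slice.Set(reflect.Append(slice, reflect.ValueOf(int32(3))))
	fmt.Println(s) // [1 2 3]
}
```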
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 0000000..d55a335
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,308 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+	"reflect"
+	"sync/atomic"
+	"unsafe"
+)
+
+const unsafeAllowed = true
+
+// A field identifies a field in a struct, accessible from a pointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// zeroField is a noop when calling pointer.offset.
+const zeroField = field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+	return f != invalidField
+}
+
+// The pointer type below is for the new table-driven encoder/decoder.
+// The implementation here uses unsafe.Pointer to create a generic pointer.
+// In pointer_reflect.go we use reflect instead of unsafe to implement
+// the same (but slower) interface.
+type pointer struct {
+	p unsafe.Pointer
+}
+
+// size of pointer
+var ptrSize = unsafe.Sizeof(uintptr(0))
+
+// toPointer converts an interface of pointer type to a pointer
+// that points to the same target.
+func toPointer(i *Message) pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	// Saves ~25ns over the equivalent:
+	// return valToPointer(reflect.ValueOf(*i))
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// toAddrPointer converts an interface to a pointer that points to
+// the interface data.
+func toAddrPointer(i *interface{}, isptr bool) pointer {
+	// Super-tricky - read or get the address of data word of interface value.
+	if isptr {
+		// The interface is of pointer type, thus it is a direct interface.
+		// The data word is the pointer data itself. We take its address.
+		return pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)}
+	}
+	// The interface is not of pointer type. The data word is the pointer
+	// to the data.
+	return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]}
+}
+
+// valToPointer converts v to a pointer. v must be of pointer type.
+func valToPointer(v reflect.Value) pointer {
+	return pointer{p: unsafe.Pointer(v.Pointer())}
+}
+
+// offset converts from a pointer to a structure to a pointer to
+// one of its fields.
+func (p pointer) offset(f field) pointer {
+	// For safety, we should panic if !f.IsValid, however calling panic causes
+	// this to no longer be inlineable, which is a serious performance cost.
+	/*
+		if !f.IsValid() {
+			panic("invalid field")
+		}
+	*/
+	return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))}
+}
+
+func (p pointer) isNil() bool {
+	return p.p == nil
+}
+
+func (p pointer) toInt64() *int64 {
+	return (*int64)(p.p)
+}
+func (p pointer) toInt64Ptr() **int64 {
+	return (**int64)(p.p)
+}
+func (p pointer) toInt64Slice() *[]int64 {
+	return (*[]int64)(p.p)
+}
+func (p pointer) toInt32() *int32 {
+	return (*int32)(p.p)
+}
+
+// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist.
+/*
+	func (p pointer) toInt32Ptr() **int32 {
+		return (**int32)(p.p)
+	}
+	func (p pointer) toInt32Slice() *[]int32 {
+		return (*[]int32)(p.p)
+	}
+*/
+func (p pointer) getInt32Ptr() *int32 {
+	return *(**int32)(p.p)
+}
+func (p pointer) setInt32Ptr(v int32) {
+	*(**int32)(p.p) = &v
+}
+
+// getInt32Slice loads a []int32 from p.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getInt32Slice() []int32 {
+	return *(*[]int32)(p.p)
+}
+
+// setInt32Slice stores a []int32 to p.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setInt32Slice(v []int32) {
+	*(*[]int32)(p.p) = v
+}
+
+// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead?
+func (p pointer) appendInt32Slice(v int32) {
+	s := (*[]int32)(p.p)
+	*s = append(*s, v)
+}
+
+func (p pointer) toUint64() *uint64 {
+	return (*uint64)(p.p)
+}
+func (p pointer) toUint64Ptr() **uint64 {
+	return (**uint64)(p.p)
+}
+func (p pointer) toUint64Slice() *[]uint64 {
+	return (*[]uint64)(p.p)
+}
+func (p pointer) toUint32() *uint32 {
+	return (*uint32)(p.p)
+}
+func (p pointer) toUint32Ptr() **uint32 {
+	return (**uint32)(p.p)
+}
+func (p pointer) toUint32Slice() *[]uint32 {
+	return (*[]uint32)(p.p)
+}
+func (p pointer) toBool() *bool {
+	return (*bool)(p.p)
+}
+func (p pointer) toBoolPtr() **bool {
+	return (**bool)(p.p)
+}
+func (p pointer) toBoolSlice() *[]bool {
+	return (*[]bool)(p.p)
+}
+func (p pointer) toFloat64() *float64 {
+	return (*float64)(p.p)
+}
+func (p pointer) toFloat64Ptr() **float64 {
+	return (**float64)(p.p)
+}
+func (p pointer) toFloat64Slice() *[]float64 {
+	return (*[]float64)(p.p)
+}
+func (p pointer) toFloat32() *float32 {
+	return (*float32)(p.p)
+}
+func (p pointer) toFloat32Ptr() **float32 {
+	return (**float32)(p.p)
+}
+func (p pointer) toFloat32Slice() *[]float32 {
+	return (*[]float32)(p.p)
+}
+func (p pointer) toString() *string {
+	return (*string)(p.p)
+}
+func (p pointer) toStringPtr() **string {
+	return (**string)(p.p)
+}
+func (p pointer) toStringSlice() *[]string {
+	return (*[]string)(p.p)
+}
+func (p pointer) toBytes() *[]byte {
+	return (*[]byte)(p.p)
+}
+func (p pointer) toBytesSlice() *[][]byte {
+	return (*[][]byte)(p.p)
+}
+func (p pointer) toExtensions() *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(p.p)
+}
+func (p pointer) toOldExtensions() *map[int32]Extension {
+	return (*map[int32]Extension)(p.p)
+}
+
+// getPointerSlice loads []*T from p as a []pointer.
+// The value returned is aliased with the original slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) getPointerSlice() []pointer {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We load it as []pointer.
+	return *(*[]pointer)(p.p)
+}
+
+// setPointerSlice stores []pointer into p as a []*T.
+// The value set is aliased with the input slice.
+// This behavior differs from the implementation in pointer_reflect.go.
+func (p pointer) setPointerSlice(v []pointer) {
+	// Super-tricky - p should point to a []*T where T is a
+	// message type. We store it as []pointer.
+	*(*[]pointer)(p.p) = v
+}
+
+// getPointer loads the pointer at p and returns it.
+func (p pointer) getPointer() pointer {
+	return pointer{p: *(*unsafe.Pointer)(p.p)}
+}
+
+// setPointer stores the pointer q at p.
+func (p pointer) setPointer(q pointer) {
+	*(*unsafe.Pointer)(p.p) = q.p
+}
+
+// append q to the slice pointed to by p.
+func (p pointer) appendPointer(q pointer) {
+	s := (*[]unsafe.Pointer)(p.p)
+	*s = append(*s, q.p)
+}
+
+// getInterfacePointer returns a pointer that points to the
+// interface data of the interface pointed by p.
+func (p pointer) getInterfacePointer() pointer {
+	// Super-tricky - read pointer out of data word of interface value.
+	return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]}
+}
+
+// asPointerTo returns a reflect.Value that is a pointer to an
+// object of type t stored at p.
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value {
+	return reflect.NewAt(t, p.p)
+}
+
+func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo {
+	return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo {
+	return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo {
+	return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
+func atomicLoadDiscardInfo(p **discardInfo) *discardInfo {
+	return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p))))
+}
+func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) {
+	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v))
+}
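
The unsafe variant identifies a field by its byte offset and adds that offset to the struct's base address in pointer.offset. A standalone sketch of the same arithmetic, using unsafe.Add (Go 1.17 and later) rather than the raw uintptr addition used above; the example type is hypothetical:

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type example struct {
	A int32
	B string
}

func main() {
	m := &example{A: 7, B: "hi"}

	// field = byte offset; pointer.offset = base address + offset.
	f, _ := reflect.TypeOf(*m).FieldByName("B")
	p := unsafe.Add(unsafe.Pointer(m), f.Offset)

	*(*string)(p) = "hello"
	fmt.Println(m.B) // hello
}
```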
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
new file mode 100644
index 0000000..aca8eed
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
@@ -0,0 +1,56 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !purego,!appengine,!js
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+func (p pointer) getRef() pointer {
+	return pointer{p: (unsafe.Pointer)(&p.p)}
+}
+
+func (p pointer) appendRef(v pointer, typ reflect.Type) {
+	slice := p.getSlice(typ)
+	elem := v.asPointerTo(typ).Elem()
+	newSlice := reflect.Append(slice, elem)
+	slice.Set(newSlice)
+}
+
+func (p pointer) getSlice(typ reflect.Type) reflect.Value {
+	sliceTyp := reflect.SliceOf(typ)
+	slice := p.asPointerTo(sliceTyp)
+	slice = slice.Elem()
+	return slice
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go
new file mode 100644
index 0000000..28da147
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/properties.go
@@ -0,0 +1,610 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+	"fmt"
+	"log"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+const debug bool = false
+
+// Constants that identify the encoding of a value on the wire.
+const (
+	WireVarint     = 0
+	WireFixed64    = 1
+	WireBytes      = 2
+	WireStartGroup = 3
+	WireEndGroup   = 4
+	WireFixed32    = 5
+)
+
+// tagMap is an optimization over map[int]int for typical protocol buffer
+// use-cases. Encoded protocol buffers are often in tag order with small tag
+// numbers.
+type tagMap struct {
+	fastTags []int
+	slowTags map[int]int
+}
+
+// tagMapFastLimit is the upper bound on the tag number that will be stored in
+// the tagMap slice rather than its map.
+const tagMapFastLimit = 1024
+
+func (p *tagMap) get(t int) (int, bool) {
+	if t > 0 && t < tagMapFastLimit {
+		if t >= len(p.fastTags) {
+			return 0, false
+		}
+		fi := p.fastTags[t]
+		return fi, fi >= 0
+	}
+	fi, ok := p.slowTags[t]
+	return fi, ok
+}
+
+func (p *tagMap) put(t int, fi int) {
+	if t > 0 && t < tagMapFastLimit {
+		for len(p.fastTags) < t+1 {
+			p.fastTags = append(p.fastTags, -1)
+		}
+		p.fastTags[t] = fi
+		return
+	}
+	if p.slowTags == nil {
+		p.slowTags = make(map[int]int)
+	}
+	p.slowTags[t] = fi
+}
+
+// StructProperties represents properties for all the fields of a struct.
+// decoderTags and decoderOrigNames should only be used by the decoder.
+type StructProperties struct {
+	Prop             []*Properties  // properties for each field
+	reqCount         int            // required count
+	decoderTags      tagMap         // map from proto tag to struct field number
+	decoderOrigNames map[string]int // map from original name to struct field number
+	order            []int          // list of struct field numbers in tag order
+
+	// OneofTypes contains information about the oneof fields in this message.
+	// It is keyed by the original name of a field.
+	OneofTypes map[string]*OneofProperties
+}
+
+// OneofProperties represents information about a specific field in a oneof.
+type OneofProperties struct {
+	Type  reflect.Type // pointer to generated struct type for this oneof field
+	Field int          // struct field number of the containing oneof in the message
+	Prop  *Properties
+}
+
+// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
+// See encode.go, (*Buffer).enc_struct.
+
+func (sp *StructProperties) Len() int { return len(sp.order) }
+func (sp *StructProperties) Less(i, j int) bool {
+	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
+}
+func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
+// Properties represents the protocol-specific behavior of a single struct field.
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	JSONName string // name to use for JSON; determined by protoc
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field
+	oneof    bool   // whether this is a oneof field
+
+	Default     string // default value
+	HasDefault  bool   // whether an explicit default was provided
+	CustomType  string
+	CastType    string
+	StdTime     bool
+	StdDuration bool
+	WktPointer  bool
+
+	stype reflect.Type      // set for struct types only
+	ctype reflect.Type      // set for custom types only
+	sprop *StructProperties // set for struct types only
+
+	mtype      reflect.Type // set for map types only
+	MapKeyProp *Properties  // set for map types only
+	MapValProp *Properties  // set for map types only
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
+	if len(fields) < 2 {
+		log.Printf("proto: tag has too few fields: %q", s)
+		return
+	}
+
+	p.Wire = fields[0]
+	switch p.Wire {
+	case "varint":
+		p.WireType = WireVarint
+	case "fixed32":
+		p.WireType = WireFixed32
+	case "fixed64":
+		p.WireType = WireFixed64
+	case "zigzag32":
+		p.WireType = WireVarint
+	case "zigzag64":
+		p.WireType = WireVarint
+	case "bytes", "group":
+		p.WireType = WireBytes
+		// no numeric converter for non-numeric types
+	default:
+		log.Printf("proto: tag has unknown wire type: %q", s)
+		return
+	}
+
+	var err error
+	p.Tag, err = strconv.Atoi(fields[1])
+	if err != nil {
+		return
+	}
+
+outer:
+	for i := 2; i < len(fields); i++ {
+		f := fields[i]
+		switch {
+		case f == "req":
+			p.Required = true
+		case f == "opt":
+			p.Optional = true
+		case f == "rep":
+			p.Repeated = true
+		case f == "packed":
+			p.Packed = true
+		case strings.HasPrefix(f, "name="):
+			p.OrigName = f[5:]
+		case strings.HasPrefix(f, "json="):
+			p.JSONName = f[5:]
+		case strings.HasPrefix(f, "enum="):
+			p.Enum = f[5:]
+		case f == "proto3":
+			p.proto3 = true
+		case f == "oneof":
+			p.oneof = true
+		case strings.HasPrefix(f, "def="):
+			p.HasDefault = true
+			p.Default = f[4:] // rest of string
+			if i+1 < len(fields) {
+				// Commas aren't escaped, and def is always last.
+				p.Default += "," + strings.Join(fields[i+1:], ",")
+				break outer
+			}
+		case strings.HasPrefix(f, "embedded="):
+			p.OrigName = strings.Split(f, "=")[1]
+		case strings.HasPrefix(f, "customtype="):
+			p.CustomType = strings.Split(f, "=")[1]
+		case strings.HasPrefix(f, "casttype="):
+			p.CastType = strings.Split(f, "=")[1]
+		case f == "stdtime":
+			p.StdTime = true
+		case f == "stdduration":
+			p.StdDuration = true
+		case f == "wktptr":
+			p.WktPointer = true
+		}
+	}
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem()
+
+// setFieldProps initializes the field properties for submessages and maps.
+func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+	isMap := typ.Kind() == reflect.Map
+	if len(p.CustomType) > 0 && !isMap {
+		p.ctype = typ
+		p.setTag(lockGetProp)
+		return
+	}
+	if p.StdTime && !isMap {
+		p.setTag(lockGetProp)
+		return
+	}
+	if p.StdDuration && !isMap {
+		p.setTag(lockGetProp)
+		return
+	}
+	if p.WktPointer && !isMap {
+		p.setTag(lockGetProp)
+		return
+	}
+	switch t1 := typ; t1.Kind() {
+	case reflect.Struct:
+		p.stype = typ
+	case reflect.Ptr:
+		if t1.Elem().Kind() == reflect.Struct {
+			p.stype = t1.Elem()
+		}
+	case reflect.Slice:
+		switch t2 := t1.Elem(); t2.Kind() {
+		case reflect.Ptr:
+			switch t3 := t2.Elem(); t3.Kind() {
+			case reflect.Struct:
+				p.stype = t3
+			}
+		case reflect.Struct:
+			p.stype = t2
+		}
+
+	case reflect.Map:
+
+		p.mtype = t1
+		p.MapKeyProp = &Properties{}
+		p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+		p.MapValProp = &Properties{}
+		vtype := p.mtype.Elem()
+		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+			// The value type is not a message (*T) or bytes ([]byte),
+			// so we need encoders for the pointer to this type.
+			vtype = reflect.PtrTo(vtype)
+		}
+
+		p.MapValProp.CustomType = p.CustomType
+		p.MapValProp.StdDuration = p.StdDuration
+		p.MapValProp.StdTime = p.StdTime
+		p.MapValProp.WktPointer = p.WktPointer
+		p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+	}
+	p.setTag(lockGetProp)
+}
+
+func (p *Properties) setTag(lockGetProp bool) {
+	if p.stype != nil {
+		if lockGetProp {
+			p.sprop = GetProperties(p.stype)
+		} else {
+			p.sprop = getPropertiesLocked(p.stype)
+		}
+	}
+}
+
+var (
+	marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem()
+)
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+	p.init(typ, name, tag, f, true)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+	// "bytes,49,opt,def=hello!"
+	p.Name = name
+	p.OrigName = name
+	if tag == "" {
+		return
+	}
+	p.Parse(tag)
+	p.setFieldProps(typ, f, lockGetProp)
+}
+
+var (
+	propertiesMu  sync.RWMutex
+	propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+	if t.Kind() != reflect.Struct {
+		panic("proto: type must have kind struct")
+	}
+
+	// Most calls to GetProperties in a long-running program will be
+	// retrieving details for types we have seen before.
+	propertiesMu.RLock()
+	sprop, ok := propertiesMap[t]
+	propertiesMu.RUnlock()
+	if ok {
+		return sprop
+	}
+
+	propertiesMu.Lock()
+	sprop = getPropertiesLocked(t)
+	propertiesMu.Unlock()
+	return sprop
+}
+
+type (
+	oneofFuncsIface interface {
+		XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{})
+	}
+	oneofWrappersIface interface {
+		XXX_OneofWrappers() []interface{}
+	}
+)
+
+// getPropertiesLocked requires that propertiesMu is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+	if prop, ok := propertiesMap[t]; ok {
+		return prop
+	}
+
+	prop := new(StructProperties)
+	// in case of recursive protos, fill this in now.
+	propertiesMap[t] = prop
+
+	// build properties
+	prop.Prop = make([]*Properties, t.NumField())
+	prop.order = make([]int, t.NumField())
+
+	isOneofMessage := false
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		p := new(Properties)
+		name := f.Name
+		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false)
+
+		oneof := f.Tag.Get("protobuf_oneof") // special case
+		if oneof != "" {
+			isOneofMessage = true
+			// Oneof fields don't use the traditional protobuf tag.
+			p.OrigName = oneof
+		}
+		prop.Prop[i] = p
+		prop.order[i] = i
+		if debug {
+			print(i, " ", f.Name, " ", t.String(), " ")
+			if p.Tag > 0 {
+				print(p.String())
+			}
+			print("\n")
+		}
+	}
+
+	// Re-order prop.order.
+	sort.Sort(prop)
+
+	if isOneofMessage {
+		var oots []interface{}
+		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+		case oneofFuncsIface:
+			_, _, _, oots = m.XXX_OneofFuncs()
+		case oneofWrappersIface:
+			oots = m.XXX_OneofWrappers()
+		}
+		if len(oots) > 0 {
+			// Interpret oneof metadata.
+			prop.OneofTypes = make(map[string]*OneofProperties)
+			for _, oot := range oots {
+				oop := &OneofProperties{
+					Type: reflect.ValueOf(oot).Type(), // *T
+					Prop: new(Properties),
+				}
+				sft := oop.Type.Elem().Field(0)
+				oop.Prop.Name = sft.Name
+				oop.Prop.Parse(sft.Tag.Get("protobuf"))
+				// There will be exactly one interface field that
+				// this new value is assignable to.
+				for i := 0; i < t.NumField(); i++ {
+					f := t.Field(i)
+					if f.Type.Kind() != reflect.Interface {
+						continue
+					}
+					if !oop.Type.AssignableTo(f.Type) {
+						continue
+					}
+					oop.Field = i
+					break
+				}
+				prop.OneofTypes[oop.Prop.OrigName] = oop
+			}
+		}
+	}
+
+	// build required counts
+	// build tags
+	reqCount := 0
+	prop.decoderOrigNames = make(map[string]int)
+	for i, p := range prop.Prop {
+		if strings.HasPrefix(p.Name, "XXX_") {
+			// Internal fields should not appear in tags/origNames maps.
+			// They are handled specially when encoding and decoding.
+			continue
+		}
+		if p.Required {
+			reqCount++
+		}
+		prop.decoderTags.put(p.Tag, i)
+		prop.decoderOrigNames[p.OrigName] = i
+	}
+	prop.reqCount = reqCount
+
+	return prop
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32)
+var enumStringMaps = make(map[string]map[int32]string)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+	if _, ok := enumValueMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumValueMaps[typeName] = valueMap
+	if _, ok := enumStringMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumStringMaps[typeName] = unusedNameMap
+}
+
+// EnumValueMap returns the mapping from names to integers of the
+// enum type enumType, or nil if not found.
+func EnumValueMap(enumType string) map[string]int32 {
+	return enumValueMaps[enumType]
+}
+
+// A registry of all linked message types.
+// The string is a fully-qualified proto name ("pkg.Message").
+var (
+	protoTypedNils = make(map[string]Message)      // a map from proto names to typed nil pointers
+	protoMapTypes  = make(map[string]reflect.Type) // a map from proto names to map types
+	revProtoTypes  = make(map[reflect.Type]string)
+)
+
+// RegisterType is called from generated code and maps from the fully qualified
+// proto name to the type (pointer to struct) of the protocol buffer.
+func RegisterType(x Message, name string) {
+	if _, ok := protoTypedNils[name]; ok {
+		// TODO: Some day, make this a panic.
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 {
+		// Generated code always calls RegisterType with nil x.
+		// This check is just for extra safety.
+		protoTypedNils[name] = x
+	} else {
+		protoTypedNils[name] = reflect.Zero(t).Interface().(Message)
+	}
+	revProtoTypes[t] = name
+}
+
+// RegisterMapType is called from generated code and maps from the fully qualified
+// proto name to the native map type of the proto map definition.
+func RegisterMapType(x interface{}, name string) {
+	if reflect.TypeOf(x).Kind() != reflect.Map {
+		panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name))
+	}
+	if _, ok := protoMapTypes[name]; ok {
+		log.Printf("proto: duplicate proto type registered: %s", name)
+		return
+	}
+	t := reflect.TypeOf(x)
+	protoMapTypes[name] = t
+	revProtoTypes[t] = name
+}
+
+// MessageName returns the fully-qualified proto name for the given message type.
+func MessageName(x Message) string {
+	type xname interface {
+		XXX_MessageName() string
+	}
+	if m, ok := x.(xname); ok {
+		return m.XXX_MessageName()
+	}
+	return revProtoTypes[reflect.TypeOf(x)]
+}
+
+// MessageType returns the message type (pointer to struct) for a named message.
+// The type is not guaranteed to implement proto.Message if the name refers to a
+// map entry.
+func MessageType(name string) reflect.Type {
+	if t, ok := protoTypedNils[name]; ok {
+		return reflect.TypeOf(t)
+	}
+	return protoMapTypes[name]
+}
+
+// A registry of all linked proto files.
+var (
+	protoFiles = make(map[string][]byte) // file name => fileDescriptor
+)
+
+// RegisterFile is called from generated code and maps from the
+// full file name of a .proto file to its compressed FileDescriptorProto.
+func RegisterFile(filename string, fileDescriptor []byte) {
+	protoFiles[filename] = fileDescriptor
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto for a .proto file.
+func FileDescriptor(filename string) []byte { return protoFiles[filename] }
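
Properties.Parse above consumes the generator's struct-tag string, and everything after def= is kept verbatim because a default is always the last entry. A short usage sketch, assuming the vendored import path github.com/gogo/protobuf/proto resolves in the caller's module:

```go
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func main() {
	var p proto.Properties
	// Commas after def= are not escaped, so the default keeps them.
	p.Parse("bytes,49,opt,name=foo,json=fooBar,def=hello, world")

	fmt.Println(p.Tag)        // 49
	fmt.Println(p.OrigName)   // foo
	fmt.Println(p.JSONName)   // fooBar
	fmt.Println(p.HasDefault) // true
	fmt.Println(p.Default)    // hello, world
}
```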
diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
new file mode 100644
index 0000000..40ea3dd
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go
@@ -0,0 +1,36 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+)
+
+var sizerType = reflect.TypeOf((*Sizer)(nil)).Elem()
+var protosizerType = reflect.TypeOf((*ProtoSizer)(nil)).Elem()
diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
new file mode 100644
index 0000000..5a5fd93
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go
@@ -0,0 +1,119 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"io"
+)
+
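+// Skip reports how many bytes the field starting at data[0] occupies (its key
+// plus its value). For example, field 1 encoded as the varint 150 is the three
+// bytes 0x08 0x96 0x01, so Skip returns 3 for that input.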
+func Skip(data []byte) (n int, err error) {
+	l := len(data)
+	index := 0
+	for index < l {
+		var wire uint64
+		for shift := uint(0); ; shift += 7 {
+			if index >= l {
+				return 0, io.ErrUnexpectedEOF
+			}
+			b := data[index]
+			index++
+			wire |= (uint64(b) & 0x7F) << shift
+			if b < 0x80 {
+				break
+			}
+		}
+		wireType := int(wire & 0x7)
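+		// The low three bits of the key give the wire type:
+		// 0 varint, 1 fixed64, 2 length-delimited, 3 start group,
+		// 4 end group, 5 fixed32.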
+		switch wireType {
+		case 0:
+			for {
+				if index >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				index++
+				if data[index-1] < 0x80 {
+					break
+				}
+			}
+			return index, nil
+		case 1:
+			index += 8
+			return index, nil
+		case 2:
+			var length int
+			for shift := uint(0); ; shift += 7 {
+				if index >= l {
+					return 0, io.ErrUnexpectedEOF
+				}
+				b := data[index]
+				index++
+				length |= (int(b) & 0x7F) << shift
+				if b < 0x80 {
+					break
+				}
+			}
+			index += length
+			return index, nil
+		case 3:
+			for {
+				var innerWire uint64
+				var start int = index
+				for shift := uint(0); ; shift += 7 {
+					if index >= l {
+						return 0, io.ErrUnexpectedEOF
+					}
+					b := data[index]
+					index++
+					innerWire |= (uint64(b) & 0x7F) << shift
+					if b < 0x80 {
+						break
+					}
+				}
+				innerWireType := int(innerWire & 0x7)
+				if innerWireType == 4 {
+					break
+				}
+				next, err := Skip(data[start:])
+				if err != nil {
+					return 0, err
+				}
+				index = start + next
+			}
+			return index, nil
+		case 4:
+			return index, nil
+		case 5:
+			index += 4
+			return index, nil
+		default:
+			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
+		}
+	}
+	panic("unreachable")
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal.go b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
new file mode 100644
index 0000000..f8babde
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_marshal.go
@@ -0,0 +1,3009 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// a sizer takes a pointer to a field and the size of its tag, and computes the
+// size of the encoded data.
+type sizer func(pointer, int) int
+
+// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format),
+// marshals the field to the end of the slice, and returns the slice and error (if any).
+type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error)
+
+// marshalInfo is the information used for marshaling a message.
+type marshalInfo struct {
+	typ          reflect.Type
+	fields       []*marshalFieldInfo
+	unrecognized field                      // offset of XXX_unrecognized
+	extensions   field                      // offset of XXX_InternalExtensions
+	v1extensions field                      // offset of XXX_extensions
+	sizecache    field                      // offset of XXX_sizecache
+	initialized  int32                      // 0 -- only typ is set, 1 -- fully initialized
+	messageset   bool                       // uses message set wire format
+	hasmarshaler bool                       // has custom marshaler
+	sync.RWMutex                            // protect extElems map, also for initialization
+	extElems     map[int32]*marshalElemInfo // info of extension elements
+
+	hassizer      bool // has custom sizer
+	hasprotosizer bool // has custom protosizer
+
+	bytesExtensions field // offset of XXX_extensions where the field type is []byte
+}
+
+// marshalFieldInfo is the information used for marshaling a field of a message.
+type marshalFieldInfo struct {
+	field      field
+	wiretag    uint64 // tag in wire format
+	tagsize    int    // size of tag in wire format
+	sizer      sizer
+	marshaler  marshaler
+	isPointer  bool
+	required   bool                              // field is required
+	name       string                            // name of the field, for error reporting
+	oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements
+}
+
+// marshalElemInfo is the information used for marshaling an extension or oneof element.
+type marshalElemInfo struct {
+	wiretag   uint64 // tag in wire format
+	tagsize   int    // size of tag in wire format
+	sizer     sizer
+	marshaler marshaler
+	isptr     bool // elem is pointer typed, thus interface of this type is a direct interface (extension only)
+}
+
+var (
+	marshalInfoMap  = map[reflect.Type]*marshalInfo{}
+	marshalInfoLock sync.Mutex
+
+	uint8SliceType = reflect.TypeOf(([]uint8)(nil)).Kind()
+)
+
+// getMarshalInfo returns the information to marshal a given type of message.
+// The info it returns is not necessarily initialized.
+// t is the type of the message (NOT the pointer to it).
+func getMarshalInfo(t reflect.Type) *marshalInfo {
+	marshalInfoLock.Lock()
+	u, ok := marshalInfoMap[t]
+	if !ok {
+		u = &marshalInfo{typ: t}
+		marshalInfoMap[t] = u
+	}
+	marshalInfoLock.Unlock()
+	return u
+}
+
+// Size is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It computes the size of the encoded data of msg.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Size(msg Message) int {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return 0
+	}
+	return u.size(ptr)
+}
+
+// Marshal is the entry point from generated code,
+// and should ONLY be called by generated code.
+// It marshals msg to the end of b.
+// a is a pointer to a place to store cached marshal info.
+func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) {
+	u := getMessageMarshalInfo(msg, a)
+	ptr := toPointer(&msg)
+	if ptr.isNil() {
+		// We get here if msg is a typed nil ((*SomeMessage)(nil)),
+		// so it satisfies the interface, and msg == nil wouldn't
+		// catch it. We don't want to crash in this case.
+		return b, ErrNil
+	}
+	return u.marshal(b, ptr, deterministic)
+}
+
+func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo {
+	// u := a.marshal, but atomically.
+	// We use an atomic here to ensure memory consistency.
+	u := atomicLoadMarshalInfo(&a.marshal)
+	if u == nil {
+		// Get marshal information from type of message.
+		t := reflect.ValueOf(msg).Type()
+		if t.Kind() != reflect.Ptr {
+			panic(fmt.Sprintf("cannot handle non-pointer message type %v", t))
+		}
+		u = getMarshalInfo(t.Elem())
+		// Store it in the cache for later users.
+		// a.marshal = u, but atomically.
+		atomicStoreMarshalInfo(&a.marshal, u)
+	}
+	return u
+}
+
+// size is the main function to compute the size of the encoded data of a message.
+// ptr is the pointer to the message.
+func (u *marshalInfo) size(ptr pointer) int {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		// Uses the message's Size method if available
+		if u.hassizer {
+			s := ptr.asPointerTo(u.typ).Interface().(Sizer)
+			return s.Size()
+		}
+		// Uses the message's ProtoSize method if available
+		if u.hasprotosizer {
+			s := ptr.asPointerTo(u.typ).Interface().(ProtoSizer)
+			return s.ProtoSize()
+		}
+
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b, _ := m.Marshal()
+		return len(b)
+	}
+
+	n := 0
+	for _, f := range u.fields {
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		n += f.sizer(ptr.offset(f.field), f.tagsize)
+	}
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			n += u.sizeMessageSet(e)
+		} else {
+			n += u.sizeExtensions(e)
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		n += u.sizeV1Extensions(m)
+	}
+	if u.bytesExtensions.IsValid() {
+		s := *ptr.offset(u.bytesExtensions).toBytes()
+		n += len(s)
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		n += len(s)
+	}
+
+	// cache the result for use in marshal
+	if u.sizecache.IsValid() {
+		atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n))
+	}
+	return n
+}
+
+// cachedsize gets the size from the cache. If there is no cache (i.e. the message
+// is not generated), it falls back to computing the size.
+func (u *marshalInfo) cachedsize(ptr pointer) int {
+	if u.sizecache.IsValid() {
+		return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32()))
+	}
+	return u.size(ptr)
+}
+
+// marshal is the main function to marshal a message. It takes a byte slice and appends
+// the encoded data to the end of the slice, and returns the slice and error (if any).
+// ptr is the pointer to the message.
+// If deterministic is true, maps are marshaled in deterministic order.
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeMarshalInfo()
+	}
+
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if u.hasmarshaler {
+		m := ptr.asPointerTo(u.typ).Interface().(Marshaler)
+		b1, err := m.Marshal()
+		b = append(b, b1...)
+		return b, err
+	}
+
+	var err, errLater error
+	// The old marshaler encodes extensions at beginning.
+	if u.extensions.IsValid() {
+		e := ptr.offset(u.extensions).toExtensions()
+		if u.messageset {
+			b, err = u.appendMessageSet(b, e, deterministic)
+		} else {
+			b, err = u.appendExtensions(b, e, deterministic)
+		}
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.v1extensions.IsValid() {
+		m := *ptr.offset(u.v1extensions).toOldExtensions()
+		b, err = u.appendV1Extensions(b, m, deterministic)
+		if err != nil {
+			return b, err
+		}
+	}
+	if u.bytesExtensions.IsValid() {
+		s := *ptr.offset(u.bytesExtensions).toBytes()
+		b = append(b, s...)
+	}
+	for _, f := range u.fields {
+		if f.required {
+			if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+				// Required field is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name}
+				}
+				continue
+			}
+		}
+		if f.isPointer && ptr.offset(f.field).getPointer().isNil() {
+			// nil pointer always marshals to nothing
+			continue
+		}
+		b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic)
+		if err != nil {
+			if err1, ok := err.(*RequiredNotSetError); ok {
+				// Required field in submessage is not set.
+				// We record the error but keep going, to give a complete marshaling.
+				if errLater == nil {
+					errLater = &RequiredNotSetError{f.name + "." + err1.field}
+				}
+				continue
+			}
+			if err == errRepeatedHasNil {
+				err = errors.New("proto: repeated field " + f.name + " has nil element")
+			}
+			if err == errInvalidUTF8 {
+				if errLater == nil {
+					fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+					errLater = &invalidUTF8Error{fullName}
+				}
+				continue
+			}
+			return b, err
+		}
+	}
+	if u.unrecognized.IsValid() {
+		s := *ptr.offset(u.unrecognized).toBytes()
+		b = append(b, s...)
+	}
+	return b, errLater
+}
+
+// computeMarshalInfo initializes the marshal info.
+func (u *marshalInfo) computeMarshalInfo() {
+	u.Lock()
+	defer u.Unlock()
+	if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock
+		return
+	}
+
+	t := u.typ
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.v1extensions = invalidField
+	u.bytesExtensions = invalidField
+	u.sizecache = invalidField
+	isOneofMessage := false
+
+	if reflect.PtrTo(t).Implements(sizerType) {
+		u.hassizer = true
+	}
+	if reflect.PtrTo(t).Implements(protosizerType) {
+		u.hasprotosizer = true
+	}
+	// If the message can marshal itself, let it do it, for compatibility.
+	// NOTE: This is not efficient.
+	if reflect.PtrTo(t).Implements(marshalerType) {
+		u.hasmarshaler = true
+		atomic.StoreInt32(&u.initialized, 1)
+		return
+	}
+
+	n := t.NumField()
+
+	// deal with XXX fields first
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		if f.Tag.Get("protobuf_oneof") != "" {
+			isOneofMessage = true
+		}
+		if !strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		switch f.Name {
+		case "XXX_sizecache":
+			u.sizecache = toField(&f)
+		case "XXX_unrecognized":
+			u.unrecognized = toField(&f)
+		case "XXX_InternalExtensions":
+			u.extensions = toField(&f)
+			u.messageset = f.Tag.Get("protobuf_messageset") == "1"
+		case "XXX_extensions":
+			if f.Type.Kind() == reflect.Map {
+				u.v1extensions = toField(&f)
+			} else {
+				u.bytesExtensions = toField(&f)
+			}
+		case "XXX_NoUnkeyedLiteral":
+			// nothing to do
+		default:
+			panic("unknown XXX field: " + f.Name)
+		}
+		n--
+	}
+
+	// get oneof implementers
+	var oneofImplementers []interface{}
+	// gogo: isOneofMessage is needed for embedded oneof messages that lack a marshaler and unmarshaler
+	if isOneofMessage {
+		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+		case oneofFuncsIface:
+			_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+		case oneofWrappersIface:
+			oneofImplementers = m.XXX_OneofWrappers()
+		}
+	}
+
+	// normal fields
+	fields := make([]marshalFieldInfo, n) // batch allocation
+	u.fields = make([]*marshalFieldInfo, 0, n)
+	for i, j := 0, 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+		field := &fields[j]
+		j++
+		field.name = f.Name
+		u.fields = append(u.fields, field)
+		if f.Tag.Get("protobuf_oneof") != "" {
+			field.computeOneofFieldInfo(&f, oneofImplementers)
+			continue
+		}
+		if f.Tag.Get("protobuf") == "" {
+			// field has no tag (not in generated message), ignore it
+			u.fields = u.fields[:len(u.fields)-1]
+			j--
+			continue
+		}
+		field.computeMarshalFieldInfo(&f)
+	}
+
+	// fields are marshaled in tag order on the wire.
+	sort.Sort(byTag(u.fields))
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
+
+// helper for sorting fields by tag
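+// Sorting by wiretag also sorts by field number, since the field number sits
+// in the bits above the 3-bit wire type.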
+type byTag []*marshalFieldInfo
+
+func (a byTag) Len() int           { return len(a) }
+func (a byTag) Swap(i, j int)      { a[i], a[j] = a[j], a[i] }
+func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag }
+
+// getExtElemInfo returns the information to marshal an extension element.
+// The info it returns is initialized.
+func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo {
+	// get from cache first
+	u.RLock()
+	e, ok := u.extElems[desc.Field]
+	u.RUnlock()
+	if ok {
+		return e
+	}
+
+	t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct
+	tags := strings.Split(desc.Tag, ",")
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	sizr, marshalr := typeMarshaler(t, tags, false, false)
+	e = &marshalElemInfo{
+		wiretag:   uint64(tag)<<3 | wt,
+		tagsize:   SizeVarint(uint64(tag) << 3),
+		sizer:     sizr,
+		marshaler: marshalr,
+		isptr:     t.Kind() == reflect.Ptr,
+	}
+
+	// update cache
+	u.Lock()
+	if u.extElems == nil {
+		u.extElems = make(map[int32]*marshalElemInfo)
+	}
+	u.extElems[desc.Field] = e
+	u.Unlock()
+	return e
+}
+
+// computeMarshalFieldInfo fills up the information to marshal a field.
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) {
+	// parse protobuf tag of the field.
+	// tag has format of "bytes,49,opt,name=foo,def=hello!"
+	tags := strings.Split(f.Tag.Get("protobuf"), ",")
+	if tags[0] == "" {
+		return
+	}
+	tag, err := strconv.Atoi(tags[1])
+	if err != nil {
+		panic("tag is not an integer")
+	}
+	wt := wiretype(tags[0])
+	if tags[2] == "req" {
+		fi.required = true
+	}
+	fi.setTag(f, tag, wt)
+	fi.setMarshaler(f, tags)
+}
+
+func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) {
+	fi.field = toField(f)
+	fi.wiretag = math.MaxInt32 // Use a large tag number so oneofs sort at the end. This tag will not appear on the wire.
+	fi.isPointer = true
+	fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f)
+	fi.oneofElems = make(map[reflect.Type]*marshalElemInfo)
+
+	ityp := f.Type // interface type
+	for _, o := range oneofImplementers {
+		t := reflect.TypeOf(o)
+		if !t.Implements(ityp) {
+			continue
+		}
+		sf := t.Elem().Field(0) // oneof implementer is a struct with a single field
+		tags := strings.Split(sf.Tag.Get("protobuf"), ",")
+		tag, err := strconv.Atoi(tags[1])
+		if err != nil {
+			panic("tag is not an integer")
+		}
+		wt := wiretype(tags[0])
+		sizr, marshalr := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value
+		fi.oneofElems[t.Elem()] = &marshalElemInfo{
+			wiretag:   uint64(tag)<<3 | wt,
+			tagsize:   SizeVarint(uint64(tag) << 3),
+			sizer:     sizr,
+			marshaler: marshalr,
+		}
+	}
+}
+
+// wiretype returns the wire encoding of the type.
+func wiretype(encoding string) uint64 {
+	switch encoding {
+	case "fixed32":
+		return WireFixed32
+	case "fixed64":
+		return WireFixed64
+	case "varint", "zigzag32", "zigzag64":
+		return WireVarint
+	case "bytes":
+		return WireBytes
+	case "group":
+		return WireStartGroup
+	}
+	panic("unknown wire type " + encoding)
+}
+
+// setTag fills up the tag (in wire format) and its size in the info of a field.
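+// For example, field number 16 with wire type 2 (bytes) gives
+// wiretag = 16<<3|2 = 130, which encodes as the two-byte varint 0x82 0x01,
+// so tagsize is 2.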
+func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) {
+	fi.field = toField(f)
+	fi.wiretag = uint64(tag)<<3 | wt
+	fi.tagsize = SizeVarint(uint64(tag) << 3)
+}
+
+// setMarshaler fills up the sizer and marshaler in the info of a field.
+func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) {
+	switch f.Type.Kind() {
+	case reflect.Map:
+		// map field
+		fi.isPointer = true
+		fi.sizer, fi.marshaler = makeMapMarshaler(f)
+		return
+	case reflect.Ptr, reflect.Slice:
+		fi.isPointer = true
+	}
+	fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false)
+}
+
+// typeMarshaler returns the sizer and marshaler of a given field.
+// t is the type of the field.
+// tags is the generated "protobuf" tag of the field.
+// If nozero is true, the zero value is not marshaled to the wire.
+// If oneof is true, it is a oneof field.
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) {
+	encoding := tags[0]
+
+	pointer := false
+	slice := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	packed := false
+	proto3 := false
+	ctype := false
+	isTime := false
+	isDuration := false
+	isWktPointer := false
+	validateUTF8 := true
+	for i := 2; i < len(tags); i++ {
+		if tags[i] == "packed" {
+			packed = true
+		}
+		if tags[i] == "proto3" {
+			proto3 = true
+		}
+		if strings.HasPrefix(tags[i], "customtype=") {
+			ctype = true
+		}
+		if tags[i] == "stdtime" {
+			isTime = true
+		}
+		if tags[i] == "stdduration" {
+			isDuration = true
+		}
+		if tags[i] == "wktptr" {
+			isWktPointer = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+	if !proto3 && !pointer && !slice {
+		nozero = false
+	}
+
+	if ctype {
+		if reflect.PtrTo(t).Implements(customType) {
+			if slice {
+				return makeMessageRefSliceMarshaler(getMarshalInfo(t))
+			}
+			if pointer {
+				return makeCustomPtrMarshaler(getMarshalInfo(t))
+			}
+			return makeCustomMarshaler(getMarshalInfo(t))
+		} else {
+			panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t))
+		}
+	}
+
+	if isTime {
+		if pointer {
+			if slice {
+				return makeTimePtrSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeTimePtrMarshaler(getMarshalInfo(t))
+		}
+		if slice {
+			return makeTimeSliceMarshaler(getMarshalInfo(t))
+		}
+		return makeTimeMarshaler(getMarshalInfo(t))
+	}
+
+	if isDuration {
+		if pointer {
+			if slice {
+				return makeDurationPtrSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeDurationPtrMarshaler(getMarshalInfo(t))
+		}
+		if slice {
+			return makeDurationSliceMarshaler(getMarshalInfo(t))
+		}
+		return makeDurationMarshaler(getMarshalInfo(t))
+	}
+
+	if isWktPointer {
+		switch t.Kind() {
+		case reflect.Float64:
+			if pointer {
+				if slice {
+					return makeStdDoubleValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdDoubleValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdDoubleValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdDoubleValueMarshaler(getMarshalInfo(t))
+		case reflect.Float32:
+			if pointer {
+				if slice {
+					return makeStdFloatValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdFloatValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdFloatValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdFloatValueMarshaler(getMarshalInfo(t))
+		case reflect.Int64:
+			if pointer {
+				if slice {
+					return makeStdInt64ValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdInt64ValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdInt64ValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdInt64ValueMarshaler(getMarshalInfo(t))
+		case reflect.Uint64:
+			if pointer {
+				if slice {
+					return makeStdUInt64ValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdUInt64ValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdUInt64ValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdUInt64ValueMarshaler(getMarshalInfo(t))
+		case reflect.Int32:
+			if pointer {
+				if slice {
+					return makeStdInt32ValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdInt32ValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdInt32ValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdInt32ValueMarshaler(getMarshalInfo(t))
+		case reflect.Uint32:
+			if pointer {
+				if slice {
+					return makeStdUInt32ValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdUInt32ValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdUInt32ValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdUInt32ValueMarshaler(getMarshalInfo(t))
+		case reflect.Bool:
+			if pointer {
+				if slice {
+					return makeStdBoolValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdBoolValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdBoolValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdBoolValueMarshaler(getMarshalInfo(t))
+		case reflect.String:
+			if pointer {
+				if slice {
+					return makeStdStringValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdStringValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdStringValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdStringValueMarshaler(getMarshalInfo(t))
+		case uint8SliceType:
+			if pointer {
+				if slice {
+					return makeStdBytesValuePtrSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeStdBytesValuePtrMarshaler(getMarshalInfo(t))
+			}
+			if slice {
+				return makeStdBytesValueSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeStdBytesValueMarshaler(getMarshalInfo(t))
+		default:
+			panic(fmt.Sprintf("unknown wktpointer type %#v", t))
+		}
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return sizeBoolPtr, appendBoolPtr
+		}
+		if slice {
+			if packed {
+				return sizeBoolPackedSlice, appendBoolPackedSlice
+			}
+			return sizeBoolSlice, appendBoolSlice
+		}
+		if nozero {
+			return sizeBoolValueNoZero, appendBoolValueNoZero
+		}
+		return sizeBoolValue, appendBoolValue
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixed32Ptr, appendFixed32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed32PackedSlice, appendFixed32PackedSlice
+				}
+				return sizeFixed32Slice, appendFixed32Slice
+			}
+			if nozero {
+				return sizeFixed32ValueNoZero, appendFixed32ValueNoZero
+			}
+			return sizeFixed32Value, appendFixed32Value
+		case "varint":
+			if pointer {
+				return sizeVarint32Ptr, appendVarint32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint32PackedSlice, appendVarint32PackedSlice
+				}
+				return sizeVarint32Slice, appendVarint32Slice
+			}
+			if nozero {
+				return sizeVarint32ValueNoZero, appendVarint32ValueNoZero
+			}
+			return sizeVarint32Value, appendVarint32Value
+		}
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return sizeFixedS32Ptr, appendFixedS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS32PackedSlice, appendFixedS32PackedSlice
+				}
+				return sizeFixedS32Slice, appendFixedS32Slice
+			}
+			if nozero {
+				return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero
+			}
+			return sizeFixedS32Value, appendFixedS32Value
+		case "varint":
+			if pointer {
+				return sizeVarintS32Ptr, appendVarintS32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS32PackedSlice, appendVarintS32PackedSlice
+				}
+				return sizeVarintS32Slice, appendVarintS32Slice
+			}
+			if nozero {
+				return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero
+			}
+			return sizeVarintS32Value, appendVarintS32Value
+		case "zigzag32":
+			if pointer {
+				return sizeZigzag32Ptr, appendZigzag32Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag32PackedSlice, appendZigzag32PackedSlice
+				}
+				return sizeZigzag32Slice, appendZigzag32Slice
+			}
+			if nozero {
+				return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero
+			}
+			return sizeZigzag32Value, appendZigzag32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixed64Ptr, appendFixed64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixed64PackedSlice, appendFixed64PackedSlice
+				}
+				return sizeFixed64Slice, appendFixed64Slice
+			}
+			if nozero {
+				return sizeFixed64ValueNoZero, appendFixed64ValueNoZero
+			}
+			return sizeFixed64Value, appendFixed64Value
+		case "varint":
+			if pointer {
+				return sizeVarint64Ptr, appendVarint64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarint64PackedSlice, appendVarint64PackedSlice
+				}
+				return sizeVarint64Slice, appendVarint64Slice
+			}
+			if nozero {
+				return sizeVarint64ValueNoZero, appendVarint64ValueNoZero
+			}
+			return sizeVarint64Value, appendVarint64Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return sizeFixedS64Ptr, appendFixedS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeFixedS64PackedSlice, appendFixedS64PackedSlice
+				}
+				return sizeFixedS64Slice, appendFixedS64Slice
+			}
+			if nozero {
+				return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero
+			}
+			return sizeFixedS64Value, appendFixedS64Value
+		case "varint":
+			if pointer {
+				return sizeVarintS64Ptr, appendVarintS64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeVarintS64PackedSlice, appendVarintS64PackedSlice
+				}
+				return sizeVarintS64Slice, appendVarintS64Slice
+			}
+			if nozero {
+				return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero
+			}
+			return sizeVarintS64Value, appendVarintS64Value
+		case "zigzag64":
+			if pointer {
+				return sizeZigzag64Ptr, appendZigzag64Ptr
+			}
+			if slice {
+				if packed {
+					return sizeZigzag64PackedSlice, appendZigzag64PackedSlice
+				}
+				return sizeZigzag64Slice, appendZigzag64Slice
+			}
+			if nozero {
+				return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero
+			}
+			return sizeZigzag64Value, appendZigzag64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return sizeFloat32Ptr, appendFloat32Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat32PackedSlice, appendFloat32PackedSlice
+			}
+			return sizeFloat32Slice, appendFloat32Slice
+		}
+		if nozero {
+			return sizeFloat32ValueNoZero, appendFloat32ValueNoZero
+		}
+		return sizeFloat32Value, appendFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return sizeFloat64Ptr, appendFloat64Ptr
+		}
+		if slice {
+			if packed {
+				return sizeFloat64PackedSlice, appendFloat64PackedSlice
+			}
+			return sizeFloat64Slice, appendFloat64Slice
+		}
+		if nozero {
+			return sizeFloat64ValueNoZero, appendFloat64ValueNoZero
+		}
+		return sizeFloat64Value, appendFloat64Value
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return sizeStringPtr, appendUTF8StringPtr
+			}
+			if slice {
+				return sizeStringSlice, appendUTF8StringSlice
+			}
+			if nozero {
+				return sizeStringValueNoZero, appendUTF8StringValueNoZero
+			}
+			return sizeStringValue, appendUTF8StringValue
+		}
+		if pointer {
+			return sizeStringPtr, appendStringPtr
+		}
+		if slice {
+			return sizeStringSlice, appendStringSlice
+		}
+		if nozero {
+			return sizeStringValueNoZero, appendStringValueNoZero
+		}
+		return sizeStringValue, appendStringValue
+	case reflect.Slice:
+		if slice {
+			return sizeBytesSlice, appendBytesSlice
+		}
+		if oneof {
+			// Oneof bytes field may also have "proto3" tag.
+			// We want to marshal it as a oneof field. Do this
+			// check before the proto3 check.
+			return sizeBytesOneof, appendBytesOneof
+		}
+		if proto3 {
+			return sizeBytes3, appendBytes3
+		}
+		return sizeBytes, appendBytes
+	case reflect.Struct:
+		switch encoding {
+		case "group":
+			if slice {
+				return makeGroupSliceMarshaler(getMarshalInfo(t))
+			}
+			return makeGroupMarshaler(getMarshalInfo(t))
+		case "bytes":
+			if pointer {
+				if slice {
+					return makeMessageSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeMessageMarshaler(getMarshalInfo(t))
+			} else {
+				if slice {
+					return makeMessageRefSliceMarshaler(getMarshalInfo(t))
+				}
+				return makeMessageRefMarshaler(getMarshalInfo(t))
+			}
+		}
+	}
+	panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding))
+}
+
+// Below are functions to size/marshal a specific type of a field.
+// They are stored in the field's info, and called by function pointers.
+// They have type sizer or marshaler.
+
+func sizeFixed32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixed32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixed32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixedS32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFixedS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFloat32Value(_ pointer, tagsize int) int {
+	return 4 + tagsize
+}
+func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return 0
+	}
+	return 4 + tagsize
+}
+func sizeFloat32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	return (4 + tagsize) * len(s)
+}
+func sizeFloat32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize
+}
+func sizeFixed64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixed64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixed64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFixedS64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFixedS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeFloat64Value(_ pointer, tagsize int) int {
+	return 8 + tagsize
+}
+func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return 0
+	}
+	return 8 + tagsize
+}
+func sizeFloat64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	return (8 + tagsize) * len(s)
+}
+func sizeFloat64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize
+}
+func sizeVarint32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarint32Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarint32Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarint32PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarint64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(v) + tagsize
+}
+func sizeVarint64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(*p) + tagsize
+}
+func sizeVarint64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v) + tagsize
+	}
+	return n
+}
+func sizeVarint64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeVarintS64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v)) + tagsize
+}
+func sizeVarintS64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	return SizeVarint(uint64(*p)) + tagsize
+}
+func sizeVarintS64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v)) + tagsize
+	}
+	return n
+}
+func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
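+// The zigzag sizers below use the zigzag mapping (v<<1)^(v>>31) (v>>63 for the
+// 64-bit variants), which folds signed values so small magnitudes stay small:
+// 0->0, -1->1, 1->2, -2->3, and so on.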
+func sizeZigzag32Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Ptr(ptr pointer, tagsize int) int {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+}
+func sizeZigzag32Slice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize
+	}
+	return n
+}
+func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeZigzag64Value(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return 0
+	}
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Ptr(ptr pointer, tagsize int) int {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+}
+func sizeZigzag64Slice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize
+	}
+	return n
+}
+func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return 0
+	}
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	return n + SizeVarint(uint64(n)) + tagsize
+}
+func sizeBoolValue(_ pointer, tagsize int) int {
+	return 1 + tagsize
+}
+func sizeBoolValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toBool()
+	if !v {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return 0
+	}
+	return 1 + tagsize
+}
+func sizeBoolSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	return (1 + tagsize) * len(s)
+}
+func sizeBoolPackedSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return 0
+	}
+	return len(s) + SizeVarint(uint64(len(s))) + tagsize
+}
+func sizeStringValue(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringValueNoZero(ptr pointer, tagsize int) int {
+	v := *ptr.toString()
+	if v == "" {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringPtr(ptr pointer, tagsize int) int {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return 0
+	}
+	v := *p
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeStringSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toStringSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+func sizeBytes(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if v == nil {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytes3(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return 0
+	}
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesOneof(ptr pointer, tagsize int) int {
+	v := *ptr.toBytes()
+	return len(v) + SizeVarint(uint64(len(v))) + tagsize
+}
+func sizeBytesSlice(ptr pointer, tagsize int) int {
+	s := *ptr.toBytesSlice()
+	n := 0
+	for _, v := range s {
+		n += len(v) + SizeVarint(uint64(len(v))) + tagsize
+	}
+	return n
+}
+
+// appendFixed32 appends an encoded fixed32 to b.
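+// The four bytes are written in little-endian order, so 0x01020304 becomes
+// 0x04 0x03 0x02 0x01.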
+func appendFixed32(b []byte, v uint32) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24))
+	return b
+}
+
+// appendFixed64 appends an encoded fixed64 to b.
+func appendFixed64(b []byte, v uint64) []byte {
+	b = append(b,
+		byte(v),
+		byte(v>>8),
+		byte(v>>16),
+		byte(v>>24),
+		byte(v>>32),
+		byte(v>>40),
+		byte(v>>48),
+		byte(v>>56))
+	return b
+}
+
+// appendVarint appends an encoded varint to b.
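+// Each output byte carries seven value bits, with the high bit set on every
+// byte except the last; for example, 300 encodes as 0xAC 0x02.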
+func appendVarint(b []byte, v uint64) []byte {
+	// TODO: make 1-byte (maybe 2-byte) case inline-able, once we
+	// have a non-leaf inliner.
+	switch {
+	case v < 1<<7:
+		b = append(b, byte(v))
+	case v < 1<<14:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte(v>>7))
+	case v < 1<<21:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte(v>>14))
+	case v < 1<<28:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte(v>>21))
+	case v < 1<<35:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte(v>>28))
+	case v < 1<<42:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte(v>>35))
+	case v < 1<<49:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte(v>>42))
+	case v < 1<<56:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte(v>>49))
+	case v < 1<<63:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte(v>>56))
+	default:
+		b = append(b,
+			byte(v&0x7f|0x80),
+			byte((v>>7)&0x7f|0x80),
+			byte((v>>14)&0x7f|0x80),
+			byte((v>>21)&0x7f|0x80),
+			byte((v>>28)&0x7f|0x80),
+			byte((v>>35)&0x7f|0x80),
+			byte((v>>42)&0x7f|0x80),
+			byte((v>>49)&0x7f|0x80),
+			byte((v>>56)&0x7f|0x80),
+			1)
+	}
+	return b
+}
+
+func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, *p)
+	return b, nil
+}
+func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, v)
+	}
+	return b, nil
+}
+func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(v))
+	return b, nil
+}
+func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, uint32(*p))
+	return b, nil
+}
+func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, uint32(v))
+	}
+	return b, nil
+}
+func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float32bits(*ptr.toFloat32())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, v)
+	return b, nil
+}
+func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed32(b, math.Float32bits(*p))
+	return b, nil
+}
+func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(4*len(s)))
+	for _, v := range s {
+		b = appendFixed32(b, math.Float32bits(v))
+	}
+	return b, nil
+}
+func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, *p)
+	return b, nil
+}
+func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, v)
+	}
+	return b, nil
+}
+func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(v))
+	return b, nil
+}
+func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, uint64(*p))
+	return b, nil
+}
+func appendFixedS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, uint64(v))
+	}
+	return b, nil
+}
+func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := math.Float64bits(*ptr.toFloat64())
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, v)
+	return b, nil
+}
+func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toFloat64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendFixed64(b, math.Float64bits(*p))
+	return b, nil
+}
+func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toFloat64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(8*len(s)))
+	for _, v := range s {
+		b = appendFixed64(b, math.Float64bits(v))
+	}
+	return b, nil
+}
+func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toUint64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, v)
+	return b, nil
+}
+func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toUint64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, *p)
+	return b, nil
+}
+func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toUint64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(v)
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, v)
+	}
+	return b, nil
+}
+func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v))
+	return b, nil
+}
+func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(*p))
+	return b, nil
+}
+func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
+func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v))
+	}
+	return b, nil
+}
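+// Note: the Zigzag appenders encode sint32/sint64 values with zigzag encoding,
+// (v << 1) ^ (v >> 31) or (v >> 63) using an arithmetic shift, so values of
+// small magnitude map to small varints: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3.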
+func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt32()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := ptr.getInt32Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	return b, nil
+}
+func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := ptr.getInt32Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31))))
+	}
+	return b, nil
+}
+func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toInt64()
+	if v == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toInt64Ptr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	v := *p
+	b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	return b, nil
+}
+func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toInt64Slice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	// compute size
+	n := 0
+	for _, v := range s {
+		n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63)))
+	}
+	b = appendVarint(b, uint64(n))
+	for _, v := range s {
+		b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63)))
+	}
+	return b, nil
+}
+func appendBoolValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	b = appendVarint(b, wiretag)
+	if v {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBool()
+	if !v {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = append(b, 1)
+	return b, nil
+}
+
+func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toBoolPtr()
+	if p == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	if *p {
+		b = append(b, 1)
+	} else {
+		b = append(b, 0)
+	}
+	return b, nil
+}
+func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBoolSlice()
+	if len(s) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag&^7|WireBytes)
+	b = appendVarint(b, uint64(len(s)))
+	for _, v := range s {
+		if v {
+			b = append(b, 1)
+		} else {
+			b = append(b, 0)
+		}
+	}
+	return b, nil
+}
+func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
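+// Note: the UTF8String variants check the value with utf8.ValidString; an
+// invalid string is still written, but errInvalidUTF8 is returned so the
+// caller can report it.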
+func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	v := *ptr.toString()
+	if v == "" {
+		return b, nil
+	}
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	p := *ptr.toStringPtr()
+	if p == nil {
+		return b, nil
+	}
+	v := *p
+	if !utf8.ValidString(v) {
+		invalidUTF8 = true
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
+func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	var invalidUTF8 bool
+	s := *ptr.toStringSlice()
+	for _, v := range s {
+		if !utf8.ValidString(v) {
+			invalidUTF8 = true
+		}
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	if invalidUTF8 {
+		return b, errInvalidUTF8
+	}
+	return b, nil
+}
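+// Note: appendBytes skips a nil slice, appendBytes3 skips an empty slice
+// (the proto3 zero value), and appendBytesOneof always emits the field.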
+func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if v == nil {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	if len(v) == 0 {
+		return b, nil
+	}
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	v := *ptr.toBytes()
+	b = appendVarint(b, wiretag)
+	b = appendVarint(b, uint64(len(v)))
+	b = append(b, v...)
+	return b, nil
+}
+func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) {
+	s := *ptr.toBytesSlice()
+	for _, v := range s {
+		b = appendVarint(b, wiretag)
+		b = appendVarint(b, uint64(len(v)))
+		b = append(b, v...)
+	}
+	return b, nil
+}
+
+// makeGroupMarshaler returns the sizer and marshaler for a group.
+// u is the marshal info of the underlying message.
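+// Groups are delimited by start-group and end-group tags rather than a
+// length prefix, which is why the sizer below adds 2*tagsize.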
+func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			return u.size(p) + 2*tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			var err error
+			b = appendVarint(b, wiretag) // start group
+			b, err = u.marshal(b, p, deterministic)
+			b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+			return b, err
+		}
+}
+
+// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice.
+// u is the marshal info of the underlying message.
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				n += u.size(v) + 2*tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag) // start group
+				b, err = u.marshal(b, v, deterministic)
+				b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMessageMarshaler returns the sizer and marshaler for a message field.
+// u is the marshal info of the message.
+func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.size(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getPointer()
+			if p.isNil() {
+				return b, nil
+			}
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(p)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, p, deterministic)
+		}
+}
+
+// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice.
+// u is the marshal info of the message.
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getPointerSlice()
+			n := 0
+			for _, v := range s {
+				if v.isNil() {
+					continue
+				}
+				siz := u.size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getPointerSlice()
+			var err error
+			var nerr nonFatal
+			for _, v := range s {
+				if v.isNil() {
+					return b, errRepeatedHasNil
+				}
+				b = appendVarint(b, wiretag)
+				siz := u.cachedsize(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if !nerr.Merge(err) {
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeMapMarshaler returns the sizer and marshaler for a map field.
+// f is the pointer to the reflect data structure of the field.
+func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) {
+	// figure out key and value type
+	t := f.Type
+	keyType := t.Key()
+	valType := t.Elem()
+	tags := strings.Split(f.Tag.Get("protobuf"), ",")
+	keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",")
+	valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+	stdOptions := false
+	for _, t := range tags {
+		if strings.HasPrefix(t, "customtype=") {
+			valTags = append(valTags, t)
+		}
+		if t == "stdtime" {
+			valTags = append(valTags, t)
+			stdOptions = true
+		}
+		if t == "stdduration" {
+			valTags = append(valTags, t)
+			stdOptions = true
+		}
+		if t == "wktptr" {
+			valTags = append(valTags, t)
+		}
+	}
+	keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map
+	valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map
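+	// A map entry is encoded as a nested message whose key is field number 1
+	// and whose value is field number 2, hence the 1<<3 and 2<<3 wire tags below.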
+	keyWireTag := 1<<3 | wiretype(keyTags[0])
+	valWireTag := 2<<3 | wiretype(valTags[0])
+
+	// We create an interface to get the addresses of the map key and value.
+	// If the value is pointer-typed, the interface is a direct interface and
+	// the idata itself is the value. Otherwise, the idata is a pointer to the
+	// value.
+	// The key cannot be pointer-typed.
+	valIsPtr := valType.Kind() == reflect.Ptr
+
+	// If the value is a message with nested maps, calling
+	// valSizer in marshal may be quadratic, so we use the
+	// cached version in marshal (but not in size).
+	// If the value is not a message type, there is no size
+	// cache, but it cannot be nested either, so valSizer is used directly.
+	valCachedSizer := valSizer
+	if valIsPtr && !stdOptions && valType.Elem().Kind() == reflect.Struct {
+		u := getMarshalInfo(valType.Elem())
+		valCachedSizer = func(ptr pointer, tagsize int) int {
+			// Same as message sizer, but use cache.
+			p := ptr.getPointer()
+			if p.isNil() {
+				return 0
+			}
+			siz := u.cachedsize(p)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		}
+	}
+	return func(ptr pointer, tagsize int) int {
+			m := ptr.asPointerTo(t).Elem() // the map
+			n := 0
+			for _, k := range m.MapKeys() {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false)             // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr)          // pointer to value
+				siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) {
+			m := ptr.asPointerTo(t).Elem() // the map
+			var err error
+			keys := m.MapKeys()
+			if len(keys) > 1 && deterministic {
+				sort.Sort(mapKeys(keys))
+			}
+
+			var nerr nonFatal
+			for _, k := range keys {
+				ki := k.Interface()
+				vi := m.MapIndex(k).Interface()
+				kaddr := toAddrPointer(&ki, false)    // pointer to key
+				vaddr := toAddrPointer(&vi, valIsPtr) // pointer to value
+				b = appendVarint(b, tag)
+				siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1)
+				b = appendVarint(b, uint64(siz))
+				b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic)
+				if !nerr.Merge(err) {
+					return b, err
+				}
+				b, err = valMarshaler(b, vaddr, valWireTag, deterministic)
+				if err != ErrNil && !nerr.Merge(err) { // allow nil value in map
+					return b, err
+				}
+			}
+			return b, nerr.E
+		}
+}
+
+// makeOneOfMarshaler returns the sizer and marshaler for a oneof field.
+// fi is the marshal info of the field.
+// f is the pointer to the reflect data structure of the field.
+func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) {
+	// Oneof field is an interface. We need to get the actual data type on the fly.
+	t := f.Type
+	return func(ptr pointer, _ int) int {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return 0
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			e := fi.oneofElems[telem]
+			return e.sizer(p, e.tagsize)
+		},
+		func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) {
+			p := ptr.getInterfacePointer()
+			if p.isNil() {
+				return b, nil
+			}
+			v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct
+			telem := v.Type()
+			if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() {
+				return b, errOneofHasNil
+			}
+			e := fi.oneofElems[telem]
+			return e.marshaler(b, p, e.wiretag, deterministic)
+		}
+}
+
+// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field.
+func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b.
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for _, e := range m {
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				b = append(b, e.enc...)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr)
+			b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	// Not sure this is required, but the old code does it.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// message set format is:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
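+//
+// Each extension is written as one Item group: type_id carries the extension
+// field number and message carries the length-delimited encoding of the value.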
+
+// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field
+// in message set format (above).
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return 0
+	}
+	mu.Lock()
+
+	n := 0
+	for id, e := range m {
+		n += 2                          // start group, end group. tag = 1 (size=1)
+		n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1)
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			siz := len(msgWithLen)
+			n += siz + 1 // message, tag = 3 (size=1)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		n += ei.sizer(p, 1) // message, tag = 3 (size=1)
+	}
+	mu.Unlock()
+	return n
+}
+
+// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above)
+// to the end of byte slice b.
+func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) {
+	m, mu := ext.extensionsRead()
+	if m == nil {
+		return b, nil
+	}
+	mu.Lock()
+	defer mu.Unlock()
+
+	var err error
+	var nerr nonFatal
+
+	// Fast-path for common cases: zero or one extensions.
+	// Don't bother sorting the keys.
+	if len(m) <= 1 {
+		for id, e := range m {
+			b = append(b, 1<<3|WireStartGroup)
+			b = append(b, 2<<3|WireVarint)
+			b = appendVarint(b, uint64(id))
+
+			if e.value == nil || e.desc == nil {
+				// Extension is only in its encoded form.
+				msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+				b = append(b, 3<<3|WireBytes)
+				b = append(b, msgWithLen...)
+				b = append(b, 1<<3|WireEndGroup)
+				continue
+			}
+
+			// We don't skip extensions that have an encoded form set,
+			// because the extension value may have been mutated after
+			// the last time this function was called.
+
+			ei := u.getExtElemInfo(e.desc)
+			v := e.value
+			p := toAddrPointer(&v, ei.isptr)
+			b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+			if !nerr.Merge(err) {
+				return b, err
+			}
+			b = append(b, 1<<3|WireEndGroup)
+		}
+		return b, nerr.E
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, id := range keys {
+		e := m[int32(id)]
+		b = append(b, 1<<3|WireStartGroup)
+		b = append(b, 2<<3|WireVarint)
+		b = appendVarint(b, uint64(id))
+
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint
+			b = append(b, 3<<3|WireBytes)
+			b = append(b, msgWithLen...)
+			b = append(b, 1<<3|WireEndGroup)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic)
+		b = append(b, 1<<3|WireEndGroup)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// sizeV1Extensions computes the size of encoded data for a V1-API extension field.
+func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int {
+	if m == nil {
+		return 0
+	}
+
+	n := 0
+	for _, e := range m {
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			n += len(e.enc)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		n += ei.sizer(p, ei.tagsize)
+	}
+	return n
+}
+
+// appendV1Extensions marshals a V1-API extension field to the end of byte slice b.
+func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) {
+	if m == nil {
+		return b, nil
+	}
+
+	// Sort the keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(m))
+	for k := range m {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	var err error
+	var nerr nonFatal
+	for _, k := range keys {
+		e := m[int32(k)]
+		if e.value == nil || e.desc == nil {
+			// Extension is only in its encoded form.
+			b = append(b, e.enc...)
+			continue
+		}
+
+		// We don't skip extensions that have an encoded form set,
+		// because the extension value may have been mutated after
+		// the last time this function was called.
+
+		ei := u.getExtElemInfo(e.desc)
+		v := e.value
+		p := toAddrPointer(&v, ei.isptr)
+		b, err = ei.marshaler(b, p, ei.wiretag, deterministic)
+		if !nerr.Merge(err) {
+			return b, err
+		}
+	}
+	return b, nerr.E
+}
+
+// newMarshaler is the interface representing objects that can marshal themselves.
+//
+// This exists to support protoc-gen-go generated messages.
+// The proto package will stop type-asserting to this interface in the future.
+//
+// DO NOT DEPEND ON THIS.
+type newMarshaler interface {
+	XXX_Size() int
+	XXX_Marshal(b []byte, deterministic bool) ([]byte, error)
+}
+
+// Size returns the encoded size of a protocol buffer message.
+// This is the main entry point.
+func Size(pb Message) int {
+	if m, ok := pb.(newMarshaler); ok {
+		return m.XXX_Size()
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		b, _ := m.Marshal()
+		return len(b)
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return 0
+	}
+	var info InternalMessageInfo
+	return info.Size(pb)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, returning the data.
+// This is the main entry point.
+func Marshal(pb Message) ([]byte, error) {
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		b := make([]byte, 0, siz)
+		return m.XXX_Marshal(b, false)
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		return m.Marshal()
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return nil, ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	b := make([]byte, 0, siz)
+	return info.Marshal(b, pb, false)
+}
+
+// Marshal takes a protocol buffer message
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+// This is an alternative entry point. It is not necessary to use
+// a Buffer for most applications.
+func (p *Buffer) Marshal(pb Message) error {
+	var err error
+	if p.deterministic {
+		if _, ok := pb.(Marshaler); ok {
+			return fmt.Errorf("proto: deterministic not supported by the Marshal method of %T", pb)
+		}
+	}
+	if m, ok := pb.(newMarshaler); ok {
+		siz := m.XXX_Size()
+		p.grow(siz) // make sure buf has enough capacity
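+		// Three-index slice: pp starts empty with capacity siz inside p.buf's
+		// spare space, so XXX_Marshal can normally fill it without another
+		// allocation before it is appended back onto p.buf.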
+		pp := p.buf[len(p.buf) : len(p.buf) : len(p.buf)+siz]
+		pp, err = m.XXX_Marshal(pp, p.deterministic)
+		p.buf = append(p.buf, pp...)
+		return err
+	}
+	if m, ok := pb.(Marshaler); ok {
+		// If the message can marshal itself, let it do it, for compatibility.
+		// NOTE: This is not efficient.
+		var b []byte
+		b, err = m.Marshal()
+		p.buf = append(p.buf, b...)
+		return err
+	}
+	// in case somehow we didn't generate the wrapper
+	if pb == nil {
+		return ErrNil
+	}
+	var info InternalMessageInfo
+	siz := info.Size(pb)
+	p.grow(siz) // make sure buf has enough capacity
+	p.buf, err = info.Marshal(p.buf, pb, p.deterministic)
+	return err
+}
+
+// grow grows the buffer's capacity, if necessary, to guarantee space for
+// another n bytes. After grow(n), at least n bytes can be written to the
+// buffer without another allocation.
+func (p *Buffer) grow(n int) {
+	need := len(p.buf) + n
+	if need <= cap(p.buf) {
+		return
+	}
+	newCap := len(p.buf) * 2
+	if newCap < need {
+		newCap = need
+	}
+	p.buf = append(make([]byte, 0, newCap), p.buf...)
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
new file mode 100644
index 0000000..997f57c
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_marshal_gogo.go
@@ -0,0 +1,388 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+// makeMessageRefMarshaler differs a bit from makeMessageMarshaler
+// It marshals a message T instead of a *T
+func makeMessageRefMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			siz := u.size(ptr)
+			return siz + SizeVarint(uint64(siz)) + tagsize
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			b = appendVarint(b, wiretag)
+			siz := u.cachedsize(ptr)
+			b = appendVarint(b, uint64(siz))
+			return u.marshal(b, ptr, deterministic)
+		}
+}
+
+// makeMessageRefSliceMarshaler differs quite a lot from makeMessageSliceMarshaler
+// It marshals a slice of messages []T instead of []*T
+func makeMessageRefSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				e := elem.Interface()
+				v := toAddrPointer(&e, false)
+				siz := u.size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			var err, errreq error
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				e := elem.Interface()
+				v := toAddrPointer(&e, false)
+				b = appendVarint(b, wiretag)
+				siz := u.size(v)
+				b = appendVarint(b, uint64(siz))
+				b, err = u.marshal(b, v, deterministic)
+
+				if err != nil {
+					if _, ok := err.(*RequiredNotSetError); ok {
+						// Required field in submessage is not set.
+						// We record the error but keep going, to give a complete marshaling.
+						if errreq == nil {
+							errreq = err
+						}
+						continue
+					}
+					if err == ErrNil {
+						err = errRepeatedHasNil
+					}
+					return b, err
+				}
+			}
+
+			return b, errreq
+		}
+}
+
+func makeCustomPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
+			siz := m.Size()
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			m := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(custom)
+			siz := m.Size()
+			buf, err := m.Marshal()
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(siz))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeCustomMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			m := ptr.asPointerTo(u.typ).Interface().(custom)
+			siz := m.Size()
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			m := ptr.asPointerTo(u.typ).Interface().(custom)
+			siz := m.Size()
+			buf, err := m.Marshal()
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(siz))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeTimeMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
+			ts, err := timestampProto(*t)
+			if err != nil {
+				return 0
+			}
+			siz := Size(ts)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*time.Time)
+			ts, err := timestampProto(*t)
+			if err != nil {
+				return nil, err
+			}
+			buf, err := Marshal(ts)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeTimePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
+			ts, err := timestampProto(*t)
+			if err != nil {
+				return 0
+			}
+			siz := Size(ts)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Time)
+			ts, err := timestampProto(*t)
+			if err != nil {
+				return nil, err
+			}
+			buf, err := Marshal(ts)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeTimeSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(time.Time)
+				ts, err := timestampProto(t)
+				if err != nil {
+					return 0
+				}
+				siz := Size(ts)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(time.Time)
+				ts, err := timestampProto(t)
+				if err != nil {
+					return nil, err
+				}
+				siz := Size(ts)
+				buf, err := Marshal(ts)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeTimePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*time.Time)
+				ts, err := timestampProto(*t)
+				if err != nil {
+					return 0
+				}
+				siz := Size(ts)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*time.Time)
+				ts, err := timestampProto(*t)
+				if err != nil {
+					return nil, err
+				}
+				siz := Size(ts)
+				buf, err := Marshal(ts)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeDurationMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
+			dur := durationProto(*d)
+			siz := Size(dur)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			d := ptr.asPointerTo(u.typ).Interface().(*time.Duration)
+			dur := durationProto(*d)
+			buf, err := Marshal(dur)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeDurationPtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
+			dur := durationProto(*d)
+			siz := Size(dur)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			d := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*time.Duration)
+			dur := durationProto(*d)
+			buf, err := Marshal(dur)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeDurationSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				d := elem.Interface().(time.Duration)
+				dur := durationProto(d)
+				siz := Size(dur)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				d := elem.Interface().(time.Duration)
+				dur := durationProto(d)
+				siz := Size(dur)
+				buf, err := Marshal(dur)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeDurationPtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				d := elem.Interface().(*time.Duration)
+				dur := durationProto(*d)
+				siz := Size(dur)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				d := elem.Interface().(*time.Duration)
+				dur := durationProto(*d)
+				siz := Size(dur)
+				buf, err := Marshal(dur)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_merge.go b/vendor/github.com/gogo/protobuf/proto/table_merge.go
new file mode 100644
index 0000000..60dcf70
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_merge.go
@@ -0,0 +1,676 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strings"
+	"sync"
+	"sync/atomic"
+)
+
+// Merge merges the src message into dst.
+// This assumes that dst and src are of the same type and are non-nil.
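+// Singular fields in src overwrite dst when they are set (non-nil pointers,
+// or non-zero values for proto3 scalars), repeated fields are appended, and
+// embedded messages are merged recursively.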
+func (a *InternalMessageInfo) Merge(dst, src Message) {
+	mi := atomicLoadMergeInfo(&a.merge)
+	if mi == nil {
+		mi = getMergeInfo(reflect.TypeOf(dst).Elem())
+		atomicStoreMergeInfo(&a.merge, mi)
+	}
+	mi.merge(toPointer(&dst), toPointer(&src))
+}
+
+type mergeInfo struct {
+	typ reflect.Type
+
+	initialized int32 // 0: only typ is valid, 1: everything is valid
+	lock        sync.Mutex
+
+	fields       []mergeFieldInfo
+	unrecognized field // Offset of XXX_unrecognized
+}
+
+type mergeFieldInfo struct {
+	field field // Offset of field, guaranteed to be valid
+
+	// isPointer reports whether the value in the field is a pointer.
+	// This is true for the following situations:
+	//	* Pointer to struct
+	//	* Pointer to basic type (proto2 only)
+	//	* Slice (first value in slice header is a pointer)
+	//	* String (first value in string header is a pointer)
+	isPointer bool
+
+	// basicWidth reports the width of the field assuming that it is directly
+	// embedded in the struct (as is the case for basic types in proto3).
+	// The possible values are:
+	// 	0: invalid
+	//	1: bool
+	//	4: int32, uint32, float32
+	//	8: int64, uint64, float64
+	basicWidth int
+
+	// Where dst and src are pointers to the types being merged.
+	merge func(dst, src pointer)
+}
+
+var (
+	mergeInfoMap  = map[reflect.Type]*mergeInfo{}
+	mergeInfoLock sync.Mutex
+)
+
+func getMergeInfo(t reflect.Type) *mergeInfo {
+	mergeInfoLock.Lock()
+	defer mergeInfoLock.Unlock()
+	mi := mergeInfoMap[t]
+	if mi == nil {
+		mi = &mergeInfo{typ: t}
+		mergeInfoMap[t] = mi
+	}
+	return mi
+}
+
+// merge merges src into dst assuming they are both of type *mi.typ.
+func (mi *mergeInfo) merge(dst, src pointer) {
+	if dst.isNil() {
+		panic("proto: nil destination")
+	}
+	if src.isNil() {
+		return // Nothing to do.
+	}
+
+	if atomic.LoadInt32(&mi.initialized) == 0 {
+		mi.computeMergeInfo()
+	}
+
+	for _, fi := range mi.fields {
+		sfp := src.offset(fi.field)
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string
+				continue
+			}
+			if fi.basicWidth > 0 {
+				switch {
+				case fi.basicWidth == 1 && !*sfp.toBool():
+					continue
+				case fi.basicWidth == 4 && *sfp.toUint32() == 0:
+					continue
+				case fi.basicWidth == 8 && *sfp.toUint64() == 0:
+					continue
+				}
+			}
+		}
+
+		dfp := dst.offset(fi.field)
+		fi.merge(dfp, sfp)
+	}
+
+	// TODO: Make this faster?
+	out := dst.asPointerTo(mi.typ).Elem()
+	in := src.asPointerTo(mi.typ).Elem()
+	if emIn, err := extendable(in.Addr().Interface()); err == nil {
+		emOut, _ := extendable(out.Addr().Interface())
+		mIn, muIn := emIn.extensionsRead()
+		if mIn != nil {
+			mOut := emOut.extensionsWrite()
+			muIn.Lock()
+			mergeExtension(mOut, mIn)
+			muIn.Unlock()
+		}
+	}
+
+	if mi.unrecognized.IsValid() {
+		if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 {
+			*dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...)
+		}
+	}
+}
+
+func (mi *mergeInfo) computeMergeInfo() {
+	mi.lock.Lock()
+	defer mi.lock.Unlock()
+	if mi.initialized != 0 {
+		return
+	}
+	t := mi.typ
+	n := t.NumField()
+
+	props := GetProperties(t)
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if strings.HasPrefix(f.Name, "XXX_") {
+			continue
+		}
+
+		mfi := mergeFieldInfo{field: toField(&f)}
+		tf := f.Type
+
+		// As an optimization, we can avoid the merge function call cost
+		// if we know for sure that the source will have no effect
+		// by checking if it is the zero value.
+		if unsafeAllowed {
+			switch tf.Kind() {
+			case reflect.Ptr, reflect.Slice, reflect.String:
+				// As a special case, we assume slices and strings are pointers
+				// since we know that the first field in the SliceHeader or
+				// StringHeader is a data pointer.
+				mfi.isPointer = true
+			case reflect.Bool:
+				mfi.basicWidth = 1
+			case reflect.Int32, reflect.Uint32, reflect.Float32:
+				mfi.basicWidth = 4
+			case reflect.Int64, reflect.Uint64, reflect.Float64:
+				mfi.basicWidth = 8
+			}
+		}
+
+		// Unwrap tf to get at its most basic type.
+		var isPointer, isSlice bool
+		if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
+			isSlice = true
+			tf = tf.Elem()
+		}
+		if tf.Kind() == reflect.Ptr {
+			isPointer = true
+			tf = tf.Elem()
+		}
+		if isPointer && isSlice && tf.Kind() != reflect.Struct {
+			panic("both pointer and slice for basic type in " + tf.Name())
+		}
+
+		switch tf.Kind() {
+		case reflect.Int32:
+			switch {
+			case isSlice: // E.g., []int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Slice is not defined (see pointer_reflect.go).
+					/*
+						sfsp := src.toInt32Slice()
+						if *sfsp != nil {
+							dfsp := dst.toInt32Slice()
+							*dfsp = append(*dfsp, *sfsp...)
+							if *dfsp == nil {
+								*dfsp = []int64{}
+							}
+						}
+					*/
+					sfs := src.getInt32Slice()
+					if sfs != nil {
+						dfs := dst.getInt32Slice()
+						dfs = append(dfs, sfs...)
+						if dfs == nil {
+							dfs = []int32{}
+						}
+						dst.setInt32Slice(dfs)
+					}
+				}
+			case isPointer: // E.g., *int32
+				mfi.merge = func(dst, src pointer) {
+					// NOTE: toInt32Ptr is not defined (see pointer_reflect.go).
+					/*
+						sfpp := src.toInt32Ptr()
+						if *sfpp != nil {
+							dfpp := dst.toInt32Ptr()
+							if *dfpp == nil {
+								*dfpp = Int32(**sfpp)
+							} else {
+								**dfpp = **sfpp
+							}
+						}
+					*/
+					sfp := src.getInt32Ptr()
+					if sfp != nil {
+						dfp := dst.getInt32Ptr()
+						if dfp == nil {
+							dst.setInt32Ptr(*sfp)
+						} else {
+							*dfp = *sfp
+						}
+					}
+				}
+			default: // E.g., int32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt32(); v != 0 {
+						*dst.toInt32() = v
+					}
+				}
+			}
+		case reflect.Int64:
+			switch {
+			case isSlice: // E.g., []int64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toInt64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toInt64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []int64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *int64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toInt64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toInt64Ptr()
+						if *dfpp == nil {
+							*dfpp = Int64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., int64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toInt64(); v != 0 {
+						*dst.toInt64() = v
+					}
+				}
+			}
+		case reflect.Uint32:
+			switch {
+			case isSlice: // E.g., []uint32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint32Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint32(); v != 0 {
+						*dst.toUint32() = v
+					}
+				}
+			}
+		case reflect.Uint64:
+			switch {
+			case isSlice: // E.g., []uint64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toUint64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toUint64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []uint64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *uint64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toUint64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toUint64Ptr()
+						if *dfpp == nil {
+							*dfpp = Uint64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., uint64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toUint64(); v != 0 {
+						*dst.toUint64() = v
+					}
+				}
+			}
+		case reflect.Float32:
+			switch {
+			case isSlice: // E.g., []float32
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat32Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat32Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float32{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float32
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat32Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat32Ptr()
+						if *dfpp == nil {
+							*dfpp = Float32(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float32
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat32(); v != 0 {
+						*dst.toFloat32() = v
+					}
+				}
+			}
+		case reflect.Float64:
+			switch {
+			case isSlice: // E.g., []float64
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toFloat64Slice()
+					if *sfsp != nil {
+						dfsp := dst.toFloat64Slice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []float64{}
+						}
+					}
+				}
+			case isPointer: // E.g., *float64
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toFloat64Ptr()
+					if *sfpp != nil {
+						dfpp := dst.toFloat64Ptr()
+						if *dfpp == nil {
+							*dfpp = Float64(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., float64
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toFloat64(); v != 0 {
+						*dst.toFloat64() = v
+					}
+				}
+			}
+		case reflect.Bool:
+			switch {
+			case isSlice: // E.g., []bool
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toBoolSlice()
+					if *sfsp != nil {
+						dfsp := dst.toBoolSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []bool{}
+						}
+					}
+				}
+			case isPointer: // E.g., *bool
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toBoolPtr()
+					if *sfpp != nil {
+						dfpp := dst.toBoolPtr()
+						if *dfpp == nil {
+							*dfpp = Bool(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., bool
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toBool(); v {
+						*dst.toBool() = v
+					}
+				}
+			}
+		case reflect.String:
+			switch {
+			case isSlice: // E.g., []string
+				mfi.merge = func(dst, src pointer) {
+					sfsp := src.toStringSlice()
+					if *sfsp != nil {
+						dfsp := dst.toStringSlice()
+						*dfsp = append(*dfsp, *sfsp...)
+						if *dfsp == nil {
+							*dfsp = []string{}
+						}
+					}
+				}
+			case isPointer: // E.g., *string
+				mfi.merge = func(dst, src pointer) {
+					sfpp := src.toStringPtr()
+					if *sfpp != nil {
+						dfpp := dst.toStringPtr()
+						if *dfpp == nil {
+							*dfpp = String(**sfpp)
+						} else {
+							**dfpp = **sfpp
+						}
+					}
+				}
+			default: // E.g., string
+				mfi.merge = func(dst, src pointer) {
+					if v := *src.toString(); v != "" {
+						*dst.toString() = v
+					}
+				}
+			}
+		case reflect.Slice:
+			isProto3 := props.Prop[i].proto3
+			switch {
+			case isPointer:
+				panic("bad pointer in byte slice case in " + tf.Name())
+			case tf.Elem().Kind() != reflect.Uint8:
+				panic("bad element kind in byte slice case in " + tf.Name())
+			case isSlice: // E.g., [][]byte
+				mfi.merge = func(dst, src pointer) {
+					sbsp := src.toBytesSlice()
+					if *sbsp != nil {
+						dbsp := dst.toBytesSlice()
+						for _, sb := range *sbsp {
+							if sb == nil {
+								*dbsp = append(*dbsp, nil)
+							} else {
+								*dbsp = append(*dbsp, append([]byte{}, sb...))
+							}
+						}
+						if *dbsp == nil {
+							*dbsp = [][]byte{}
+						}
+					}
+				}
+			default: // E.g., []byte
+				mfi.merge = func(dst, src pointer) {
+					sbp := src.toBytes()
+					if *sbp != nil {
+						dbp := dst.toBytes()
+						if !isProto3 || len(*sbp) > 0 {
+							*dbp = append([]byte{}, *sbp...)
+						}
+					}
+				}
+			}
+		case reflect.Struct:
+			switch {
+			case isSlice && !isPointer: // E.g. []pb.T
+				mergeInfo := getMergeInfo(tf)
+				zero := reflect.Zero(tf)
+				mfi.merge = func(dst, src pointer) {
+					// TODO: Make this faster?
+					dstsp := dst.asPointerTo(f.Type)
+					dsts := dstsp.Elem()
+					srcs := src.asPointerTo(f.Type).Elem()
+					for i := 0; i < srcs.Len(); i++ {
+						dsts = reflect.Append(dsts, zero)
+						srcElement := srcs.Index(i).Addr()
+						dstElement := dsts.Index(dsts.Len() - 1).Addr()
+						mergeInfo.merge(valToPointer(dstElement), valToPointer(srcElement))
+					}
+					if dsts.IsNil() {
+						dsts = reflect.MakeSlice(f.Type, 0, 0)
+					}
+					dstsp.Elem().Set(dsts)
+				}
+			case !isPointer:
+				mergeInfo := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					mergeInfo.merge(dst, src)
+				}
+			case isSlice: // E.g., []*pb.T
+				mergeInfo := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sps := src.getPointerSlice()
+					if sps != nil {
+						dps := dst.getPointerSlice()
+						for _, sp := range sps {
+							var dp pointer
+							if !sp.isNil() {
+								dp = valToPointer(reflect.New(tf))
+								mergeInfo.merge(dp, sp)
+							}
+							dps = append(dps, dp)
+						}
+						if dps == nil {
+							dps = []pointer{}
+						}
+						dst.setPointerSlice(dps)
+					}
+				}
+			default: // E.g., *pb.T
+				mergeInfo := getMergeInfo(tf)
+				mfi.merge = func(dst, src pointer) {
+					sp := src.getPointer()
+					if !sp.isNil() {
+						dp := dst.getPointer()
+						if dp.isNil() {
+							dp = valToPointer(reflect.New(tf))
+							dst.setPointer(dp)
+						}
+						mergeInfo.merge(dp, sp)
+					}
+				}
+			}
+		case reflect.Map:
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in map case in " + tf.Name())
+			default: // E.g., map[K]V
+				mfi.merge = func(dst, src pointer) {
+					sm := src.asPointerTo(tf).Elem()
+					if sm.Len() == 0 {
+						return
+					}
+					dm := dst.asPointerTo(tf).Elem()
+					if dm.IsNil() {
+						dm.Set(reflect.MakeMap(tf))
+					}
+
+					switch tf.Elem().Kind() {
+					case reflect.Ptr: // Proto struct (e.g., *T)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(Clone(val.Interface().(Message)))
+							dm.SetMapIndex(key, val)
+						}
+					case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+							dm.SetMapIndex(key, val)
+						}
+					default: // Basic type (e.g., string)
+						for _, key := range sm.MapKeys() {
+							val := sm.MapIndex(key)
+							dm.SetMapIndex(key, val)
+						}
+					}
+				}
+			}
+		case reflect.Interface:
+			// Must be oneof field.
+			switch {
+			case isPointer || isSlice:
+				panic("bad pointer or slice in interface case in " + tf.Name())
+			default: // E.g., interface{}
+				// TODO: Make this faster?
+				mfi.merge = func(dst, src pointer) {
+					su := src.asPointerTo(tf).Elem()
+					if !su.IsNil() {
+						du := dst.asPointerTo(tf).Elem()
+						typ := su.Elem().Type()
+						if du.IsNil() || du.Elem().Type() != typ {
+							du.Set(reflect.New(typ.Elem())) // Initialize interface if empty
+						}
+						sv := su.Elem().Elem().Field(0)
+						if sv.Kind() == reflect.Ptr && sv.IsNil() {
+							return
+						}
+						dv := du.Elem().Elem().Field(0)
+						if dv.Kind() == reflect.Ptr && dv.IsNil() {
+							dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty
+						}
+						switch sv.Type().Kind() {
+						case reflect.Ptr: // Proto struct (e.g., *T)
+							Merge(dv.Interface().(Message), sv.Interface().(Message))
+						case reflect.Slice: // E.g. Bytes type (e.g., []byte)
+							dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...)))
+						default: // Basic type (e.g., string)
+							dv.Set(sv)
+						}
+					}
+				}
+			}
+		default:
+			panic(fmt.Sprintf("merger not found for type:%s", tf))
+		}
+		mi.fields = append(mi.fields, mfi)
+	}
+
+	mi.unrecognized = invalidField
+	if f, ok := t.FieldByName("XXX_unrecognized"); ok {
+		if f.Type != reflect.TypeOf([]byte{}) {
+			panic("expected XXX_unrecognized to be of type []byte")
+		}
+		mi.unrecognized = toField(&f)
+	}
+
+	atomic.StoreInt32(&mi.initialized, 1)
+}
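+
+// Illustrative note on the merge behaviour assembled above (a sketch, not
+// part of the generated table logic): scalar fields with a non-zero source
+// value overwrite the destination, slices are appended, and nested messages
+// are merged field by field. Assuming a hypothetical message type T with
+// fields Name string and Tags []string:
+//
+//	dst := &T{Name: "a", Tags: []string{"x"}}
+//	src := &T{Name: "b", Tags: []string{"y"}}
+//	Merge(dst, src)
+//	// dst.Name == "b", dst.Tags == []string{"x", "y"}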
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
new file mode 100644
index 0000000..9372293
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal.go
@@ -0,0 +1,2249 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+	"sync/atomic"
+	"unicode/utf8"
+)
+
+// Unmarshal is the entry point from the generated .pb.go files.
+// This function is not intended to be used by non-generated code.
+// This function is not subject to any compatibility guarantee.
+// msg contains a pointer to a protocol buffer struct.
+// b is the data to be unmarshaled into the protocol buffer.
+// a is a pointer to a place to store cached unmarshal information.
+func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error {
+	// Load the unmarshal information for this message type.
+	// The atomic load ensures memory consistency.
+	u := atomicLoadUnmarshalInfo(&a.unmarshal)
+	if u == nil {
+		// Slow path: find unmarshal info for msg, update a with it.
+		u = getUnmarshalInfo(reflect.TypeOf(msg).Elem())
+		atomicStoreUnmarshalInfo(&a.unmarshal, u)
+	}
+	// Then do the unmarshaling.
+	err := u.unmarshal(toPointer(&msg), b)
+	return err
+}
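+
+// For context, a generated .pb.go file typically routes its XXX_Unmarshal
+// method through this entry point, roughly as in the sketch below (Msg and
+// xxx_messageInfo_Msg are illustrative names, not defined in this file):
+//
+//	func (m *Msg) XXX_Unmarshal(b []byte) error {
+//		return xxx_messageInfo_Msg.Unmarshal(m, b)
+//	}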
+
+type unmarshalInfo struct {
+	typ reflect.Type // type of the protobuf struct
+
+	// 0 = only typ field is initialized
+	// 1 = completely initialized
+	initialized     int32
+	lock            sync.Mutex                    // prevents double initialization
+	dense           []unmarshalFieldInfo          // fields indexed by tag #
+	sparse          map[uint64]unmarshalFieldInfo // fields indexed by tag #
+	reqFields       []string                      // names of required fields
+	reqMask         uint64                        // 1<<len(reqFields)-1
+	unrecognized    field                         // offset of []byte to put unrecognized data (or invalidField if we should throw it away)
+	extensions      field                         // offset of extensions field (of type proto.XXX_InternalExtensions), or invalidField if it does not exist
+	oldExtensions   field                         // offset of old-form extensions field (of type map[int]Extension)
+	extensionRanges []ExtensionRange              // if non-nil, implies extensions field is valid
+	isMessageSet    bool                          // if true, implies extensions field is valid
+
+	bytesExtensions field // offset of XXX_extensions with type []byte
+}
+
+// An unmarshaler takes a stream of bytes and a pointer to a field of a message.
+// It decodes the field, stores it at f, and returns the unused bytes.
+// w is the wire encoding.
+// b is the data after the tag and wire encoding have been read.
+type unmarshaler func(b []byte, f pointer, w int) ([]byte, error)
+
+type unmarshalFieldInfo struct {
+	// location of the field in the proto message structure.
+	field field
+
+	// function to unmarshal the data for the field.
+	unmarshal unmarshaler
+
+	// if a required field, contains a single set bit at this field's index in the required field list.
+	reqMask uint64
+
+	name string // name of the field, for error reporting
+}
+
+var (
+	unmarshalInfoMap  = map[reflect.Type]*unmarshalInfo{}
+	unmarshalInfoLock sync.Mutex
+)
+
+// getUnmarshalInfo returns the data structure which can be
+// subsequently used to unmarshal a message of the given type.
+// t is the type of the message (note: not pointer to message).
+func getUnmarshalInfo(t reflect.Type) *unmarshalInfo {
+	// It would be correct to return a new unmarshalInfo
+	// unconditionally. We would end up allocating one
+	// per occurrence of that type as a message or submessage.
+	// We use a cache here just to reduce memory usage.
+	unmarshalInfoLock.Lock()
+	defer unmarshalInfoLock.Unlock()
+	u := unmarshalInfoMap[t]
+	if u == nil {
+		u = &unmarshalInfo{typ: t}
+		// Note: we just set the type here. The rest of the fields
+		// will be initialized on first use.
+		unmarshalInfoMap[t] = u
+	}
+	return u
+}
+
+// unmarshal does the main work of unmarshaling a message.
+// u provides type information used to unmarshal the message.
+// m is a pointer to a protocol buffer message.
+// b is a byte stream to unmarshal into m.
+// This is the top routine used when recursively unmarshaling submessages.
+func (u *unmarshalInfo) unmarshal(m pointer, b []byte) error {
+	if atomic.LoadInt32(&u.initialized) == 0 {
+		u.computeUnmarshalInfo()
+	}
+	if u.isMessageSet {
+		return unmarshalMessageSet(b, m.offset(u.extensions).toExtensions())
+	}
+	var reqMask uint64 // bitmask of required fields we've seen.
+	var errLater error
+	for len(b) > 0 {
+		// Read tag and wire type.
+		// Special case 1 and 2 byte varints.
+		var x uint64
+		if b[0] < 128 {
+			x = uint64(b[0])
+			b = b[1:]
+		} else if len(b) >= 2 && b[1] < 128 {
+			x = uint64(b[0]&0x7f) + uint64(b[1])<<7
+			b = b[2:]
+		} else {
+			var n int
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+		}
+		tag := x >> 3
+		wire := int(x) & 7
+
+		// Dispatch on the tag to one of the unmarshal* functions below.
+		var f unmarshalFieldInfo
+		if tag < uint64(len(u.dense)) {
+			f = u.dense[tag]
+		} else {
+			f = u.sparse[tag]
+		}
+		if fn := f.unmarshal; fn != nil {
+			var err error
+			b, err = fn(b, m.offset(f.field), wire)
+			if err == nil {
+				reqMask |= f.reqMask
+				continue
+			}
+			if r, ok := err.(*RequiredNotSetError); ok {
+				// Remember this error, but keep parsing. We need to produce
+				// a full parse even if a required field is missing.
+				if errLater == nil {
+					errLater = r
+				}
+				reqMask |= f.reqMask
+				continue
+			}
+			if err != errInternalBadWireType {
+				if err == errInvalidUTF8 {
+					if errLater == nil {
+						fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name
+						errLater = &invalidUTF8Error{fullName}
+					}
+					continue
+				}
+				return err
+			}
+			// Fragments with bad wire type are treated as unknown fields.
+		}
+
+		// Unknown tag.
+		if !u.unrecognized.IsValid() {
+			// Don't keep unrecognized data; just skip it.
+			var err error
+			b, err = skipField(b, wire)
+			if err != nil {
+				return err
+			}
+			continue
+		}
+		// Keep unrecognized data around.
+		// maybe in extensions, maybe in the unrecognized field.
+		z := m.offset(u.unrecognized).toBytes()
+		var emap map[int32]Extension
+		var e Extension
+		for _, r := range u.extensionRanges {
+			if uint64(r.Start) <= tag && tag <= uint64(r.End) {
+				if u.extensions.IsValid() {
+					mp := m.offset(u.extensions).toExtensions()
+					emap = mp.extensionsWrite()
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.oldExtensions.IsValid() {
+					p := m.offset(u.oldExtensions).toOldExtensions()
+					emap = *p
+					if emap == nil {
+						emap = map[int32]Extension{}
+						*p = emap
+					}
+					e = emap[int32(tag)]
+					z = &e.enc
+					break
+				}
+				if u.bytesExtensions.IsValid() {
+					z = m.offset(u.bytesExtensions).toBytes()
+					break
+				}
+				panic("no extensions field available")
+			}
+		}
+		// Use wire type to skip data.
+		var err error
+		b0 := b
+		b, err = skipField(b, wire)
+		if err != nil {
+			return err
+		}
+		*z = encodeVarint(*z, tag<<3|uint64(wire))
+		*z = append(*z, b0[:len(b0)-len(b)]...)
+
+		if emap != nil {
+			emap[int32(tag)] = e
+		}
+	}
+	if reqMask != u.reqMask && errLater == nil {
+		// A required field of this message is missing.
+		for _, n := range u.reqFields {
+			if reqMask&1 == 0 {
+				errLater = &RequiredNotSetError{n}
+			}
+			reqMask >>= 1
+		}
+	}
+	return errLater
+}
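+
+// A small worked example of the tag/wire split performed above (illustrative
+// only): the key byte 0x08 decodes to x = 8, so tag = 8>>3 = 1 and wire =
+// 8&7 = 0 (varint); the key byte 0x12 decodes to x = 18, so tag = 2 and
+// wire = 2 (length-delimited bytes). Single-byte keys like these take the
+// b[0] < 128 fast path.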
+
+// computeUnmarshalInfo fills in u with information for use
+// in unmarshaling protocol buffers of type u.typ.
+func (u *unmarshalInfo) computeUnmarshalInfo() {
+	u.lock.Lock()
+	defer u.lock.Unlock()
+	if u.initialized != 0 {
+		return
+	}
+	t := u.typ
+	n := t.NumField()
+
+	// Set up the "not found" value for the unrecognized byte buffer.
+	// This is the default for proto3.
+	u.unrecognized = invalidField
+	u.extensions = invalidField
+	u.oldExtensions = invalidField
+	u.bytesExtensions = invalidField
+
+	// List of the generated type and offset for each oneof field.
+	type oneofField struct {
+		ityp  reflect.Type // interface type of oneof field
+		field field        // offset in containing message
+	}
+	var oneofFields []oneofField
+
+	for i := 0; i < n; i++ {
+		f := t.Field(i)
+		if f.Name == "XXX_unrecognized" {
+			// The byte slice used to hold unrecognized input is special.
+			if f.Type != reflect.TypeOf(([]byte)(nil)) {
+				panic("bad type for XXX_unrecognized field: " + f.Type.Name())
+			}
+			u.unrecognized = toField(&f)
+			continue
+		}
+		if f.Name == "XXX_InternalExtensions" {
+			// Ditto here.
+			if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) {
+				panic("bad type for XXX_InternalExtensions field: " + f.Type.Name())
+			}
+			u.extensions = toField(&f)
+			if f.Tag.Get("protobuf_messageset") == "1" {
+				u.isMessageSet = true
+			}
+			continue
+		}
+		if f.Name == "XXX_extensions" {
+			// An older form of the extensions field.
+			if f.Type == reflect.TypeOf((map[int32]Extension)(nil)) {
+				u.oldExtensions = toField(&f)
+				continue
+			} else if f.Type == reflect.TypeOf(([]byte)(nil)) {
+				u.bytesExtensions = toField(&f)
+				continue
+			}
+			panic("bad type for XXX_extensions field: " + f.Type.Name())
+		}
+		if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" {
+			continue
+		}
+
+		oneof := f.Tag.Get("protobuf_oneof")
+		if oneof != "" {
+			oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)})
+			// The rest of oneof processing happens below.
+			continue
+		}
+
+		tags := f.Tag.Get("protobuf")
+		tagArray := strings.Split(tags, ",")
+		if len(tagArray) < 2 {
+			panic("protobuf tag not enough fields in " + t.Name() + "." + f.Name + ": " + tags)
+		}
+		tag, err := strconv.Atoi(tagArray[1])
+		if err != nil {
+			panic("protobuf tag field not an integer: " + tagArray[1])
+		}
+
+		name := ""
+		for _, tag := range tagArray[3:] {
+			if strings.HasPrefix(tag, "name=") {
+				name = tag[5:]
+			}
+		}
+
+		// Extract unmarshaling function from the field (its type and tags).
+		unmarshal := fieldUnmarshaler(&f)
+
+		// Required field?
+		var reqMask uint64
+		if tagArray[2] == "req" {
+			bit := len(u.reqFields)
+			u.reqFields = append(u.reqFields, name)
+			reqMask = uint64(1) << uint(bit)
+			// TODO: if we have more than 64 required fields, we end up
+			// not verifying that all required fields are present.
+			// Fix this, perhaps using a count of required fields?
+		}
+
+		// Store the info in the correct slot in the message.
+		u.setTag(tag, toField(&f), unmarshal, reqMask, name)
+	}
+
+	// Find any types associated with oneof fields.
+	// gogo: the len(oneofFields) > 0 check is needed for embedded oneof messages that lack a marshaler and unmarshaler
+	if len(oneofFields) > 0 {
+		var oneofImplementers []interface{}
+		switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) {
+		case oneofFuncsIface:
+			_, _, _, oneofImplementers = m.XXX_OneofFuncs()
+		case oneofWrappersIface:
+			oneofImplementers = m.XXX_OneofWrappers()
+		}
+		for _, v := range oneofImplementers {
+			tptr := reflect.TypeOf(v) // *Msg_X
+			typ := tptr.Elem()        // Msg_X
+
+			f := typ.Field(0) // oneof implementers have one field
+			baseUnmarshal := fieldUnmarshaler(&f)
+			tags := strings.Split(f.Tag.Get("protobuf"), ",")
+			fieldNum, err := strconv.Atoi(tags[1])
+			if err != nil {
+				panic("protobuf tag field not an integer: " + tags[1])
+			}
+			var name string
+			for _, tag := range tags {
+				if strings.HasPrefix(tag, "name=") {
+					name = strings.TrimPrefix(tag, "name=")
+					break
+				}
+			}
+
+			// Find the oneof field that this struct implements.
+			// This may take O(n^2) over all oneofs, but the counts are small enough not to matter.
+			for _, of := range oneofFields {
+				if tptr.Implements(of.ityp) {
+					// We have found the corresponding interface for this struct.
+					// That lets us know where this struct should be stored
+					// when we encounter it during unmarshaling.
+					unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal)
+					u.setTag(fieldNum, of.field, unmarshal, 0, name)
+				}
+			}
+
+		}
+	}
+
+	// Get extension ranges, if any.
+	fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray")
+	if fn.IsValid() {
+		if !u.extensions.IsValid() && !u.oldExtensions.IsValid() && !u.bytesExtensions.IsValid() {
+			panic("a message with extensions, but no extensions field in " + t.Name())
+		}
+		u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange)
+	}
+
+	// Explicitly disallow tag 0. This will ensure we flag an error
+	// when decoding a buffer of all zeros. Without this code, we
+	// would decode and skip an all-zero buffer of even length.
+	// [0 0] is [tag=0/wiretype=varint varint-encoded-0].
+	u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) {
+		return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w)
+	}, 0, "")
+
+	// Set mask for required field check.
+	u.reqMask = uint64(1)<<uint(len(u.reqFields)) - 1
+
+	atomic.StoreInt32(&u.initialized, 1)
+}
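+
+// For example, if a message declares three required fields, reqFields has
+// length 3 and the mask computed above is reqMask = 1<<3 - 1 = 0b111; each
+// required field seen during unmarshaling sets its own bit, and a final
+// value other than 0b111 produces a RequiredNotSetError for a field whose
+// bit is still clear.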
+
+// setTag stores the unmarshal information for the given tag.
+// tag = tag # for field
+// field/unmarshal = unmarshal info for that field.
+// reqMask = if required, bitmask for field position in required field list. 0 otherwise.
+// name = short name of the field.
+func (u *unmarshalInfo) setTag(tag int, field field, unmarshal unmarshaler, reqMask uint64, name string) {
+	i := unmarshalFieldInfo{field: field, unmarshal: unmarshal, reqMask: reqMask, name: name}
+	n := u.typ.NumField()
+	if tag >= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here?
+		for len(u.dense) <= tag {
+			u.dense = append(u.dense, unmarshalFieldInfo{})
+		}
+		u.dense[tag] = i
+		return
+	}
+	if u.sparse == nil {
+		u.sparse = map[uint64]unmarshalFieldInfo{}
+	}
+	u.sparse[uint64(tag)] = i
+}
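+
+// Illustration of the dense/sparse split above: for a struct with n = 4
+// fields, any tag below 16 (or below twice the field count) lands in the
+// dense slice as u.dense[tag], while a large extension-style tag such as
+// 100000 falls through to the sparse map, keyed by uint64(tag).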
+
+// fieldUnmarshaler returns an unmarshaler for the given field.
+func fieldUnmarshaler(f *reflect.StructField) unmarshaler {
+	if f.Type.Kind() == reflect.Map {
+		return makeUnmarshalMap(f)
+	}
+	return typeUnmarshaler(f.Type, f.Tag.Get("protobuf"))
+}
+
+// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair.
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler {
+	tagArray := strings.Split(tags, ",")
+	encoding := tagArray[0]
+	name := "unknown"
+	ctype := false
+	isTime := false
+	isDuration := false
+	isWktPointer := false
+	proto3 := false
+	validateUTF8 := true
+	for _, tag := range tagArray[3:] {
+		if strings.HasPrefix(tag, "name=") {
+			name = tag[5:]
+		}
+		if tag == "proto3" {
+			proto3 = true
+		}
+		if strings.HasPrefix(tag, "customtype=") {
+			ctype = true
+		}
+		if tag == "stdtime" {
+			isTime = true
+		}
+		if tag == "stdduration" {
+			isDuration = true
+		}
+		if tag == "wktptr" {
+			isWktPointer = true
+		}
+	}
+	validateUTF8 = validateUTF8 && proto3
+
+	// Figure out packaging (pointer, slice, or both)
+	slice := false
+	pointer := false
+	if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 {
+		slice = true
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Ptr {
+		pointer = true
+		t = t.Elem()
+	}
+
+	if ctype {
+		if reflect.PtrTo(t).Implements(customType) {
+			if slice {
+				return makeUnmarshalCustomSlice(getUnmarshalInfo(t), name)
+			}
+			if pointer {
+				return makeUnmarshalCustomPtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalCustom(getUnmarshalInfo(t), name)
+		} else {
+			panic(fmt.Sprintf("custom type: type: %v, does not implement the proto.custom interface", t))
+		}
+	}
+
+	if isTime {
+		if pointer {
+			if slice {
+				return makeUnmarshalTimePtrSlice(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalTimePtr(getUnmarshalInfo(t), name)
+		}
+		if slice {
+			return makeUnmarshalTimeSlice(getUnmarshalInfo(t), name)
+		}
+		return makeUnmarshalTime(getUnmarshalInfo(t), name)
+	}
+
+	if isDuration {
+		if pointer {
+			if slice {
+				return makeUnmarshalDurationPtrSlice(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalDurationPtr(getUnmarshalInfo(t), name)
+		}
+		if slice {
+			return makeUnmarshalDurationSlice(getUnmarshalInfo(t), name)
+		}
+		return makeUnmarshalDuration(getUnmarshalInfo(t), name)
+	}
+
+	if isWktPointer {
+		switch t.Kind() {
+		case reflect.Float64:
+			if pointer {
+				if slice {
+					return makeStdDoubleValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdDoubleValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdDoubleValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdDoubleValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Float32:
+			if pointer {
+				if slice {
+					return makeStdFloatValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdFloatValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdFloatValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdFloatValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Int64:
+			if pointer {
+				if slice {
+					return makeStdInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdInt64ValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Uint64:
+			if pointer {
+				if slice {
+					return makeStdUInt64ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdUInt64ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdUInt64ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdUInt64ValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Int32:
+			if pointer {
+				if slice {
+					return makeStdInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdInt32ValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Uint32:
+			if pointer {
+				if slice {
+					return makeStdUInt32ValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdUInt32ValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdUInt32ValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdUInt32ValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.Bool:
+			if pointer {
+				if slice {
+					return makeStdBoolValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdBoolValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdBoolValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdBoolValueUnmarshaler(getUnmarshalInfo(t), name)
+		case reflect.String:
+			if pointer {
+				if slice {
+					return makeStdStringValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdStringValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdStringValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdStringValueUnmarshaler(getUnmarshalInfo(t), name)
+		case uint8SliceType:
+			if pointer {
+				if slice {
+					return makeStdBytesValuePtrSliceUnmarshaler(getUnmarshalInfo(t), name)
+				}
+				return makeStdBytesValuePtrUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			if slice {
+				return makeStdBytesValueSliceUnmarshaler(getUnmarshalInfo(t), name)
+			}
+			return makeStdBytesValueUnmarshaler(getUnmarshalInfo(t), name)
+		default:
+			panic(fmt.Sprintf("unknown wktpointer type %#v", t))
+		}
+	}
+
+	// We'll never have both pointer and slice for basic types.
+	if pointer && slice && t.Kind() != reflect.Struct {
+		panic("both pointer and slice for basic type in " + t.Name())
+	}
+
+	switch t.Kind() {
+	case reflect.Bool:
+		if pointer {
+			return unmarshalBoolPtr
+		}
+		if slice {
+			return unmarshalBoolSlice
+		}
+		return unmarshalBoolValue
+	case reflect.Int32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixedS32Ptr
+			}
+			if slice {
+				return unmarshalFixedS32Slice
+			}
+			return unmarshalFixedS32Value
+		case "varint":
+			// this could be int32 or enum
+			if pointer {
+				return unmarshalInt32Ptr
+			}
+			if slice {
+				return unmarshalInt32Slice
+			}
+			return unmarshalInt32Value
+		case "zigzag32":
+			if pointer {
+				return unmarshalSint32Ptr
+			}
+			if slice {
+				return unmarshalSint32Slice
+			}
+			return unmarshalSint32Value
+		}
+	case reflect.Int64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixedS64Ptr
+			}
+			if slice {
+				return unmarshalFixedS64Slice
+			}
+			return unmarshalFixedS64Value
+		case "varint":
+			if pointer {
+				return unmarshalInt64Ptr
+			}
+			if slice {
+				return unmarshalInt64Slice
+			}
+			return unmarshalInt64Value
+		case "zigzag64":
+			if pointer {
+				return unmarshalSint64Ptr
+			}
+			if slice {
+				return unmarshalSint64Slice
+			}
+			return unmarshalSint64Value
+		}
+	case reflect.Uint32:
+		switch encoding {
+		case "fixed32":
+			if pointer {
+				return unmarshalFixed32Ptr
+			}
+			if slice {
+				return unmarshalFixed32Slice
+			}
+			return unmarshalFixed32Value
+		case "varint":
+			if pointer {
+				return unmarshalUint32Ptr
+			}
+			if slice {
+				return unmarshalUint32Slice
+			}
+			return unmarshalUint32Value
+		}
+	case reflect.Uint64:
+		switch encoding {
+		case "fixed64":
+			if pointer {
+				return unmarshalFixed64Ptr
+			}
+			if slice {
+				return unmarshalFixed64Slice
+			}
+			return unmarshalFixed64Value
+		case "varint":
+			if pointer {
+				return unmarshalUint64Ptr
+			}
+			if slice {
+				return unmarshalUint64Slice
+			}
+			return unmarshalUint64Value
+		}
+	case reflect.Float32:
+		if pointer {
+			return unmarshalFloat32Ptr
+		}
+		if slice {
+			return unmarshalFloat32Slice
+		}
+		return unmarshalFloat32Value
+	case reflect.Float64:
+		if pointer {
+			return unmarshalFloat64Ptr
+		}
+		if slice {
+			return unmarshalFloat64Slice
+		}
+		return unmarshalFloat64Value
+	case reflect.Map:
+		panic("map type in typeUnmarshaler in " + t.Name())
+	case reflect.Slice:
+		if pointer {
+			panic("bad pointer in slice case in " + t.Name())
+		}
+		if slice {
+			return unmarshalBytesSlice
+		}
+		return unmarshalBytesValue
+	case reflect.String:
+		if validateUTF8 {
+			if pointer {
+				return unmarshalUTF8StringPtr
+			}
+			if slice {
+				return unmarshalUTF8StringSlice
+			}
+			return unmarshalUTF8StringValue
+		}
+		if pointer {
+			return unmarshalStringPtr
+		}
+		if slice {
+			return unmarshalStringSlice
+		}
+		return unmarshalStringValue
+	case reflect.Struct:
+		// message or group field
+		if !pointer {
+			switch encoding {
+			case "bytes":
+				if slice {
+					return makeUnmarshalMessageSlice(getUnmarshalInfo(t), name)
+				}
+				return makeUnmarshalMessage(getUnmarshalInfo(t), name)
+			}
+		}
+		switch encoding {
+		case "bytes":
+			if slice {
+				return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name)
+		case "group":
+			if slice {
+				return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name)
+			}
+			return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name)
+		}
+	}
+	panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding))
+}
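+
+// Example of the tag string handled above (illustrative): for a field
+// declared with `protobuf:"varint,1,opt,name=id,proto3"`, tagArray[0] is the
+// encoding ("varint"), tagArray[1] is the field number, and the trailing
+// elements supply name=id plus the proto3 marker, which, for string fields,
+// enables UTF-8 validation.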
+
+// Below are all the unmarshalers for individual fields of various types.
+
+func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64() = v
+	return b, nil
+}
+
+func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x)
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x)
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
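+
+// Packed-encoding example for the WireBytes branch above (illustrative): a
+// repeated int64 field holding [1, 2, 3] arrives as the length varint 0x03
+// followed by the three one-byte varints 0x01 0x02 0x03; the loop peels off
+// one varint per element and appends it to the slice.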
+
+func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64() = v
+	return b, nil
+}
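+
+// Worked example of the zigzag decoding above: int64(x>>1) ^ int64(x)<<63>>63
+// sign-extends the low bit of x, so x = 0, 1, 2, 3, 4 decode to 0, -1, 1, -2,
+// 2 respectively, which is the sint64 mapping defined by the protobuf wire
+// format.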
+
+func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	*f.toInt64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int64(x>>1) ^ int64(x)<<63>>63
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int64(x>>1) ^ int64(x)<<63>>63
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64() = v
+	return b, nil
+}
+
+func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	*f.toUint64Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint64(x)
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint64(x)
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x)
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x)
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	*f.toInt32() = v
+	return b, nil
+}
+
+func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.setInt32Ptr(v)
+	return b, nil
+}
+
+func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := int32(x>>1) ^ int32(x)<<31>>31
+			f.appendInt32Slice(v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := int32(x>>1) ^ int32(x)<<31>>31
+	f.appendInt32Slice(v)
+	return b, nil
+}
+
+func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32() = v
+	return b, nil
+}
+
+func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	*f.toUint32Ptr() = &v
+	return b, nil
+}
+
+func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			b = b[n:]
+			v := uint32(x)
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	v := uint32(x)
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b, nil
+}
+
+func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64() = v
+	return b[8:], nil
+}
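+
+// The fixed64 assembly above reads the eight bytes little-endian, so the
+// wire bytes 01 00 00 00 00 00 00 00 decode to 1 and the wire bytes
+// 00 01 00 00 00 00 00 00 decode to 256 (illustrative example).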
+
+func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	*f.toUint64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+			s := f.toUint64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	s := f.toUint64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64() = v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	*f.toInt64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+			s := f.toInt64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56
+	s := f.toInt64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	*f.toUint32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+			s := f.toUint32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	s := f.toUint32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	*f.toInt32() = v
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.setInt32Ptr(v)
+	return b[4:], nil
+}
+
+func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+			f.appendInt32Slice(v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24
+	f.appendInt32Slice(v)
+	return b[4:], nil
+}
+
+func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	// Note: any length varint is allowed, even though any sane
+	// encoder will use one byte.
+	// See https://github.com/golang/protobuf/issues/76
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// TODO: check if x>1? Tests seem to indicate no.
+	v := x != 0
+	*f.toBool() = v
+	return b[n:], nil
+}
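+
+// Example for the bool decoding above: the single byte 0x00 yields false and
+// 0x01 yields true, and because the check is simply x != 0, an over-long
+// varint such as 0x80 0x01 (value 128) also decodes to true, matching the
+// note about arbitrary-length varints.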
+
+func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	*f.toBoolPtr() = &v
+	return b[n:], nil
+}
+
+func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			x, n = decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := x != 0
+			s := f.toBoolSlice()
+			*s = append(*s, v)
+			b = b[n:]
+		}
+		return res, nil
+	}
+	if w != WireVarint {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := x != 0
+	s := f.toBoolSlice()
+	*s = append(*s, v)
+	return b[n:], nil
+}
+
+func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64() = v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	*f.toFloat64Ptr() = &v
+	return b[8:], nil
+}
+
+func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 8 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+			s := f.toFloat64Slice()
+			*s = append(*s, v)
+			b = b[8:]
+		}
+		return res, nil
+	}
+	if w != WireFixed64 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 8 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56)
+	s := f.toFloat64Slice()
+	*s = append(*s, v)
+	return b[8:], nil
+}
+
+func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32() = v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	*f.toFloat32Ptr() = &v
+	return b[4:], nil
+}
+
+func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) {
+	if w == WireBytes { // packed
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		res := b[x:]
+		b = b[:x]
+		for len(b) > 0 {
+			if len(b) < 4 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+			s := f.toFloat32Slice()
+			*s = append(*s, v)
+			b = b[4:]
+		}
+		return res, nil
+	}
+	if w != WireFixed32 {
+		return b, errInternalBadWireType
+	}
+	if len(b) < 4 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24)
+	s := f.toFloat32Slice()
+	*s = append(*s, v)
+	return b[4:], nil
+}
+
+func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	return b[x:], nil
+}
+
+func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	return b[x:], nil
+}
+
+func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toString() = v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	*f.toStringPtr() = &v
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := string(b[:x])
+	s := f.toStringSlice()
+	*s = append(*s, v)
+	if !utf8.ValidString(v) {
+		return b[x:], errInvalidUTF8
+	}
+	return b[x:], nil
+}
+
+var emptyBuf [0]byte
+
+func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	// The use of append here is a trick which avoids the zeroing
+	// that would be required if we used a make/copy pair.
+	// We append to emptyBuf instead of nil because we want
+	// a non-nil result even when the length is 0.
+	v := append(emptyBuf[:], b[:x]...)
+	*f.toBytes() = v
+	return b[x:], nil
+}
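+
+// To make the append trick above concrete: emptyBuf[:] is a non-nil slice of
+// length 0, so when x == 0 the result v is an empty but non-nil []byte; this
+// keeps a bytes field that was present on the wire with zero length
+// distinguishable from one that was never set.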
+
+func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) {
+	if w != WireBytes {
+		return b, errInternalBadWireType
+	}
+	x, n := decodeVarint(b)
+	if n == 0 {
+		return nil, io.ErrUnexpectedEOF
+	}
+	b = b[n:]
+	if x > uint64(len(b)) {
+		return nil, io.ErrUnexpectedEOF
+	}
+	v := append(emptyBuf[:], b[:x]...)
+	s := f.toBytesSlice()
+	*s = append(*s, v)
+	return b[x:], nil
+}
+
+func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		// First read the message field to see if something is there.
+		// The semantics of multiple submessages are weird.  Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return b, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireStartGroup {
+			return b, errInternalBadWireType
+		}
+		x, y := findEndGroup(b)
+		if x < 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendPointer(v)
+		return b[y:], err
+	}
+}
+
+func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
+	t := f.Type
+	kt := t.Key()
+	vt := t.Elem()
+	tagArray := strings.Split(f.Tag.Get("protobuf"), ",")
+	valTags := strings.Split(f.Tag.Get("protobuf_val"), ",")
+	for _, t := range tagArray {
+		if strings.HasPrefix(t, "customtype=") {
+			valTags = append(valTags, t)
+		}
+		if t == "stdtime" {
+			valTags = append(valTags, t)
+		}
+		if t == "stdduration" {
+			valTags = append(valTags, t)
+		}
+		if t == "wktptr" {
+			valTags = append(valTags, t)
+		}
+	}
+	unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key"))
+	unmarshalVal := typeUnmarshaler(vt, strings.Join(valTags, ","))
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// The map entry is a submessage. Figure out how big it is.
+		if w != WireBytes {
+			return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes)
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		r := b[x:] // unused data to return
+		b = b[:x]  // data for map entry
+
+		// Note: we could use #keys * #values ~= 200 functions
+		// to do map decoding without reflection. Probably not worth it.
+		// Maps will be somewhat slow. Oh well.
+
+		// Read key and value from data.
+		var nerr nonFatal
+		k := reflect.New(kt)
+		v := reflect.New(vt)
+		for len(b) > 0 {
+			x, n := decodeVarint(b)
+			if n == 0 {
+				return nil, io.ErrUnexpectedEOF
+			}
+			wire := int(x) & 7
+			b = b[n:]
+
+			var err error
+			switch x >> 3 {
+			case 1:
+				b, err = unmarshalKey(b, valToPointer(k), wire)
+			case 2:
+				b, err = unmarshalVal(b, valToPointer(v), wire)
+			default:
+				err = errInternalBadWireType // skip unknown tag
+			}
+
+			if nerr.Merge(err) {
+				continue
+			}
+			if err != errInternalBadWireType {
+				return nil, err
+			}
+
+			// Skip past unknown fields.
+			b, err = skipField(b, wire)
+			if err != nil {
+				return nil, err
+			}
+		}
+
+		// Get map, allocate if needed.
+		m := f.asPointerTo(t).Elem() // an addressable map[K]T
+		if m.IsNil() {
+			m.Set(reflect.MakeMap(t))
+		}
+
+		// Insert into map.
+		m.SetMapIndex(k.Elem(), v.Elem())
+
+		return r, nerr.E
+	}
+}
+
+// makeUnmarshalOneof makes an unmarshaler for oneof fields.
+// For example, given:
+// message Msg {
+//   oneof F {
+//     int64 X = 1;
+//     double Y = 2;
+//   }
+// }
+// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
+// ityp is the interface type of the oneof field (e.g. isMsg_F).
+// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
+// Note that this function will be called once for each case in the oneof.
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler {
+	sf := typ.Field(0)
+	field0 := toField(&sf)
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		// Allocate holder for value.
+		v := reflect.New(typ)
+
+		// Unmarshal data into holder.
+		// We unmarshal into the first field of the holder object.
+		var err error
+		var nerr nonFatal
+		b, err = unmarshal(b, valToPointer(v).offset(field0), w)
+		if !nerr.Merge(err) {
+			return nil, err
+		}
+
+		// Write pointer to holder into target field.
+		f.asPointerTo(ityp).Elem().Set(v)
+
+		return b, nerr.E
+	}
+}
+
+// Error used by decode internally.
+var errInternalBadWireType = errors.New("proto: internal error: bad wiretype")
+
+// skipField skips past a field with the given wire type and returns the remaining bytes.
+func skipField(b []byte, wire int) ([]byte, error) {
+	switch wire {
+	case WireVarint:
+		_, k := decodeVarint(b)
+		if k == 0 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[k:]
+	case WireFixed32:
+		if len(b) < 4 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[4:]
+	case WireFixed64:
+		if len(b) < 8 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[8:]
+	case WireBytes:
+		m, k := decodeVarint(b)
+		if k == 0 || uint64(len(b)-k) < m {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[uint64(k)+m:]
+	case WireStartGroup:
+		_, i := findEndGroup(b)
+		if i == -1 {
+			return b, io.ErrUnexpectedEOF
+		}
+		b = b[i:]
+	default:
+		return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire)
+	}
+	return b, nil
+}
+
+// findEndGroup finds the index of the next EndGroup tag.
+// Groups may be nested, so the "next" EndGroup tag is the first
+// unpaired EndGroup.
+// findEndGroup returns the indexes of the start and end of the EndGroup tag.
+// Returns (-1,-1) if it can't find one.
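+// The scan starts with one group already open (depth 1) and advances tag by
+// tag, adjusting depth on nested StartGroup/EndGroup tags.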
+func findEndGroup(b []byte) (int, int) {
+	depth := 1
+	i := 0
+	for {
+		x, n := decodeVarint(b[i:])
+		if n == 0 {
+			return -1, -1
+		}
+		j := i
+		i += n
+		switch x & 7 {
+		case WireVarint:
+			_, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+		case WireFixed32:
+			if len(b)-4 < i {
+				return -1, -1
+			}
+			i += 4
+		case WireFixed64:
+			if len(b)-8 < i {
+				return -1, -1
+			}
+			i += 8
+		case WireBytes:
+			m, k := decodeVarint(b[i:])
+			if k == 0 {
+				return -1, -1
+			}
+			i += k
+			if uint64(len(b)-i) < m {
+				return -1, -1
+			}
+			i += int(m)
+		case WireStartGroup:
+			depth++
+		case WireEndGroup:
+			depth--
+			if depth == 0 {
+				return j, i
+			}
+		default:
+			return -1, -1
+		}
+	}
+}
+
+// encodeVarint appends a varint-encoded integer to b and returns the result.
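+// For example, encodeVarint(nil, 300) yields []byte{0xac, 0x02}: the low seven
+// bits 0x2c are emitted with the continuation bit set, followed by 0x02.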
+func encodeVarint(b []byte, x uint64) []byte {
+	for x >= 1<<7 {
+		b = append(b, byte(x&0x7f|0x80))
+		x >>= 7
+	}
+	return append(b, byte(x))
+}
+
+// decodeVarint reads a varint-encoded integer from b.
+// Returns the decoded integer and the number of bytes read.
+// If there is an error, it returns 0,0.
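+// The body is manually unrolled, one byte per step; for example,
+// decodeVarint([]byte{0xac, 0x02}) returns (300, 2).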
+func decodeVarint(b []byte) (uint64, int) {
+	var x, y uint64
+	if len(b) == 0 {
+		goto bad
+	}
+	x = uint64(b[0])
+	if x < 0x80 {
+		return x, 1
+	}
+	x -= 0x80
+
+	if len(b) <= 1 {
+		goto bad
+	}
+	y = uint64(b[1])
+	x += y << 7
+	if y < 0x80 {
+		return x, 2
+	}
+	x -= 0x80 << 7
+
+	if len(b) <= 2 {
+		goto bad
+	}
+	y = uint64(b[2])
+	x += y << 14
+	if y < 0x80 {
+		return x, 3
+	}
+	x -= 0x80 << 14
+
+	if len(b) <= 3 {
+		goto bad
+	}
+	y = uint64(b[3])
+	x += y << 21
+	if y < 0x80 {
+		return x, 4
+	}
+	x -= 0x80 << 21
+
+	if len(b) <= 4 {
+		goto bad
+	}
+	y = uint64(b[4])
+	x += y << 28
+	if y < 0x80 {
+		return x, 5
+	}
+	x -= 0x80 << 28
+
+	if len(b) <= 5 {
+		goto bad
+	}
+	y = uint64(b[5])
+	x += y << 35
+	if y < 0x80 {
+		return x, 6
+	}
+	x -= 0x80 << 35
+
+	if len(b) <= 6 {
+		goto bad
+	}
+	y = uint64(b[6])
+	x += y << 42
+	if y < 0x80 {
+		return x, 7
+	}
+	x -= 0x80 << 42
+
+	if len(b) <= 7 {
+		goto bad
+	}
+	y = uint64(b[7])
+	x += y << 49
+	if y < 0x80 {
+		return x, 8
+	}
+	x -= 0x80 << 49
+
+	if len(b) <= 8 {
+		goto bad
+	}
+	y = uint64(b[8])
+	x += y << 56
+	if y < 0x80 {
+		return x, 9
+	}
+	x -= 0x80 << 56
+
+	if len(b) <= 9 {
+		goto bad
+	}
+	y = uint64(b[9])
+	x += y << 63
+	if y < 2 {
+		return x, 10
+	}
+
+bad:
+	return 0, 0
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
new file mode 100644
index 0000000..00d6c7a
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/table_unmarshal_gogo.go
@@ -0,0 +1,385 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"io"
+	"reflect"
+)
+
+func makeUnmarshalMessage(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		// First read the message field to see if something is there.
+		// The semantics of multiple submessages are weird.  Instead of
+		// the last one winning (as it is for all other fields), multiple
+		// submessages are merged.
+		v := f // gogo: changed from v := f.getPointer()
+		if v.isNil() {
+			v = valToPointer(reflect.New(sub.typ))
+			f.setPointer(v)
+		}
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		return b[x:], err
+	}
+}
+
+func makeUnmarshalMessageSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		v := valToPointer(reflect.New(sub.typ))
+		err := sub.unmarshal(v, b[:x])
+		if err != nil {
+			if r, ok := err.(*RequiredNotSetError); ok {
+				r.field = name + "." + r.field
+			} else {
+				return nil, err
+			}
+		}
+		f.appendRef(v, sub.typ) // gogo: changed from f.appendPointer(v)
+		return b[x:], err
+	}
+}
+
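+// makeUnmarshalCustomPtr returns an unmarshaler for a *T field whose type
+// implements the gogo custom interface: it allocates a new T and delegates the
+// wire bytes to its Unmarshal method.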
+func makeUnmarshalCustomPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.New(sub.typ))
+		m := s.Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
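+// makeUnmarshalCustomSlice is the repeated-field variant: each element is
+// decoded through the custom interface and appended to the slice by reference.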
+func makeUnmarshalCustomSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := reflect.New(sub.typ)
+		c := m.Interface().(custom)
+		if err := c.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		v := valToPointer(m)
+		f.appendRef(v, sub.typ)
+		return b[x:], nil
+	}
+}
+
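+// makeUnmarshalCustom handles a non-pointer custom field by unmarshaling
+// directly into the field's own storage.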
+func makeUnmarshalCustom(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+
+		m := f.asPointerTo(sub.typ).Interface().(custom)
+		if err := m.Unmarshal(b[:x]); err != nil {
+			return nil, err
+		}
+		return b[x:], nil
+	}
+}
+
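+// The helpers below handle the stdtime and stdduration options: the well-known
+// Timestamp and Duration messages are decoded into native time.Time and
+// time.Duration values, in value, pointer, slice and pointer-slice forms.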
+func makeUnmarshalTime(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&t))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimePtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalTimeSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &timestamp{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		t, err := timestampFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(t))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtr(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDuration(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(d))
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationPtrSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&d))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeUnmarshalDurationSlice(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &duration{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		d, err := durationFromProto(m)
+		if err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(d))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go
new file mode 100644
index 0000000..87416af
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text.go
@@ -0,0 +1,930 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+	"bufio"
+	"bytes"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"log"
+	"math"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"time"
+)
+
+var (
+	newline         = []byte("\n")
+	spaces          = []byte("                                        ")
+	endBraceNewline = []byte("}\n")
+	backslashN      = []byte{'\\', 'n'}
+	backslashR      = []byte{'\\', 'r'}
+	backslashT      = []byte{'\\', 't'}
+	backslashDQ     = []byte{'\\', '"'}
+	backslashBS     = []byte{'\\', '\\'}
+	posInf          = []byte("inf")
+	negInf          = []byte("-inf")
+	nan             = []byte("nan")
+)
+
+type writer interface {
+	io.Writer
+	WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+	ind      int
+	complete bool // if the current position is a complete line
+	compact  bool // whether to write out as a one-liner
+	w        writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+	if !strings.Contains(s, "\n") {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		w.complete = false
+		return io.WriteString(w.w, s)
+	}
+	// WriteString is typically called without newlines, so this
+	// codepath and its copy are rare.  We copy to avoid
+	// duplicating all of Write's logic here.
+	return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+	newlines := bytes.Count(p, newline)
+	if newlines == 0 {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		n, err = w.w.Write(p)
+		w.complete = false
+		return n, err
+	}
+
+	frags := bytes.SplitN(p, newline, newlines+1)
+	if w.compact {
+		for i, frag := range frags {
+			if i > 0 {
+				if err := w.w.WriteByte(' '); err != nil {
+					return n, err
+				}
+				n++
+			}
+			nn, err := w.w.Write(frag)
+			n += nn
+			if err != nil {
+				return n, err
+			}
+		}
+		return n, nil
+	}
+
+	for i, frag := range frags {
+		if w.complete {
+			w.writeIndent()
+		}
+		nn, err := w.w.Write(frag)
+		n += nn
+		if err != nil {
+			return n, err
+		}
+		if i+1 < len(frags) {
+			if err := w.w.WriteByte('\n'); err != nil {
+				return n, err
+			}
+			n++
+		}
+	}
+	w.complete = len(frags[len(frags)-1]) == 0
+	return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+	if w.compact && c == '\n' {
+		c = ' '
+	}
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	err := w.w.WriteByte(c)
+	w.complete = c == '\n'
+	return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+	if w.ind == 0 {
+		log.Print("proto: textWriter unindented too far")
+		return
+	}
+	w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+	if _, err := w.WriteString(props.OrigName); err != nil {
+		return err
+	}
+	if props.Wire != "group" {
+		return w.WriteByte(':')
+	}
+	return nil
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
+	for _, ch := range u {
+		switch {
+		case ch == '.' || ch == '/' || ch == '_':
+			continue
+		case '0' <= ch && ch <= '9':
+			continue
+		case 'A' <= ch && ch <= 'Z':
+			continue
+		case 'a' <= ch && ch <= 'z':
+			continue
+		default:
+			return true
+		}
+	}
+	return false
+}
+
+// isAny reports whether sv is a google.protobuf.Any message
+func isAny(sv reflect.Value) bool {
+	type wkt interface {
+		XXX_WellKnownType() string
+	}
+	t, ok := sv.Addr().Interface().(wkt)
+	return ok && t.XXX_WellKnownType() == "Any"
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if sv value can't be unmarshaled (e.g. because
+// required messages are not linked in).
+//
+// It returns (true, error) when sv was written in expanded format or an error
+// was encountered.
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) {
+	turl := sv.FieldByName("TypeUrl")
+	val := sv.FieldByName("Value")
+	if !turl.IsValid() || !val.IsValid() {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	b, ok := val.Interface().([]byte)
+	if !ok {
+		return true, errors.New("proto: invalid google.protobuf.Any message")
+	}
+
+	parts := strings.Split(turl.String(), "/")
+	mt := MessageType(parts[len(parts)-1])
+	if mt == nil {
+		return false, nil
+	}
+	m := reflect.New(mt.Elem())
+	if err := Unmarshal(b, m.Interface().(Message)); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	u := turl.String()
+	if requiresQuotes(u) {
+		writeString(w, u)
+	} else {
+		w.Write([]byte(u))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.ind++
+	}
+	if err := tm.writeStruct(w, m.Elem()); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.ind--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
+func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error {
+	if tm.ExpandAny && isAny(sv) {
+		if canExpand, err := tm.writeProto3Any(w, sv); canExpand {
+			return err
+		}
+	}
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < sv.NumField(); i++ {
+		fv := sv.Field(i)
+		props := sprops.Prop[i]
+		name := st.Field(i).Name
+
+		if name == "XXX_NoUnkeyedLiteral" {
+			continue
+		}
+
+		if strings.HasPrefix(name, "XXX_") {
+			// There are two XXX_ fields:
+			//   XXX_unrecognized []byte
+			//   XXX_extensions   map[int32]proto.Extension
+			// The first is handled here;
+			// the second is handled at the bottom of this function.
+			if name == "XXX_unrecognized" && !fv.IsNil() {
+				if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Ptr && fv.IsNil() {
+			// Field not filled in. This could be an optional field or
+			// a required field that wasn't filled in. Either way, there
+			// isn't anything we can show for it.
+			continue
+		}
+		if fv.Kind() == reflect.Slice && fv.IsNil() {
+			// Repeated field that is empty, or a bytes field that is unused.
+			continue
+		}
+
+		if props.Repeated && fv.Kind() == reflect.Slice {
+			// Repeated field.
+			for j := 0; j < fv.Len(); j++ {
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				v := fv.Index(j)
+				if v.Kind() == reflect.Ptr && v.IsNil() {
+					// A nil message in a repeated field is not valid,
+					// but we can handle that more gracefully than panicking.
+					if _, err := w.Write([]byte("<nil>\n")); err != nil {
+						return err
+					}
+					continue
+				}
+				if len(props.Enum) > 0 {
+					if err := tm.writeEnum(w, v, props); err != nil {
+						return err
+					}
+				} else if err := tm.writeAny(w, v, props); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if fv.Kind() == reflect.Map {
+			// Map fields are rendered as a repeated struct with key/value fields.
+			keys := fv.MapKeys()
+			sort.Sort(mapKeys(keys))
+			for _, key := range keys {
+				val := fv.MapIndex(key)
+				if err := writeName(w, props); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				// open struct
+				if err := w.WriteByte('<'); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				w.indent()
+				// key
+				if _, err := w.WriteString("key:"); err != nil {
+					return err
+				}
+				if !w.compact {
+					if err := w.WriteByte(' '); err != nil {
+						return err
+					}
+				}
+				if err := tm.writeAny(w, key, props.MapKeyProp); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+				// nil values aren't legal, but we can avoid panicking because of them.
+				if val.Kind() != reflect.Ptr || !val.IsNil() {
+					// value
+					if _, err := w.WriteString("value:"); err != nil {
+						return err
+					}
+					if !w.compact {
+						if err := w.WriteByte(' '); err != nil {
+							return err
+						}
+					}
+					if err := tm.writeAny(w, val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := w.WriteByte('\n'); err != nil {
+						return err
+					}
+				}
+				// close struct
+				w.unindent()
+				if err := w.WriteByte('>'); err != nil {
+					return err
+				}
+				if err := w.WriteByte('\n'); err != nil {
+					return err
+				}
+			}
+			continue
+		}
+		if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+			// empty bytes field
+			continue
+		}
+		if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+			// proto3 non-repeated scalar field; skip if zero value
+			if isProto3Zero(fv) {
+				continue
+			}
+		}
+
+		if fv.Kind() == reflect.Interface {
+			// Check if it is a oneof.
+			if st.Field(i).Tag.Get("protobuf_oneof") != "" {
+				// fv is nil, or holds a pointer to generated struct.
+				// That generated struct has exactly one field,
+				// which has a protobuf struct tag.
+				if fv.IsNil() {
+					continue
+				}
+				inner := fv.Elem().Elem() // interface -> *T -> T
+				tag := inner.Type().Field(0).Tag.Get("protobuf")
+				props = new(Properties) // Overwrite the outer props var, but not its pointee.
+				props.Parse(tag)
+				// Write the value in the oneof, not the oneof itself.
+				fv = inner.Field(0)
+
+				// Special case to cope with malformed messages gracefully:
+				// If the value in the oneof is a nil pointer, don't panic
+				// in writeAny.
+				if fv.Kind() == reflect.Ptr && fv.IsNil() {
+					// Use errors.New so writeAny won't render quotes.
+					msg := errors.New("/* nil */")
+					fv = reflect.ValueOf(&msg).Elem()
+				}
+			}
+		}
+
+		if err := writeName(w, props); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+
+		if len(props.Enum) > 0 {
+			if err := tm.writeEnum(w, fv, props); err != nil {
+				return err
+			}
+		} else if err := tm.writeAny(w, fv, props); err != nil {
+			return err
+		}
+
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+
+	// Extensions (the XXX_extensions field).
+	pv := sv
+	if pv.CanAddr() {
+		pv = sv.Addr()
+	} else {
+		pv = reflect.New(sv.Type())
+		pv.Elem().Set(sv)
+	}
+	if _, err := extendable(pv.Interface()); err == nil {
+		if err := tm.writeExtensions(w, pv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem()
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+	v = reflect.Indirect(v)
+
+	if props != nil {
+		if len(props.CustomType) > 0 {
+			custom, ok := v.Interface().(Marshaler)
+			if ok {
+				data, err := custom.Marshal()
+				if err != nil {
+					return err
+				}
+				if err := writeString(w, string(data)); err != nil {
+					return err
+				}
+				return nil
+			}
+		} else if len(props.CastType) > 0 {
+			if _, ok := v.Interface().(interface {
+				String() string
+			}); ok {
+				switch v.Kind() {
+				case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
+					reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+					_, err := fmt.Fprintf(w, "%d", v.Interface())
+					return err
+				}
+			}
+		} else if props.StdTime {
+			t, ok := v.Interface().(time.Time)
+			if !ok {
+				return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
+			}
+			tproto, err := timestampProto(t)
+			if err != nil {
+				return err
+			}
+			propsCopy := *props // Make a copy so that this is goroutine-safe
+			propsCopy.StdTime = false
+			err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
+			return err
+		} else if props.StdDuration {
+			d, ok := v.Interface().(time.Duration)
+			if !ok {
+				return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface())
+			}
+			dproto := durationProto(d)
+			propsCopy := *props // Make a copy so that this is goroutine-safe
+			propsCopy.StdDuration = false
+			err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
+			return err
+		}
+	}
+
+	// Floats have special cases.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		x := v.Float()
+		var b []byte
+		switch {
+		case math.IsInf(x, 1):
+			b = posInf
+		case math.IsInf(x, -1):
+			b = negInf
+		case math.IsNaN(x):
+			b = nan
+		}
+		if b != nil {
+			_, err := w.Write(b)
+			return err
+		}
+		// Other values are handled below.
+	}
+
+	// We don't attempt to serialise every possible value type; only those
+	// that can occur in protocol buffers.
+	switch v.Kind() {
+	case reflect.Slice:
+		// Should only be a []byte; repeated fields are handled in writeStruct.
+		if err := writeString(w, string(v.Bytes())); err != nil {
+			return err
+		}
+	case reflect.String:
+		if err := writeString(w, v.String()); err != nil {
+			return err
+		}
+	case reflect.Struct:
+		// Required/optional group/message.
+		var bra, ket byte = '<', '>'
+		if props != nil && props.Wire == "group" {
+			bra, ket = '{', '}'
+		}
+		if err := w.WriteByte(bra); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte('\n'); err != nil {
+				return err
+			}
+		}
+		w.indent()
+		if v.CanAddr() {
+			// Calling v.Interface on a struct causes the reflect package to
+			// copy the entire struct. This is racy with the new Marshaler
+			// since we atomically update the XXX_sizecache.
+			//
+			// Thus, we retrieve a pointer to the struct if possible to avoid
+			// a race since v.Interface on the pointer doesn't copy the struct.
+			//
+			// If v is not addressable, then we are not worried about a race
+			// since it implies that the binary Marshaler cannot possibly be
+			// mutating this value.
+			v = v.Addr()
+		}
+		if v.Type().Implements(textMarshalerType) {
+			text, err := v.Interface().(encoding.TextMarshaler).MarshalText()
+			if err != nil {
+				return err
+			}
+			if _, err = w.Write(text); err != nil {
+				return err
+			}
+		} else {
+			if v.Kind() == reflect.Ptr {
+				v = v.Elem()
+			}
+			if err := tm.writeStruct(w, v); err != nil {
+				return err
+			}
+		}
+		w.unindent()
+		if err := w.WriteByte(ket); err != nil {
+			return err
+		}
+	default:
+		_, err := fmt.Fprint(w, v.Interface())
+		return err
+	}
+	return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+	return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
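+// For example, writeString(w, "a\"b\x01") emits "a\"b\001".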
+func writeString(w *textWriter, s string) error {
+	// use WriteByte here to get any needed indent
+	if err := w.WriteByte('"'); err != nil {
+		return err
+	}
+	// Loop over the bytes, not the runes.
+	for i := 0; i < len(s); i++ {
+		var err error
+		// Divergence from C++: we don't escape apostrophes.
+		// There's no need to escape them, and the C++ parser
+		// copes with a naked apostrophe.
+		switch c := s[i]; c {
+		case '\n':
+			_, err = w.w.Write(backslashN)
+		case '\r':
+			_, err = w.w.Write(backslashR)
+		case '\t':
+			_, err = w.w.Write(backslashT)
+		case '"':
+			_, err = w.w.Write(backslashDQ)
+		case '\\':
+			_, err = w.w.Write(backslashBS)
+		default:
+			if isprint(c) {
+				err = w.w.WriteByte(c)
+			} else {
+				_, err = fmt.Fprintf(w.w, "\\%03o", c)
+			}
+		}
+		if err != nil {
+			return err
+		}
+	}
+	return w.WriteByte('"')
+}
+
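+// writeUnknownStruct renders raw unrecognized bytes (XXX_unrecognized) as
+// text, printing each field as tag: value and wrapping anything that cannot
+// be decoded in /* ... */ comments.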
+func writeUnknownStruct(w *textWriter, data []byte) (err error) {
+	if !w.compact {
+		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
+			return err
+		}
+	}
+	b := NewBuffer(data)
+	for b.index < len(b.buf) {
+		x, err := b.DecodeVarint()
+		if err != nil {
+			_, ferr := fmt.Fprintf(w, "/* %v */\n", err)
+			return ferr
+		}
+		wire, tag := x&7, x>>3
+		if wire == WireEndGroup {
+			w.unindent()
+			if _, werr := w.Write(endBraceNewline); werr != nil {
+				return werr
+			}
+			continue
+		}
+		if _, ferr := fmt.Fprint(w, tag); ferr != nil {
+			return ferr
+		}
+		if wire != WireStartGroup {
+			if err = w.WriteByte(':'); err != nil {
+				return err
+			}
+		}
+		if !w.compact || wire == WireStartGroup {
+			if err = w.WriteByte(' '); err != nil {
+				return err
+			}
+		}
+		switch wire {
+		case WireBytes:
+			buf, e := b.DecodeRawBytes(false)
+			if e == nil {
+				_, err = fmt.Fprintf(w, "%q", buf)
+			} else {
+				_, err = fmt.Fprintf(w, "/* %v */", e)
+			}
+		case WireFixed32:
+			x, err = b.DecodeFixed32()
+			err = writeUnknownInt(w, x, err)
+		case WireFixed64:
+			x, err = b.DecodeFixed64()
+			err = writeUnknownInt(w, x, err)
+		case WireStartGroup:
+			err = w.WriteByte('{')
+			w.indent()
+		case WireVarint:
+			x, err = b.DecodeVarint()
+			err = writeUnknownInt(w, x, err)
+		default:
+			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
+		}
+		if err != nil {
+			return err
+		}
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+	if err == nil {
+		_, err = fmt.Fprint(w, x)
+	} else {
+		_, err = fmt.Fprintf(w, "/* %v */", err)
+	}
+	return err
+}
+
+type int32Slice []int32
+
+func (s int32Slice) Len() int           { return len(s) }
+func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] }
+func (s int32Slice) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }
+
+// writeExtensions writes all the extensions in pv.
+// pv is assumed to be a pointer to a protocol message struct that is extendable.
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error {
+	emap := extensionMaps[pv.Type().Elem()]
+	e := pv.Interface().(Message)
+
+	var m map[int32]Extension
+	var mu sync.Locker
+	if em, ok := e.(extensionsBytes); ok {
+		eb := em.GetExtensions()
+		var err error
+		m, err = BytesToExtensionsMap(*eb)
+		if err != nil {
+			return err
+		}
+		mu = notLocker{}
+	} else if _, ok := e.(extendableProto); ok {
+		ep, _ := extendable(e)
+		m, mu = ep.extensionsRead()
+		if m == nil {
+			return nil
+		}
+	}
+
+	// Order the extensions by ID.
+	// This isn't strictly necessary, but it will give us
+	// canonical output, which will also make testing easier.
+
+	mu.Lock()
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids))
+	mu.Unlock()
+
+	for _, extNum := range ids {
+		ext := m[extNum]
+		var desc *ExtensionDesc
+		if emap != nil {
+			desc = emap[extNum]
+		}
+		if desc == nil {
+			// Unknown extension.
+			if err := writeUnknownStruct(w, ext.enc); err != nil {
+				return err
+			}
+			continue
+		}
+
+		pb, err := GetExtension(e, desc)
+		if err != nil {
+			return fmt.Errorf("failed getting extension: %v", err)
+		}
+
+		// Repeated extensions will appear as a slice.
+		if !desc.repeated() {
+			if err := tm.writeExtension(w, desc.Name, pb); err != nil {
+				return err
+			}
+		} else {
+			v := reflect.ValueOf(pb)
+			for i := 0; i < v.Len(); i++ {
+				if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error {
+	if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte(' '); err != nil {
+			return err
+		}
+	}
+	if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+		return err
+	}
+	if err := w.WriteByte('\n'); err != nil {
+		return err
+	}
+	return nil
+}
+
+func (w *textWriter) writeIndent() {
+	if !w.complete {
+		return
+	}
+	remain := w.ind * 2
+	for remain > 0 {
+		n := remain
+		if n > len(spaces) {
+			n = len(spaces)
+		}
+		w.w.Write(spaces[:n])
+		remain -= n
+	}
+	w.complete = false
+}
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact   bool // use compact text format (one line).
+	ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error {
+	val := reflect.ValueOf(pb)
+	if pb == nil || val.IsNil() {
+		w.Write([]byte("<nil>"))
+		return nil
+	}
+	var bw *bufio.Writer
+	ww, ok := w.(writer)
+	if !ok {
+		bw = bufio.NewWriter(w)
+		ww = bw
+	}
+	aw := &textWriter{
+		w:        ww,
+		complete: true,
+		compact:  tm.Compact,
+	}
+
+	if etm, ok := pb.(encoding.TextMarshaler); ok {
+		text, err := etm.MarshalText()
+		if err != nil {
+			return err
+		}
+		if _, err = aw.Write(text); err != nil {
+			return err
+		}
+		if bw != nil {
+			return bw.Flush()
+		}
+		return nil
+	}
+	// Dereference the received pointer so we don't have outer < and >.
+	v := reflect.Indirect(val)
+	if err := tm.writeStruct(aw, v); err != nil {
+		return err
+	}
+	if bw != nil {
+		return bw.Flush()
+	}
+	return nil
+}
+
+// Text is the same as Marshal, but returns the string directly.
+func (tm *TextMarshaler) Text(pb Message) string {
+	var buf bytes.Buffer
+	tm.Marshal(&buf, pb)
+	return buf.String()
+}
+
+var (
+	defaultTextMarshaler = TextMarshaler{}
+	compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// TODO: consider removing some of the Marshal functions below.
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) }
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) }
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) }
diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
new file mode 100644
index 0000000..1d6c6aa
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go
@@ -0,0 +1,57 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+)
+
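+// writeEnum writes an enum value, using the name registered in enumStringMaps
+// when one exists and falling back to the numeric representation otherwise.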
+func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+	m, ok := enumStringMaps[props.Enum]
+	if !ok {
+		if err := tm.writeAny(w, v, props); err != nil {
+			return err
+		}
+	}
+	key := int32(0)
+	if v.Kind() == reflect.Ptr {
+		key = int32(v.Elem().Int())
+	} else {
+		key = int32(v.Int())
+	}
+	s, ok := m[key]
+	if !ok {
+		if err := tm.writeAny(w, v, props); err != nil {
+			return err
+		}
+	}
+	_, err := fmt.Fprint(w, s)
+	return err
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..f85c0cc
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -0,0 +1,1018 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+)
+
+// Error string emitted when deserializing Any and fields are already set
+const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set"
+
+type ParseError struct {
+	Message string
+	Line    int // 1-based line number
+	Offset  int // 0-based byte offset from start of input
+}
+
+func (p *ParseError) Error() string {
+	if p.Line == 1 {
+		// show offset only for first line
+		return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
+	}
+	return fmt.Sprintf("line %d: %v", p.Line, p.Message)
+}
+
+type token struct {
+	value    string
+	err      *ParseError
+	line     int    // line number
+	offset   int    // byte number from start of input, not start of line
+	unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func (t *token) String() string {
+	if t.err == nil {
+		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
+	}
+	return fmt.Sprintf("parse error: %v", t.err)
+}
+
+type textParser struct {
+	s            string // remaining input
+	done         bool   // whether the parsing is finished (success or error)
+	backed       bool   // whether back() was called
+	offset, line int
+	cur          token
+}
+
+func newTextParser(s string) *textParser {
+	p := new(textParser)
+	p.s = s
+	p.line = 1
+	p.cur.line = 1
+	return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+	p.cur.err = pe
+	p.done = true
+	return pe
+}
+
+// Numbers and identifiers are matched by [-+._A-Za-z0-9]
+func isIdentOrNumberChar(c byte) bool {
+	switch {
+	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+		return true
+	case '0' <= c && c <= '9':
+		return true
+	}
+	switch c {
+	case '-', '+', '.', '_':
+		return true
+	}
+	return false
+}
+
+func isWhitespace(c byte) bool {
+	switch c {
+	case ' ', '\t', '\n', '\r':
+		return true
+	}
+	return false
+}
+
+func isQuote(c byte) bool {
+	switch c {
+	case '"', '\'':
+		return true
+	}
+	return false
+}
+
+func (p *textParser) skipWhitespace() {
+	i := 0
+	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+		if p.s[i] == '#' {
+			// comment; skip to end of line or input
+			for i < len(p.s) && p.s[i] != '\n' {
+				i++
+			}
+			if i == len(p.s) {
+				break
+			}
+		}
+		if p.s[i] == '\n' {
+			p.line++
+		}
+		i++
+	}
+	p.offset += i
+	p.s = p.s[i:len(p.s)]
+	if len(p.s) == 0 {
+		p.done = true
+	}
+}
+
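+// advance scans the next token from the input into p.cur: a single symbol,
+// a quoted string, or a run of identifier/number characters.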
+func (p *textParser) advance() {
+	// Skip whitespace
+	p.skipWhitespace()
+	if p.done {
+		return
+	}
+
+	// Start of non-whitespace
+	p.cur.err = nil
+	p.cur.offset, p.cur.line = p.offset, p.line
+	p.cur.unquoted = ""
+	switch p.s[0] {
+	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+		// Single symbol
+		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+	case '"', '\'':
+		// Quoted string
+		i := 1
+		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+			if p.s[i] == '\\' && i+1 < len(p.s) {
+				// skip escaped char
+				i++
+			}
+			i++
+		}
+		if i >= len(p.s) || p.s[i] != p.s[0] {
+			p.errorf("unmatched quote")
+			return
+		}
+		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+		if err != nil {
+			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+			return
+		}
+		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+		p.cur.unquoted = unq
+	default:
+		i := 0
+		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+			i++
+		}
+		if i == 0 {
+			p.errorf("unexpected byte %#x", p.s[0])
+			return
+		}
+		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+	}
+	p.offset += len(p.cur.value)
+}
+
+var (
+	errBadUTF8 = errors.New("proto: bad UTF-8")
+)
+
+func unquoteC(s string, quote rune) (string, error) {
+	// This is based on C++'s tokenizer.cc.
+	// Despite its name, this is *not* parsing C syntax.
+	// For instance, "\0" is an invalid quoted string.
+
+	// Avoid allocation in trivial cases.
+	simple := true
+	for _, r := range s {
+		if r == '\\' || r == quote {
+			simple = false
+			break
+		}
+	}
+	if simple {
+		return s, nil
+	}
+
+	buf := make([]byte, 0, 3*len(s)/2)
+	for len(s) > 0 {
+		r, n := utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && n == 1 {
+			return "", errBadUTF8
+		}
+		s = s[n:]
+		if r != '\\' {
+			if r < utf8.RuneSelf {
+				buf = append(buf, byte(r))
+			} else {
+				buf = append(buf, string(r)...)
+			}
+			continue
+		}
+
+		ch, tail, err := unescape(s)
+		if err != nil {
+			return "", err
+		}
+		buf = append(buf, ch...)
+		s = tail
+	}
+	return string(buf), nil
+}
+
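+// unescape decodes one escape sequence at the start of s (the leading
+// backslash has already been consumed) and returns the replacement text plus
+// the remaining input; for example, unescape("101rest") returns ("A", "rest", nil).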
+func unescape(s string) (ch string, tail string, err error) {
+	r, n := utf8.DecodeRuneInString(s)
+	if r == utf8.RuneError && n == 1 {
+		return "", "", errBadUTF8
+	}
+	s = s[n:]
+	switch r {
+	case 'a':
+		return "\a", s, nil
+	case 'b':
+		return "\b", s, nil
+	case 'f':
+		return "\f", s, nil
+	case 'n':
+		return "\n", s, nil
+	case 'r':
+		return "\r", s, nil
+	case 't':
+		return "\t", s, nil
+	case 'v':
+		return "\v", s, nil
+	case '?':
+		return "?", s, nil // trigraph workaround
+	case '\'', '"', '\\':
+		return string(r), s, nil
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		if len(s) < 2 {
+			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+		}
+		ss := string(r) + s[:2]
+		s = s[2:]
+		i, err := strconv.ParseUint(ss, 8, 8)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+		}
+		return string([]byte{byte(i)}), s, nil
+	case 'x', 'X', 'u', 'U':
+		var n int
+		switch r {
+		case 'x', 'X':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		if len(s) < n {
+			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+		}
+		ss := s[:n]
+		s = s[n:]
+		i, err := strconv.ParseUint(ss, 16, 64)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+		}
+		if r == 'x' || r == 'X' {
+			return string([]byte{byte(i)}), s, nil
+		}
+		if i > utf8.MaxRune {
+			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+		}
+		return string(rune(i)), s, nil
+	}
+	return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+	if p.backed || p.done {
+		p.backed = false
+		return &p.cur
+	}
+	p.advance()
+	if p.done {
+		p.cur.value = ""
+	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+		// Look for multiple quoted strings separated by whitespace,
+		// and concatenate them.
+		cat := p.cur
+		for {
+			p.skipWhitespace()
+			if p.done || !isQuote(p.s[0]) {
+				break
+			}
+			p.advance()
+			if p.cur.err != nil {
+				return &p.cur
+			}
+			cat.value += " " + p.cur.value
+			cat.unquoted += p.cur.unquoted
+		}
+		p.done = false // parser may have seen EOF, but we want to return cat
+		p.cur = cat
+	}
+	return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != s {
+		p.back()
+		return p.errorf("expected %q, found %q", s, tok.value)
+	}
+	return nil
+}
+
+// Return a RequiredNotSetError indicating which required field was not set.
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	for i := 0; i < st.NumField(); i++ {
+		if !isNil(sv.Field(i)) {
+			continue
+		}
+
+		props := sprops.Prop[i]
+		if props.Required {
+			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
+		}
+	}
+	return &RequiredNotSetError{fmt.Sprintf("%v.<unknown field name>", st)} // should not happen
+}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) {
+	i, ok := sprops.decoderOrigNames[name]
+	if ok {
+		return i, sprops.Prop[i], true
+	}
+	return -1, nil, false
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ":" {
+		// Colon is optional when the field is a group or message.
+		needColon := true
+		switch props.Wire {
+		case "group":
+			needColon = false
+		case "bytes":
+			// A "bytes" field is either a message, a string, or a repeated field;
+			// those three become *T, *string and []T respectively, so we can check for
+			// this field being a pointer to a non-string.
+			if typ.Kind() == reflect.Ptr {
+				// *T or *string
+				if typ.Elem().Kind() == reflect.String {
+					break
+				}
+			} else if typ.Kind() == reflect.Slice {
+				// []T or []*T
+				if typ.Elem().Kind() != reflect.Ptr {
+					break
+				}
+			} else if typ.Kind() == reflect.String {
+				// The proto3 exception is for a string field,
+				// which requires a colon.
+				break
+			}
+			needColon = false
+		}
+		if needColon {
+			return p.errorf("expected ':', found %q", tok.value)
+		}
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
+	st := sv.Type()
+	sprops := GetProperties(st)
+	reqCount := sprops.reqCount
+	var reqFieldErr error
+	fieldSet := make(map[string]bool)
+	// A struct is a sequence of "name: value", terminated by one of
+	// '>' or '}', or the end of the input.  A name may also be
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
+	for {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		if tok.value == terminator {
+			break
+		}
+		if tok.value == "[" {
+			// Looks like an extension or an Any.
+			//
+			// TODO: Check whether we need to handle
+			// namespace rooted names (e.g. ".something.Foo").
+			extName, err := p.consumeExtName()
+			if err != nil {
+				return err
+			}
+
+			if s := strings.LastIndex(extName, "/"); s >= 0 {
+				// If it contains a slash, it's an Any type URL.
+				messageName := extName[s+1:]
+				mt := MessageType(messageName)
+				if mt == nil {
+					return p.errorf("unrecognized message %q in google.protobuf.Any", messageName)
+				}
+				tok = p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				// consume an optional colon
+				if tok.value == ":" {
+					tok = p.next()
+					if tok.err != nil {
+						return tok.err
+					}
+				}
+				var terminator string
+				switch tok.value {
+				case "<":
+					terminator = ">"
+				case "{":
+					terminator = "}"
+				default:
+					return p.errorf("expected '{' or '<', found %q", tok.value)
+				}
+				v := reflect.New(mt.Elem())
+				if pe := p.readStruct(v.Elem(), terminator); pe != nil {
+					return pe
+				}
+				b, err := Marshal(v.Interface().(Message))
+				if err != nil {
+					return p.errorf("failed to marshal message of type %q: %v", messageName, err)
+				}
+				if fieldSet["type_url"] {
+					return p.errorf(anyRepeatedlyUnpacked, "type_url")
+				}
+				if fieldSet["value"] {
+					return p.errorf(anyRepeatedlyUnpacked, "value")
+				}
+				sv.FieldByName("TypeUrl").SetString(extName)
+				sv.FieldByName("Value").SetBytes(b)
+				fieldSet["type_url"] = true
+				fieldSet["value"] = true
+				continue
+			}
+
+			var desc *ExtensionDesc
+			// This could be faster, but it's functional.
+			// TODO: Do something smarter than a linear scan.
+			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
+				if d.Name == extName {
+					desc = d
+					break
+				}
+			}
+			if desc == nil {
+				return p.errorf("unrecognized extension %q", extName)
+			}
+
+			props := &Properties{}
+			props.Parse(desc.Tag)
+
+			typ := reflect.TypeOf(desc.ExtensionType)
+			if err := p.checkForColon(props, typ); err != nil {
+				return err
+			}
+
+			rep := desc.repeated()
+
+			// Read the extension structure, and set it in
+			// the value we're constructing.
+			var ext reflect.Value
+			if !rep {
+				ext = reflect.New(typ).Elem()
+			} else {
+				ext = reflect.New(typ.Elem()).Elem()
+			}
+			if err := p.readAny(ext, props); err != nil {
+				if _, ok := err.(*RequiredNotSetError); !ok {
+					return err
+				}
+				reqFieldErr = err
+			}
+			ep := sv.Addr().Interface().(Message)
+			if !rep {
+				SetExtension(ep, desc, ext.Interface())
+			} else {
+				old, err := GetExtension(ep, desc)
+				var sl reflect.Value
+				if err == nil {
+					sl = reflect.ValueOf(old) // existing slice
+				} else {
+					sl = reflect.MakeSlice(typ, 0, 1)
+				}
+				sl = reflect.Append(sl, ext)
+				SetExtension(ep, desc, sl.Interface())
+			}
+			if err := p.consumeOptionalSeparator(); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// This is a normal, non-extension field.
+		name := tok.value
+		var dst reflect.Value
+		fi, props, ok := structFieldByName(sprops, name)
+		if ok {
+			dst = sv.Field(fi)
+		} else if oop, ok := sprops.OneofTypes[name]; ok {
+			// It is a oneof.
+			props = oop.Prop
+			nv := reflect.New(oop.Type.Elem())
+			dst = nv.Elem().Field(0)
+			field := sv.Field(oop.Field)
+			if !field.IsNil() {
+				return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name)
+			}
+			field.Set(nv)
+		}
+		if !dst.IsValid() {
+			return p.errorf("unknown field name %q in %v", name, st)
+		}
+
+		if dst.Kind() == reflect.Map {
+			// Consume any colon.
+			if err := p.checkForColon(props, dst.Type()); err != nil {
+				return err
+			}
+
+			// Construct the map if it doesn't already exist.
+			if dst.IsNil() {
+				dst.Set(reflect.MakeMap(dst.Type()))
+			}
+			key := reflect.New(dst.Type().Key()).Elem()
+			val := reflect.New(dst.Type().Elem()).Elem()
+
+			// The map entry should be this sequence of tokens:
+			//	< key : KEY value : VALUE >
+			// However, implementations may omit key or value, and technically
+			// we should support them in any order.  See b/28924776 for a time
+			// this went wrong.
+
+			tok := p.next()
+			var terminator string
+			switch tok.value {
+			case "<":
+				terminator = ">"
+			case "{":
+				terminator = "}"
+			default:
+				return p.errorf("expected '{' or '<', found %q", tok.value)
+			}
+			for {
+				tok := p.next()
+				if tok.err != nil {
+					return tok.err
+				}
+				if tok.value == terminator {
+					break
+				}
+				switch tok.value {
+				case "key":
+					if err := p.consumeToken(":"); err != nil {
+						return err
+					}
+					if err := p.readAny(key, props.MapKeyProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				case "value":
+					if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil {
+						return err
+					}
+					if err := p.readAny(val, props.MapValProp); err != nil {
+						return err
+					}
+					if err := p.consumeOptionalSeparator(); err != nil {
+						return err
+					}
+				default:
+					p.back()
+					return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+				}
+			}
+
+			dst.SetMapIndex(key, val)
+			continue
+		}
+
+		// Check that it's not already set if it's not a repeated field.
+		if !props.Repeated && fieldSet[name] {
+			return p.errorf("non-repeated field %q was repeated", name)
+		}
+
+		if err := p.checkForColon(props, dst.Type()); err != nil {
+			return err
+		}
+
+		// Parse into the field.
+		fieldSet[name] = true
+		if err := p.readAny(dst, props); err != nil {
+			if _, ok := err.(*RequiredNotSetError); !ok {
+				return err
+			}
+			reqFieldErr = err
+		}
+		if props.Required {
+			reqCount--
+		}
+
+		if err := p.consumeOptionalSeparator(); err != nil {
+			return err
+		}
+
+	}
+
+	if reqCount > 0 {
+		return p.missingRequiredFieldError(sv)
+	}
+	return reqFieldErr
+}
+
+// consumeExtName consumes extension name or expanded Any type URL and the
+// following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtName() (string, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return "", tok.err
+	}
+
+	// If extension name or type url is quoted, it's a single token.
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+		if p.done && tok.value != "]" {
+			return "", p.errorf("unclosed type_url or extension name")
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in readStruct to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value == "" {
+		return p.errorf("unexpected EOF")
+	}
+	if len(props.CustomType) > 0 {
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				tc := reflect.TypeOf(new(Marshaler))
+				ok := t.Elem().Implements(tc.Elem())
+				if ok {
+					fv := v
+					flen := fv.Len()
+					if flen == fv.Cap() {
+						nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+						reflect.Copy(nav, fv)
+						fv.Set(nav)
+					}
+					fv.SetLen(flen + 1)
+
+					// Read one.
+					p.back()
+					return p.readAny(fv.Index(flen), props)
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+			err := custom.Unmarshal([]byte(tok.unquoted))
+			if err != nil {
+				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+			}
+			v.Set(reflect.ValueOf(custom))
+		} else {
+			custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+			err := custom.Unmarshal([]byte(tok.unquoted))
+			if err != nil {
+				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+			}
+			v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+		}
+		return nil
+	}
+	if props.StdTime {
+		fv := v
+		p.back()
+		props.StdTime = false
+		tproto := &timestamp{}
+		err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
+		props.StdTime = true
+		if err != nil {
+			return err
+		}
+		tim, err := timestampFromProto(tproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ts := fv.Interface().([]*time.Time)
+					ts = append(ts, &tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				} else {
+					ts := fv.Interface().([]time.Time)
+					ts = append(ts, tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&tim))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
+		}
+		return nil
+	}
+	if props.StdDuration {
+		fv := v
+		p.back()
+		props.StdDuration = false
+		dproto := &duration{}
+		err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
+		props.StdDuration = true
+		if err != nil {
+			return err
+		}
+		dur, err := durationFromProto(dproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ds := fv.Interface().([]*time.Duration)
+					ds = append(ds, &dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				} else {
+					ds := fv.Interface().([]time.Duration)
+					ds = append(ds, dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&dur))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
+		}
+		return nil
+	}
+	switch fv := v; fv.Kind() {
+	case reflect.Slice:
+		at := v.Type()
+		if at.Elem().Kind() == reflect.Uint8 {
+			// Special case for []byte
+			if tok.value[0] != '"' && tok.value[0] != '\'' {
+				// Deliberately written out here, as the error after
+				// this switch statement would write "invalid []byte: ...",
+				// which is not as user-friendly.
+				return p.errorf("invalid string: %v", tok.value)
+			}
+			bytes := []byte(tok.unquoted)
+			fv.Set(reflect.ValueOf(bytes))
+			return nil
+		}
+		// Repeated field.
+		if tok.value == "[" {
+			// Repeated field with list notation, like [1,2,3].
+			for {
+				fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+				err := p.readAny(fv.Index(fv.Len()-1), props)
+				if err != nil {
+					return err
+				}
+				ntok := p.next()
+				if ntok.err != nil {
+					return ntok.err
+				}
+				if ntok.value == "]" {
+					break
+				}
+				if ntok.value != "," {
+					return p.errorf("expected ']' or ',', found %q", ntok.value)
+				}
+			}
+			return nil
+		}
+		// One value of the repeated field.
+		p.back()
+		fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem()))
+		return p.readAny(fv.Index(fv.Len()-1), props)
+	case reflect.Bool:
+		// true/1/t/True or false/0/f/False.
+		switch tok.value {
+		case "true", "1", "t", "True":
+			fv.SetBool(true)
+			return nil
+		case "false", "0", "f", "False":
+			fv.SetBool(false)
+			return nil
+		}
+	case reflect.Float32, reflect.Float64:
+		v := tok.value
+		// Ignore 'f' for compatibility with output generated by C++, but don't
+		// remove 'f' when the value is "-inf" or "inf".
+		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
+			v = v[:len(v)-1]
+		}
+		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
+			fv.SetFloat(f)
+			return nil
+		}
+	case reflect.Int8:
+		if x, err := strconv.ParseInt(tok.value, 0, 8); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+	case reflect.Int16:
+		if x, err := strconv.ParseInt(tok.value, 0, 16); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+	case reflect.Int32:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+		if len(props.Enum) == 0 {
+			break
+		}
+		m, ok := enumValueMaps[props.Enum]
+		if !ok {
+			break
+		}
+		x, ok := m[tok.value]
+		if !ok {
+			break
+		}
+		fv.SetInt(int64(x))
+		return nil
+	case reflect.Int64:
+		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+			fv.SetInt(x)
+			return nil
+		}
+
+	case reflect.Ptr:
+		// A basic field (indirected through pointer), or a repeated message/group
+		p.back()
+		fv.Set(reflect.New(fv.Type().Elem()))
+		return p.readAny(fv.Elem(), props)
+	case reflect.String:
+		if tok.value[0] == '"' || tok.value[0] == '\'' {
+			fv.SetString(tok.unquoted)
+			return nil
+		}
+	case reflect.Struct:
+		var terminator string
+		switch tok.value {
+		case "{":
+			terminator = "}"
+		case "<":
+			terminator = ">"
+		default:
+			return p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
+		return p.readStruct(fv, terminator)
+	case reflect.Uint8:
+		if x, err := strconv.ParseUint(tok.value, 0, 8); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	case reflect.Uint16:
+		if x, err := strconv.ParseUint(tok.value, 0, 16); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	case reflect.Uint32:
+		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+			fv.SetUint(uint64(x))
+			return nil
+		}
+	case reflect.Uint64:
+		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+			fv.SetUint(x)
+			return nil
+		}
+	}
+	return p.errorf("invalid %v: %v", v.Type(), tok.value)
+}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
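+//
+// A minimal usage sketch (pb.MyMessage stands for any generated message type):
+//
+//	var m pb.MyMessage
+//	if err := UnmarshalText(`id: 42`, &m); err != nil {
+//		// handle the parse error
+//	}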
+func UnmarshalText(s string, pb Message) error {
+	if um, ok := pb.(encoding.TextUnmarshaler); ok {
+		return um.UnmarshalText([]byte(s))
+	}
+	pb.Reset()
+	v := reflect.ValueOf(pb)
+	return newTextParser(s).readStruct(v.Elem(), "")
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go
new file mode 100644
index 0000000..9324f65
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go
@@ -0,0 +1,113 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// This file implements operations on google.protobuf.Timestamp.
+
+import (
+	"errors"
+	"fmt"
+	"time"
+)
+
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range
+// [0001-01-01, 10000-01-01) and has a Nanos field
+// in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes
+// the problem.
+//
+// Every valid Timestamp can be represented by a time.Time, but the converse is not true.
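+//
+// For example (per the checks below), a Timestamp with Seconds: 0, Nanos: 0
+// (1970-01-01T00:00:00Z) is valid, while one with Nanos outside [0, 1e9) is rejected.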
+func validateTimestamp(ts *timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// timestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if timestampFromProto returns an error, the first return value
+// is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// timestampProto converts a time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+	seconds := t.Unix()
+	nanos := int32(t.Sub(time.Unix(seconds, 0)))
+	ts := &timestamp{
+		Seconds: seconds,
+		Nanos:   nanos,
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
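+
+// A round-trip sketch (illustrative only) using the helpers above:
+//
+//	ts, err := timestampProto(time.Now()) // time.Time -> *timestamp
+//	if err == nil {
+//		t, _ := timestampFromProto(ts) // back to time.Time, always in UTC
+//		_ = t
+//	}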
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 0000000..38439fa
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,49 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+
+type timestamp struct {
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *timestamp) Reset()       { *m = timestamp{} }
+func (*timestamp) ProtoMessage()  {}
+func (*timestamp) String() string { return "timestamp<string>" }
+
+func init() {
+	RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers.go b/vendor/github.com/gogo/protobuf/proto/wrappers.go
new file mode 100644
index 0000000..b175d1b
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/wrappers.go
@@ -0,0 +1,1888 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"io"
+	"reflect"
+)
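+
+// The makeStd*Value marshalers and unmarshalers below handle fields declared with
+// a standard Go type (e.g. float64) standing in for the corresponding protobuf
+// wrapper message (DoubleValue and friends): each value is wrapped in a small
+// helper message, marshaled, and written length-delimited on the wire.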
+
+func makeStdDoubleValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*float64)
+			v := &float64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*float64)
+			v := &float64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdDoubleValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64)
+			v := &float64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float64)
+			v := &float64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdDoubleValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(float64)
+				v := &float64Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(float64)
+				v := &float64Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdDoubleValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*float64)
+				v := &float64Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*float64)
+				v := &float64Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdDoubleValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdDoubleValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdDoubleValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdDoubleValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdFloatValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*float32)
+			v := &float32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*float32)
+			v := &float32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdFloatValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32)
+			v := &float32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*float32)
+			v := &float32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdFloatValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(float32)
+				v := &float32Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(float32)
+				v := &float32Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdFloatValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*float32)
+				v := &float32Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*float32)
+				v := &float32Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdFloatValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdFloatValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdFloatValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
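+			// For example (illustrative only), a map<string, int32> entry may
+			// appear as: my_map: < key: "k" value: 1 >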
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdFloatValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &float32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*int64)
+			v := &int64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*int64)
+			v := &int64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64)
+			v := &int64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int64)
+			v := &int64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(int64)
+				v := &int64Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(int64)
+				v := &int64Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*int64)
+				v := &int64Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*int64)
+				v := &int64Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt64ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*uint64)
+			v := &uint64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*uint64)
+			v := &uint64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdUInt64ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64)
+			v := &uint64Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint64)
+			v := &uint64Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdUInt64ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(uint64)
+				v := &uint64Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(uint64)
+				v := &uint64Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdUInt64ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*uint64)
+				v := &uint64Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*uint64)
+				v := &uint64Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdUInt64ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt64ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt64ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt64ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint64Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*int32)
+			v := &int32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*int32)
+			v := &int32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32)
+			v := &int32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*int32)
+			v := &int32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(int32)
+				v := &int32Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(int32)
+				v := &int32Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*int32)
+				v := &int32Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*int32)
+				v := &int32Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &int32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt32ValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*uint32)
+			v := &uint32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*uint32)
+			v := &uint32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdUInt32ValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32)
+			v := &uint32Value{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*uint32)
+			v := &uint32Value{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdUInt32ValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(uint32)
+				v := &uint32Value{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(uint32)
+				v := &uint32Value{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdUInt32ValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*uint32)
+				v := &uint32Value{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*uint32)
+				v := &uint32Value{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdUInt32ValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt32ValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt32ValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdUInt32ValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &uint32Value{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdBoolValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*bool)
+			v := &boolValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*bool)
+			v := &boolValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdBoolValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool)
+			v := &boolValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*bool)
+			v := &boolValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdBoolValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(bool)
+				v := &boolValue{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(bool)
+				v := &boolValue{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdBoolValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*bool)
+				v := &boolValue{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*bool)
+				v := &boolValue{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdBoolValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &boolValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdBoolValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &boolValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdBoolValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &boolValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdBoolValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &boolValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdStringValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*string)
+			v := &stringValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*string)
+			v := &stringValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdStringValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string)
+			v := &stringValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*string)
+			v := &stringValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdStringValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(string)
+				v := &stringValue{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(string)
+				v := &stringValue{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdStringValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*string)
+				v := &stringValue{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*string)
+				v := &stringValue{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdStringValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &stringValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdStringValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &stringValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdStringValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &stringValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdStringValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &stringValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdBytesValueMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			t := ptr.asPointerTo(u.typ).Interface().(*[]byte)
+			v := &bytesValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			t := ptr.asPointerTo(u.typ).Interface().(*[]byte)
+			v := &bytesValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdBytesValuePtrMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			if ptr.isNil() {
+				return 0
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte)
+			v := &bytesValue{*t}
+			siz := Size(v)
+			return tagsize + SizeVarint(uint64(siz)) + siz
+		}, func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			if ptr.isNil() {
+				return b, nil
+			}
+			t := ptr.asPointerTo(reflect.PtrTo(u.typ)).Elem().Interface().(*[]byte)
+			v := &bytesValue{*t}
+			buf, err := Marshal(v)
+			if err != nil {
+				return nil, err
+			}
+			b = appendVarint(b, wiretag)
+			b = appendVarint(b, uint64(len(buf)))
+			b = append(b, buf...)
+			return b, nil
+		}
+}
+
+func makeStdBytesValueSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(u.typ)
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().([]byte)
+				v := &bytesValue{t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(u.typ)
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().([]byte)
+				v := &bytesValue{t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdBytesValuePtrSliceMarshaler(u *marshalInfo) (sizer, marshaler) {
+	return func(ptr pointer, tagsize int) int {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			n := 0
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*[]byte)
+				v := &bytesValue{*t}
+				siz := Size(v)
+				n += siz + SizeVarint(uint64(siz)) + tagsize
+			}
+			return n
+		},
+		func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) {
+			s := ptr.getSlice(reflect.PtrTo(u.typ))
+			for i := 0; i < s.Len(); i++ {
+				elem := s.Index(i)
+				t := elem.Interface().(*[]byte)
+				v := &bytesValue{*t}
+				siz := Size(v)
+				buf, err := Marshal(v)
+				if err != nil {
+					return nil, err
+				}
+				b = appendVarint(b, wiretag)
+				b = appendVarint(b, uint64(siz))
+				b = append(b, buf...)
+			}
+
+			return b, nil
+		}
+}
+
+func makeStdBytesValueUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &bytesValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(sub.typ).Elem()
+		s.Set(reflect.ValueOf(m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdBytesValuePtrUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &bytesValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		s := f.asPointerTo(reflect.PtrTo(sub.typ)).Elem()
+		s.Set(reflect.ValueOf(&m.Value))
+		return b[x:], nil
+	}
+}
+
+func makeStdBytesValuePtrSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &bytesValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(reflect.PtrTo(sub.typ))
+		newSlice := reflect.Append(slice, reflect.ValueOf(&m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
+
+func makeStdBytesValueSliceUnmarshaler(sub *unmarshalInfo, name string) unmarshaler {
+	return func(b []byte, f pointer, w int) ([]byte, error) {
+		if w != WireBytes {
+			return nil, errInternalBadWireType
+		}
+		x, n := decodeVarint(b)
+		if n == 0 {
+			return nil, io.ErrUnexpectedEOF
+		}
+		b = b[n:]
+		if x > uint64(len(b)) {
+			return nil, io.ErrUnexpectedEOF
+		}
+		m := &bytesValue{}
+		if err := Unmarshal(b[:x], m); err != nil {
+			return nil, err
+		}
+		slice := f.getSlice(sub.typ)
+		newSlice := reflect.Append(slice, reflect.ValueOf(m.Value))
+		slice.Set(newSlice)
+		return b[x:], nil
+	}
+}
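
The makeStd*Value helpers above all follow the same wire pattern: the Go scalar (or a pointer/slice of it) is wrapped in one of the single-field value messages defined in wrappers_gogo.go below, marshaled, and emitted as a length-delimited field; the unmarshalers reverse this by decoding the length prefix and unmarshaling the payload back into the wrapper. A minimal standalone sketch of that framing (not part of the vendored tree), using a hand-rolled varint writer and an assumed outer field number of 3:

```
package main

import "fmt"

// appendVarint encodes v as a protobuf base-128 varint, mirroring the helper
// the marshalers above rely on.
func appendVarint(b []byte, v uint64) []byte {
	for v >= 0x80 {
		b = append(b, byte(v)|0x80)
		v >>= 7
	}
	return append(b, byte(v))
}

func main() {
	// Inner payload: field 1, wire type 0 (varint), value true.
	inner := appendVarint(nil, 1<<3|0)
	inner = appendVarint(inner, 1)

	// Outer field: the wrapped bool is assumed to sit at field 3 of the
	// enclosing message, so it is written as tag, length, payload.
	outer := appendVarint(nil, 3<<3|2)
	outer = appendVarint(outer, uint64(len(inner)))
	outer = append(outer, inner...)

	fmt.Printf("% x\n", outer) // 1a 02 08 01
}
```

Running it prints `1a 02 08 01`: the tag for field 3 with wire type 2, a length of 2, and the inner BoolValue-style payload `08 01`.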
diff --git a/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
new file mode 100644
index 0000000..c1cf7bf
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/wrappers_gogo.go
@@ -0,0 +1,113 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2018, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+type float64Value struct {
+	Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *float64Value) Reset()       { *m = float64Value{} }
+func (*float64Value) ProtoMessage()  {}
+func (*float64Value) String() string { return "float64<string>" }
+
+type float32Value struct {
+	Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *float32Value) Reset()       { *m = float32Value{} }
+func (*float32Value) ProtoMessage()  {}
+func (*float32Value) String() string { return "float32<string>" }
+
+type int64Value struct {
+	Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *int64Value) Reset()       { *m = int64Value{} }
+func (*int64Value) ProtoMessage()  {}
+func (*int64Value) String() string { return "int64<string>" }
+
+type uint64Value struct {
+	Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *uint64Value) Reset()       { *m = uint64Value{} }
+func (*uint64Value) ProtoMessage()  {}
+func (*uint64Value) String() string { return "uint64<string>" }
+
+type int32Value struct {
+	Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *int32Value) Reset()       { *m = int32Value{} }
+func (*int32Value) ProtoMessage()  {}
+func (*int32Value) String() string { return "int32<string>" }
+
+type uint32Value struct {
+	Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *uint32Value) Reset()       { *m = uint32Value{} }
+func (*uint32Value) ProtoMessage()  {}
+func (*uint32Value) String() string { return "uint32<string>" }
+
+type boolValue struct {
+	Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *boolValue) Reset()       { *m = boolValue{} }
+func (*boolValue) ProtoMessage()  {}
+func (*boolValue) String() string { return "bool<string>" }
+
+type stringValue struct {
+	Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *stringValue) Reset()       { *m = stringValue{} }
+func (*stringValue) ProtoMessage()  {}
+func (*stringValue) String() string { return "string<string>" }
+
+type bytesValue struct {
+	Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"`
+}
+
+func (m *bytesValue) Reset()       { *m = bytesValue{} }
+func (*bytesValue) ProtoMessage()  {}
+func (*bytesValue) String() string { return "[]byte<string>" }
+
+func init() {
+	RegisterType((*float64Value)(nil), "gogo.protobuf.proto.DoubleValue")
+	RegisterType((*float32Value)(nil), "gogo.protobuf.proto.FloatValue")
+	RegisterType((*int64Value)(nil), "gogo.protobuf.proto.Int64Value")
+	RegisterType((*uint64Value)(nil), "gogo.protobuf.proto.UInt64Value")
+	RegisterType((*int32Value)(nil), "gogo.protobuf.proto.Int32Value")
+	RegisterType((*uint32Value)(nil), "gogo.protobuf.proto.UInt32Value")
+	RegisterType((*boolValue)(nil), "gogo.protobuf.proto.BoolValue")
+	RegisterType((*stringValue)(nil), "gogo.protobuf.proto.StringValue")
+	RegisterType((*bytesValue)(nil), "gogo.protobuf.proto.BytesValue")
+}
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile
new file mode 100644
index 0000000..3496dc9
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile
@@ -0,0 +1,36 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors.  All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+#     * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+regenerate:
+	go install github.com/gogo/protobuf/protoc-gen-gogo
+	go install github.com/gogo/protobuf/protoc-gen-gostring
+	protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto
+	protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go
new file mode 100644
index 0000000..a85bf19
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go
@@ -0,0 +1,118 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2016 The Go Authors.  All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Package descriptor provides functions for obtaining protocol buffer
+// descriptors for generated Go types.
+//
+// These functions cannot go in package proto because they depend on the
+// generated protobuf descriptor messages, which themselves depend on proto.
+package descriptor
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+
+	"github.com/gogo/protobuf/proto"
+)
+
+// extractFile extracts a FileDescriptorProto from a gzip'd buffer.
+func extractFile(gz []byte) (*FileDescriptorProto, error) {
+	r, err := gzip.NewReader(bytes.NewReader(gz))
+	if err != nil {
+		return nil, fmt.Errorf("failed to open gzip reader: %v", err)
+	}
+	defer r.Close()
+
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, fmt.Errorf("failed to uncompress descriptor: %v", err)
+	}
+
+	fd := new(FileDescriptorProto)
+	if err := proto.Unmarshal(b, fd); err != nil {
+		return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err)
+	}
+
+	return fd, nil
+}
+
+// Message is a proto.Message with a method to return its descriptor.
+//
+// Message types generated by the protocol compiler always satisfy
+// the Message interface.
+type Message interface {
+	proto.Message
+	Descriptor() ([]byte, []int)
+}
+
+// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it
+// describing the given message.
+func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) {
+	gz, path := msg.Descriptor()
+	fd, err := extractFile(gz)
+	if err != nil {
+		panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err))
+	}
+
+	md = fd.MessageType[path[0]]
+	for _, i := range path[1:] {
+		md = md.NestedType[i]
+	}
+	return fd, md
+}
+
+// IsScalar reports whether the field is a scalar numeric type.
+func (field *FieldDescriptorProto) IsScalar() bool {
+	if field.Type == nil {
+		return false
+	}
+	switch *field.Type {
+	case FieldDescriptorProto_TYPE_DOUBLE,
+		FieldDescriptorProto_TYPE_FLOAT,
+		FieldDescriptorProto_TYPE_INT64,
+		FieldDescriptorProto_TYPE_UINT64,
+		FieldDescriptorProto_TYPE_INT32,
+		FieldDescriptorProto_TYPE_FIXED64,
+		FieldDescriptorProto_TYPE_FIXED32,
+		FieldDescriptorProto_TYPE_BOOL,
+		FieldDescriptorProto_TYPE_UINT32,
+		FieldDescriptorProto_TYPE_ENUM,
+		FieldDescriptorProto_TYPE_SFIXED32,
+		FieldDescriptorProto_TYPE_SFIXED64,
+		FieldDescriptorProto_TYPE_SINT32,
+		FieldDescriptorProto_TYPE_SINT64:
+		return true
+	default:
+		return false
+	}
+}
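
Since DescriptorProto is itself generated in this package, it satisfies the Message interface defined above and can serve as its own example. A small usage sketch (illustrative only, not part of the vendored files) exercising ForMessage and IsScalar:

```
package main

import (
	"fmt"

	"github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	// Recover the file and message descriptors for DescriptorProto itself.
	fd, md := descriptor.ForMessage(&descriptor.DescriptorProto{})
	fmt.Println("file:", fd.GetName(), "message:", md.GetName())

	// IsScalar reports true for numeric, bool and enum fields, and false for
	// strings, bytes, messages and groups.
	for _, f := range md.GetField() {
		fmt.Printf("%-20s scalar=%v\n", f.GetName(), f.IsScalar())
	}
}
```

The same pattern applies to any gogo-generated message, since the generated Descriptor() method returns the gzipped FileDescriptorProto together with the index path that ForMessage walks.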
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
new file mode 100644
index 0000000..18b2a33
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
@@ -0,0 +1,2865 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: descriptor.proto
+
+package descriptor
+
+import (
+	fmt "fmt"
+	proto "github.com/gogo/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
+
+type FieldDescriptorProto_Type int32
+
+const (
+	// 0 is reserved for errors.
+	// Order is weird for historical reasons.
+	FieldDescriptorProto_TYPE_DOUBLE FieldDescriptorProto_Type = 1
+	FieldDescriptorProto_TYPE_FLOAT  FieldDescriptorProto_Type = 2
+	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT64 if
+	// negative values are likely.
+	FieldDescriptorProto_TYPE_INT64  FieldDescriptorProto_Type = 3
+	FieldDescriptorProto_TYPE_UINT64 FieldDescriptorProto_Type = 4
+	// Not ZigZag encoded.  Negative numbers take 10 bytes.  Use TYPE_SINT32 if
+	// negative values are likely.
+	FieldDescriptorProto_TYPE_INT32   FieldDescriptorProto_Type = 5
+	FieldDescriptorProto_TYPE_FIXED64 FieldDescriptorProto_Type = 6
+	FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7
+	FieldDescriptorProto_TYPE_BOOL    FieldDescriptorProto_Type = 8
+	FieldDescriptorProto_TYPE_STRING  FieldDescriptorProto_Type = 9
+	// Tag-delimited aggregate.
+	// Group type is deprecated and not supported in proto3. However, Proto3
+	// implementations should still be able to parse the group wire format and
+	// treat group fields as unknown fields.
+	FieldDescriptorProto_TYPE_GROUP   FieldDescriptorProto_Type = 10
+	FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11
+	// New in version 2.
+	FieldDescriptorProto_TYPE_BYTES    FieldDescriptorProto_Type = 12
+	FieldDescriptorProto_TYPE_UINT32   FieldDescriptorProto_Type = 13
+	FieldDescriptorProto_TYPE_ENUM     FieldDescriptorProto_Type = 14
+	FieldDescriptorProto_TYPE_SFIXED32 FieldDescriptorProto_Type = 15
+	FieldDescriptorProto_TYPE_SFIXED64 FieldDescriptorProto_Type = 16
+	FieldDescriptorProto_TYPE_SINT32   FieldDescriptorProto_Type = 17
+	FieldDescriptorProto_TYPE_SINT64   FieldDescriptorProto_Type = 18
+)
+
+var FieldDescriptorProto_Type_name = map[int32]string{
+	1:  "TYPE_DOUBLE",
+	2:  "TYPE_FLOAT",
+	3:  "TYPE_INT64",
+	4:  "TYPE_UINT64",
+	5:  "TYPE_INT32",
+	6:  "TYPE_FIXED64",
+	7:  "TYPE_FIXED32",
+	8:  "TYPE_BOOL",
+	9:  "TYPE_STRING",
+	10: "TYPE_GROUP",
+	11: "TYPE_MESSAGE",
+	12: "TYPE_BYTES",
+	13: "TYPE_UINT32",
+	14: "TYPE_ENUM",
+	15: "TYPE_SFIXED32",
+	16: "TYPE_SFIXED64",
+	17: "TYPE_SINT32",
+	18: "TYPE_SINT64",
+}
+
+var FieldDescriptorProto_Type_value = map[string]int32{
+	"TYPE_DOUBLE":   1,
+	"TYPE_FLOAT":    2,
+	"TYPE_INT64":    3,
+	"TYPE_UINT64":   4,
+	"TYPE_INT32":    5,
+	"TYPE_FIXED64":  6,
+	"TYPE_FIXED32":  7,
+	"TYPE_BOOL":     8,
+	"TYPE_STRING":   9,
+	"TYPE_GROUP":    10,
+	"TYPE_MESSAGE":  11,
+	"TYPE_BYTES":    12,
+	"TYPE_UINT32":   13,
+	"TYPE_ENUM":     14,
+	"TYPE_SFIXED32": 15,
+	"TYPE_SFIXED64": 16,
+	"TYPE_SINT32":   17,
+	"TYPE_SINT64":   18,
+}
+
+func (x FieldDescriptorProto_Type) Enum() *FieldDescriptorProto_Type {
+	p := new(FieldDescriptorProto_Type)
+	*p = x
+	return p
+}
+
+func (x FieldDescriptorProto_Type) String() string {
+	return proto.EnumName(FieldDescriptorProto_Type_name, int32(x))
+}
+
+func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Type_value, data, "FieldDescriptorProto_Type")
+	if err != nil {
+		return err
+	}
+	*x = FieldDescriptorProto_Type(value)
+	return nil
+}
+
+func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{4, 0}
+}
+
+type FieldDescriptorProto_Label int32
+
+const (
+	// 0 is reserved for errors
+	FieldDescriptorProto_LABEL_OPTIONAL FieldDescriptorProto_Label = 1
+	FieldDescriptorProto_LABEL_REQUIRED FieldDescriptorProto_Label = 2
+	FieldDescriptorProto_LABEL_REPEATED FieldDescriptorProto_Label = 3
+)
+
+var FieldDescriptorProto_Label_name = map[int32]string{
+	1: "LABEL_OPTIONAL",
+	2: "LABEL_REQUIRED",
+	3: "LABEL_REPEATED",
+}
+
+var FieldDescriptorProto_Label_value = map[string]int32{
+	"LABEL_OPTIONAL": 1,
+	"LABEL_REQUIRED": 2,
+	"LABEL_REPEATED": 3,
+}
+
+func (x FieldDescriptorProto_Label) Enum() *FieldDescriptorProto_Label {
+	p := new(FieldDescriptorProto_Label)
+	*p = x
+	return p
+}
+
+func (x FieldDescriptorProto_Label) String() string {
+	return proto.EnumName(FieldDescriptorProto_Label_name, int32(x))
+}
+
+func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldDescriptorProto_Label_value, data, "FieldDescriptorProto_Label")
+	if err != nil {
+		return err
+	}
+	*x = FieldDescriptorProto_Label(value)
+	return nil
+}
+
+func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{4, 1}
+}
+
+// Generated classes can be optimized for speed or code size.
+type FileOptions_OptimizeMode int32
+
+const (
+	FileOptions_SPEED FileOptions_OptimizeMode = 1
+	// etc.
+	FileOptions_CODE_SIZE    FileOptions_OptimizeMode = 2
+	FileOptions_LITE_RUNTIME FileOptions_OptimizeMode = 3
+)
+
+var FileOptions_OptimizeMode_name = map[int32]string{
+	1: "SPEED",
+	2: "CODE_SIZE",
+	3: "LITE_RUNTIME",
+}
+
+var FileOptions_OptimizeMode_value = map[string]int32{
+	"SPEED":        1,
+	"CODE_SIZE":    2,
+	"LITE_RUNTIME": 3,
+}
+
+func (x FileOptions_OptimizeMode) Enum() *FileOptions_OptimizeMode {
+	p := new(FileOptions_OptimizeMode)
+	*p = x
+	return p
+}
+
+func (x FileOptions_OptimizeMode) String() string {
+	return proto.EnumName(FileOptions_OptimizeMode_name, int32(x))
+}
+
+func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FileOptions_OptimizeMode_value, data, "FileOptions_OptimizeMode")
+	if err != nil {
+		return err
+	}
+	*x = FileOptions_OptimizeMode(value)
+	return nil
+}
+
+func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{10, 0}
+}
+
+type FieldOptions_CType int32
+
+const (
+	// Default mode.
+	FieldOptions_STRING       FieldOptions_CType = 0
+	FieldOptions_CORD         FieldOptions_CType = 1
+	FieldOptions_STRING_PIECE FieldOptions_CType = 2
+)
+
+var FieldOptions_CType_name = map[int32]string{
+	0: "STRING",
+	1: "CORD",
+	2: "STRING_PIECE",
+}
+
+var FieldOptions_CType_value = map[string]int32{
+	"STRING":       0,
+	"CORD":         1,
+	"STRING_PIECE": 2,
+}
+
+func (x FieldOptions_CType) Enum() *FieldOptions_CType {
+	p := new(FieldOptions_CType)
+	*p = x
+	return p
+}
+
+func (x FieldOptions_CType) String() string {
+	return proto.EnumName(FieldOptions_CType_name, int32(x))
+}
+
+func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldOptions_CType_value, data, "FieldOptions_CType")
+	if err != nil {
+		return err
+	}
+	*x = FieldOptions_CType(value)
+	return nil
+}
+
+func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{12, 0}
+}
+
+type FieldOptions_JSType int32
+
+const (
+	// Use the default type.
+	FieldOptions_JS_NORMAL FieldOptions_JSType = 0
+	// Use JavaScript strings.
+	FieldOptions_JS_STRING FieldOptions_JSType = 1
+	// Use JavaScript numbers.
+	FieldOptions_JS_NUMBER FieldOptions_JSType = 2
+)
+
+var FieldOptions_JSType_name = map[int32]string{
+	0: "JS_NORMAL",
+	1: "JS_STRING",
+	2: "JS_NUMBER",
+}
+
+var FieldOptions_JSType_value = map[string]int32{
+	"JS_NORMAL": 0,
+	"JS_STRING": 1,
+	"JS_NUMBER": 2,
+}
+
+func (x FieldOptions_JSType) Enum() *FieldOptions_JSType {
+	p := new(FieldOptions_JSType)
+	*p = x
+	return p
+}
+
+func (x FieldOptions_JSType) String() string {
+	return proto.EnumName(FieldOptions_JSType_name, int32(x))
+}
+
+func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(FieldOptions_JSType_value, data, "FieldOptions_JSType")
+	if err != nil {
+		return err
+	}
+	*x = FieldOptions_JSType(value)
+	return nil
+}
+
+func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{12, 1}
+}
+
+// Is this method side-effect-free (or safe in HTTP parlance), or idempotent,
+// or neither? An HTTP-based RPC implementation may choose the GET verb for
+// safe methods, and the PUT verb for idempotent methods, instead of the default POST.
+type MethodOptions_IdempotencyLevel int32
+
+const (
+	MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0
+	MethodOptions_NO_SIDE_EFFECTS     MethodOptions_IdempotencyLevel = 1
+	MethodOptions_IDEMPOTENT          MethodOptions_IdempotencyLevel = 2
+)
+
+var MethodOptions_IdempotencyLevel_name = map[int32]string{
+	0: "IDEMPOTENCY_UNKNOWN",
+	1: "NO_SIDE_EFFECTS",
+	2: "IDEMPOTENT",
+}
+
+var MethodOptions_IdempotencyLevel_value = map[string]int32{
+	"IDEMPOTENCY_UNKNOWN": 0,
+	"NO_SIDE_EFFECTS":     1,
+	"IDEMPOTENT":          2,
+}
+
+func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel {
+	p := new(MethodOptions_IdempotencyLevel)
+	*p = x
+	return p
+}
+
+func (x MethodOptions_IdempotencyLevel) String() string {
+	return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x))
+}
+
+func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error {
+	value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel")
+	if err != nil {
+		return err
+	}
+	*x = MethodOptions_IdempotencyLevel(value)
+	return nil
+}
+
+func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{17, 0}
+}
+
+// The protocol compiler can output a FileDescriptorSet containing the .proto
+// files it parses.
+type FileDescriptorSet struct {
+	File                 []*FileDescriptorProto `protobuf:"bytes,1,rep,name=file" json:"file,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *FileDescriptorSet) Reset()         { *m = FileDescriptorSet{} }
+func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorSet) ProtoMessage()    {}
+func (*FileDescriptorSet) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{0}
+}
+func (m *FileDescriptorSet) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileDescriptorSet.Unmarshal(m, b)
+}
+func (m *FileDescriptorSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileDescriptorSet.Marshal(b, m, deterministic)
+}
+func (m *FileDescriptorSet) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileDescriptorSet.Merge(m, src)
+}
+func (m *FileDescriptorSet) XXX_Size() int {
+	return xxx_messageInfo_FileDescriptorSet.Size(m)
+}
+func (m *FileDescriptorSet) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileDescriptorSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileDescriptorSet proto.InternalMessageInfo
+
+func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto {
+	if m != nil {
+		return m.File
+	}
+	return nil
+}
+
+// Describes a complete .proto file.
+type FileDescriptorProto struct {
+	Name    *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Package *string `protobuf:"bytes,2,opt,name=package" json:"package,omitempty"`
+	// Names of files imported by this file.
+	Dependency []string `protobuf:"bytes,3,rep,name=dependency" json:"dependency,omitempty"`
+	// Indexes of the public imported files in the dependency list above.
+	PublicDependency []int32 `protobuf:"varint,10,rep,name=public_dependency,json=publicDependency" json:"public_dependency,omitempty"`
+	// Indexes of the weak imported files in the dependency list.
+	// For Google-internal migration only. Do not use.
+	WeakDependency []int32 `protobuf:"varint,11,rep,name=weak_dependency,json=weakDependency" json:"weak_dependency,omitempty"`
+	// All top-level definitions in this file.
+	MessageType []*DescriptorProto        `protobuf:"bytes,4,rep,name=message_type,json=messageType" json:"message_type,omitempty"`
+	EnumType    []*EnumDescriptorProto    `protobuf:"bytes,5,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+	Service     []*ServiceDescriptorProto `protobuf:"bytes,6,rep,name=service" json:"service,omitempty"`
+	Extension   []*FieldDescriptorProto   `protobuf:"bytes,7,rep,name=extension" json:"extension,omitempty"`
+	Options     *FileOptions              `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+	// This field contains optional information about the original source code.
+	// You may safely remove this entire field without harming runtime
+	// functionality of the descriptors -- the information is needed only by
+	// development tools.
+	SourceCodeInfo *SourceCodeInfo `protobuf:"bytes,9,opt,name=source_code_info,json=sourceCodeInfo" json:"source_code_info,omitempty"`
+	// The syntax of the proto file.
+	// The supported values are "proto2" and "proto3".
+	Syntax               *string  `protobuf:"bytes,12,opt,name=syntax" json:"syntax,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *FileDescriptorProto) Reset()         { *m = FileDescriptorProto{} }
+func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FileDescriptorProto) ProtoMessage()    {}
+func (*FileDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{1}
+}
+func (m *FileDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileDescriptorProto.Unmarshal(m, b)
+}
+func (m *FileDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *FileDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileDescriptorProto.Merge(m, src)
+}
+func (m *FileDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_FileDescriptorProto.Size(m)
+}
+func (m *FileDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileDescriptorProto proto.InternalMessageInfo
+
+func (m *FileDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FileDescriptorProto) GetPackage() string {
+	if m != nil && m.Package != nil {
+		return *m.Package
+	}
+	return ""
+}
+
+func (m *FileDescriptorProto) GetDependency() []string {
+	if m != nil {
+		return m.Dependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetPublicDependency() []int32 {
+	if m != nil {
+		return m.PublicDependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetWeakDependency() []int32 {
+	if m != nil {
+		return m.WeakDependency
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetMessageType() []*DescriptorProto {
+	if m != nil {
+		return m.MessageType
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetEnumType() []*EnumDescriptorProto {
+	if m != nil {
+		return m.EnumType
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetService() []*ServiceDescriptorProto {
+	if m != nil {
+		return m.Service
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetExtension() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Extension
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetOptions() *FileOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetSourceCodeInfo() *SourceCodeInfo {
+	if m != nil {
+		return m.SourceCodeInfo
+	}
+	return nil
+}
+
+func (m *FileDescriptorProto) GetSyntax() string {
+	if m != nil && m.Syntax != nil {
+		return *m.Syntax
+	}
+	return ""
+}
+
+// Describes a message type.
+type DescriptorProto struct {
+	Name           *string                           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Field          []*FieldDescriptorProto           `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+	Extension      []*FieldDescriptorProto           `protobuf:"bytes,6,rep,name=extension" json:"extension,omitempty"`
+	NestedType     []*DescriptorProto                `protobuf:"bytes,3,rep,name=nested_type,json=nestedType" json:"nested_type,omitempty"`
+	EnumType       []*EnumDescriptorProto            `protobuf:"bytes,4,rep,name=enum_type,json=enumType" json:"enum_type,omitempty"`
+	ExtensionRange []*DescriptorProto_ExtensionRange `protobuf:"bytes,5,rep,name=extension_range,json=extensionRange" json:"extension_range,omitempty"`
+	OneofDecl      []*OneofDescriptorProto           `protobuf:"bytes,8,rep,name=oneof_decl,json=oneofDecl" json:"oneof_decl,omitempty"`
+	Options        *MessageOptions                   `protobuf:"bytes,7,opt,name=options" json:"options,omitempty"`
+	ReservedRange  []*DescriptorProto_ReservedRange  `protobuf:"bytes,9,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+	// Reserved field names, which may not be used by fields in the same message.
+	// A given name may only be reserved once.
+	ReservedName         []string `protobuf:"bytes,10,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DescriptorProto) Reset()         { *m = DescriptorProto{} }
+func (m *DescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto) ProtoMessage()    {}
+func (*DescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{2}
+}
+func (m *DescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto.Unmarshal(m, b)
+}
+func (m *DescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto.Merge(m, src)
+}
+func (m *DescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto.Size(m)
+}
+func (m *DescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto proto.InternalMessageInfo
+
+func (m *DescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *DescriptorProto) GetField() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Field
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetExtension() []*FieldDescriptorProto {
+	if m != nil {
+		return m.Extension
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetNestedType() []*DescriptorProto {
+	if m != nil {
+		return m.NestedType
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetEnumType() []*EnumDescriptorProto {
+	if m != nil {
+		return m.EnumType
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetExtensionRange() []*DescriptorProto_ExtensionRange {
+	if m != nil {
+		return m.ExtensionRange
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetOneofDecl() []*OneofDescriptorProto {
+	if m != nil {
+		return m.OneofDecl
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetOptions() *MessageOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetReservedRange() []*DescriptorProto_ReservedRange {
+	if m != nil {
+		return m.ReservedRange
+	}
+	return nil
+}
+
+func (m *DescriptorProto) GetReservedName() []string {
+	if m != nil {
+		return m.ReservedName
+	}
+	return nil
+}
+
+type DescriptorProto_ExtensionRange struct {
+	Start                *int32                 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32                 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	Options              *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}               `json:"-"`
+	XXX_unrecognized     []byte                 `json:"-"`
+	XXX_sizecache        int32                  `json:"-"`
+}
+
+func (m *DescriptorProto_ExtensionRange) Reset()         { *m = DescriptorProto_ExtensionRange{} }
+func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ExtensionRange) ProtoMessage()    {}
+func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{2, 0}
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Unmarshal(m, b)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ExtensionRange.Merge(m, src)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto_ExtensionRange.Size(m)
+}
+func (m *DescriptorProto_ExtensionRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto_ExtensionRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto_ExtensionRange proto.InternalMessageInfo
+
+func (m *DescriptorProto_ExtensionRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Range of reserved tag numbers. Reserved tag numbers may not be used by
+// fields or extension ranges in the same message. Reserved ranges may
+// not overlap.
+type DescriptorProto_ReservedRange struct {
+	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DescriptorProto_ReservedRange) Reset()         { *m = DescriptorProto_ReservedRange{} }
+func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) }
+func (*DescriptorProto_ReservedRange) ProtoMessage()    {}
+func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{2, 1}
+}
+func (m *DescriptorProto_ReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Unmarshal(m, b)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Marshal(b, m, deterministic)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DescriptorProto_ReservedRange.Merge(m, src)
+}
+func (m *DescriptorProto_ReservedRange) XXX_Size() int {
+	return xxx_messageInfo_DescriptorProto_ReservedRange.Size(m)
+}
+func (m *DescriptorProto_ReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_DescriptorProto_ReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DescriptorProto_ReservedRange proto.InternalMessageInfo
+
+func (m *DescriptorProto_ReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *DescriptorProto_ReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
+
+type ExtensionRangeOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *ExtensionRangeOptions) Reset()         { *m = ExtensionRangeOptions{} }
+func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) }
+func (*ExtensionRangeOptions) ProtoMessage()    {}
+func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{3}
+}
+
+var extRange_ExtensionRangeOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ExtensionRangeOptions
+}
+
+func (m *ExtensionRangeOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ExtensionRangeOptions.Unmarshal(m, b)
+}
+func (m *ExtensionRangeOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ExtensionRangeOptions.Marshal(b, m, deterministic)
+}
+func (m *ExtensionRangeOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ExtensionRangeOptions.Merge(m, src)
+}
+func (m *ExtensionRangeOptions) XXX_Size() int {
+	return xxx_messageInfo_ExtensionRangeOptions.Size(m)
+}
+func (m *ExtensionRangeOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_ExtensionRangeOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ExtensionRangeOptions proto.InternalMessageInfo
+
+func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+// Describes a field within a message.
+type FieldDescriptorProto struct {
+	Name   *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number *int32                      `protobuf:"varint,3,opt,name=number" json:"number,omitempty"`
+	Label  *FieldDescriptorProto_Label `protobuf:"varint,4,opt,name=label,enum=google.protobuf.FieldDescriptorProto_Label" json:"label,omitempty"`
+	// If type_name is set, this need not be set.  If both this and type_name
+	// are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP.
+	Type *FieldDescriptorProto_Type `protobuf:"varint,5,opt,name=type,enum=google.protobuf.FieldDescriptorProto_Type" json:"type,omitempty"`
+	// For message and enum types, this is the name of the type.  If the name
+	// starts with a '.', it is fully-qualified.  Otherwise, C++-like scoping
+	// rules are used to find the type (i.e. first the nested types within this
+	// message are searched, then within the parent, on up to the root
+	// namespace).
+	TypeName *string `protobuf:"bytes,6,opt,name=type_name,json=typeName" json:"type_name,omitempty"`
+	// For extensions, this is the name of the type being extended.  It is
+	// resolved in the same manner as type_name.
+	Extendee *string `protobuf:"bytes,2,opt,name=extendee" json:"extendee,omitempty"`
+	// For numeric types, contains the original text representation of the value.
+	// For booleans, "true" or "false".
+	// For strings, contains the default text contents (not escaped in any way).
+	// For bytes, contains the C escaped value.  All bytes >= 128 are escaped.
+	// TODO(kenton):  Base-64 encode?
+	DefaultValue *string `protobuf:"bytes,7,opt,name=default_value,json=defaultValue" json:"default_value,omitempty"`
+	// If set, gives the index of a oneof in the containing type's oneof_decl
+	// list.  This field is a member of that oneof.
+	OneofIndex *int32 `protobuf:"varint,9,opt,name=oneof_index,json=oneofIndex" json:"oneof_index,omitempty"`
+	// JSON name of this field. The value is set by protocol compiler. If the
+	// user has set a "json_name" option on this field, that option's value
+	// will be used. Otherwise, it's deduced from the field's name by converting
+	// it to camelCase.
+	JsonName             *string       `protobuf:"bytes,10,opt,name=json_name,json=jsonName" json:"json_name,omitempty"`
+	Options              *FieldOptions `protobuf:"bytes,8,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *FieldDescriptorProto) Reset()         { *m = FieldDescriptorProto{} }
+func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*FieldDescriptorProto) ProtoMessage()    {}
+func (*FieldDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{4}
+}
+func (m *FieldDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FieldDescriptorProto.Unmarshal(m, b)
+}
+func (m *FieldDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FieldDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *FieldDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldDescriptorProto.Merge(m, src)
+}
+func (m *FieldDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_FieldDescriptorProto.Size(m)
+}
+func (m *FieldDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_FieldDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldDescriptorProto proto.InternalMessageInfo
+
+func (m *FieldDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetNumber() int32 {
+	if m != nil && m.Number != nil {
+		return *m.Number
+	}
+	return 0
+}
+
+func (m *FieldDescriptorProto) GetLabel() FieldDescriptorProto_Label {
+	if m != nil && m.Label != nil {
+		return *m.Label
+	}
+	return FieldDescriptorProto_LABEL_OPTIONAL
+}
+
+func (m *FieldDescriptorProto) GetType() FieldDescriptorProto_Type {
+	if m != nil && m.Type != nil {
+		return *m.Type
+	}
+	return FieldDescriptorProto_TYPE_DOUBLE
+}
+
+func (m *FieldDescriptorProto) GetTypeName() string {
+	if m != nil && m.TypeName != nil {
+		return *m.TypeName
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetExtendee() string {
+	if m != nil && m.Extendee != nil {
+		return *m.Extendee
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetDefaultValue() string {
+	if m != nil && m.DefaultValue != nil {
+		return *m.DefaultValue
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetOneofIndex() int32 {
+	if m != nil && m.OneofIndex != nil {
+		return *m.OneofIndex
+	}
+	return 0
+}
+
+func (m *FieldDescriptorProto) GetJsonName() string {
+	if m != nil && m.JsonName != nil {
+		return *m.JsonName
+	}
+	return ""
+}
+
+func (m *FieldDescriptorProto) GetOptions() *FieldOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
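+
+// Illustrative sketch, not part of the generated code: it shows how the
+// nil-safe getters above are typically combined when inspecting a field. The
+// helper name describeField and the use of the fmt package are assumptions.
+//
+//	func describeField(fd *FieldDescriptorProto) string {
+//		// For message and enum fields, GetTypeName reports the referenced type;
+//		// a leading '.' means the name is fully qualified.
+//		if tn := fd.GetTypeName(); tn != "" {
+//			return fmt.Sprintf("%s (#%d) -> %s", fd.GetName(), fd.GetNumber(), tn)
+//		}
+//		return fmt.Sprintf("%s (#%d) %v", fd.GetName(), fd.GetNumber(), fd.GetType())
+//	}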
+
+// Describes a oneof.
+type OneofDescriptorProto struct {
+	Name                 *string       `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Options              *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *OneofDescriptorProto) Reset()         { *m = OneofDescriptorProto{} }
+func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*OneofDescriptorProto) ProtoMessage()    {}
+func (*OneofDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{5}
+}
+func (m *OneofDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OneofDescriptorProto.Unmarshal(m, b)
+}
+func (m *OneofDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OneofDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *OneofDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OneofDescriptorProto.Merge(m, src)
+}
+func (m *OneofDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_OneofDescriptorProto.Size(m)
+}
+func (m *OneofDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_OneofDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OneofDescriptorProto proto.InternalMessageInfo
+
+func (m *OneofDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *OneofDescriptorProto) GetOptions() *OneofOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes an enum type.
+type EnumDescriptorProto struct {
+	Name    *string                     `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Value   []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+	Options *EnumOptions                `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	// Range of reserved numeric values. Reserved numeric values may not be used
+	// by enum values in the same enum declaration. Reserved ranges may not
+	// overlap.
+	ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"`
+	// Reserved enum value names, which may not be reused. A given name may only
+	// be reserved once.
+	ReservedName         []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EnumDescriptorProto) Reset()         { *m = EnumDescriptorProto{} }
+func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto) ProtoMessage()    {}
+func (*EnumDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{6}
+}
+func (m *EnumDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumDescriptorProto.Unmarshal(m, b)
+}
+func (m *EnumDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *EnumDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto.Merge(m, src)
+}
+func (m *EnumDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_EnumDescriptorProto.Size(m)
+}
+func (m *EnumDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumDescriptorProto proto.InternalMessageInfo
+
+func (m *EnumDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *EnumDescriptorProto) GetValue() []*EnumValueDescriptorProto {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetOptions() *EnumOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange {
+	if m != nil {
+		return m.ReservedRange
+	}
+	return nil
+}
+
+func (m *EnumDescriptorProto) GetReservedName() []string {
+	if m != nil {
+		return m.ReservedName
+	}
+	return nil
+}
+
+// Range of reserved numeric values. Reserved values may not be used by
+// entries in the same enum. Reserved ranges may not overlap.
+//
+// Note that this is distinct from DescriptorProto.ReservedRange in that it
+// is inclusive such that it can appropriately represent the entire int32
+// domain.
+type EnumDescriptorProto_EnumReservedRange struct {
+	Start                *int32   `protobuf:"varint,1,opt,name=start" json:"start,omitempty"`
+	End                  *int32   `protobuf:"varint,2,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) Reset()         { *m = EnumDescriptorProto_EnumReservedRange{} }
+func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) }
+func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage()    {}
+func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{6, 0}
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Unmarshal(m, b)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Marshal(b, m, deterministic)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Merge(m, src)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_Size() int {
+	return xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.Size(m)
+}
+func (m *EnumDescriptorProto_EnumReservedRange) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumDescriptorProto_EnumReservedRange.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumDescriptorProto_EnumReservedRange proto.InternalMessageInfo
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 {
+	if m != nil && m.Start != nil {
+		return *m.Start
+	}
+	return 0
+}
+
+func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
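+
+// Illustrative sketch, not part of the generated code; isReserved is a
+// hypothetical helper. Because EnumReservedRange is inclusive on both ends
+// (unlike DescriptorProto.ReservedRange), a membership test uses <= on End.
+//
+//	func isReserved(r *EnumDescriptorProto_EnumReservedRange, n int32) bool {
+//		return n >= r.GetStart() && n <= r.GetEnd()
+//	}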
+
+// Describes a value within an enum.
+type EnumValueDescriptorProto struct {
+	Name                 *string           `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Number               *int32            `protobuf:"varint,2,opt,name=number" json:"number,omitempty"`
+	Options              *EnumValueOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *EnumValueDescriptorProto) Reset()         { *m = EnumValueDescriptorProto{} }
+func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*EnumValueDescriptorProto) ProtoMessage()    {}
+func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{7}
+}
+func (m *EnumValueDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumValueDescriptorProto.Unmarshal(m, b)
+}
+func (m *EnumValueDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumValueDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *EnumValueDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumValueDescriptorProto.Merge(m, src)
+}
+func (m *EnumValueDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_EnumValueDescriptorProto.Size(m)
+}
+func (m *EnumValueDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumValueDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumValueDescriptorProto proto.InternalMessageInfo
+
+func (m *EnumValueDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *EnumValueDescriptorProto) GetNumber() int32 {
+	if m != nil && m.Number != nil {
+		return *m.Number
+	}
+	return 0
+}
+
+func (m *EnumValueDescriptorProto) GetOptions() *EnumValueOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a service.
+type ServiceDescriptorProto struct {
+	Name                 *string                  `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	Method               []*MethodDescriptorProto `protobuf:"bytes,2,rep,name=method" json:"method,omitempty"`
+	Options              *ServiceOptions          `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *ServiceDescriptorProto) Reset()         { *m = ServiceDescriptorProto{} }
+func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*ServiceDescriptorProto) ProtoMessage()    {}
+func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{8}
+}
+func (m *ServiceDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServiceDescriptorProto.Unmarshal(m, b)
+}
+func (m *ServiceDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServiceDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *ServiceDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceDescriptorProto.Merge(m, src)
+}
+func (m *ServiceDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_ServiceDescriptorProto.Size(m)
+}
+func (m *ServiceDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceDescriptorProto proto.InternalMessageInfo
+
+func (m *ServiceDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *ServiceDescriptorProto) GetMethod() []*MethodDescriptorProto {
+	if m != nil {
+		return m.Method
+	}
+	return nil
+}
+
+func (m *ServiceDescriptorProto) GetOptions() *ServiceOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+// Describes a method of a service.
+type MethodDescriptorProto struct {
+	Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+	// Input and output type names.  These are resolved in the same way as
+	// FieldDescriptorProto.type_name, but must refer to a message type.
+	InputType  *string        `protobuf:"bytes,2,opt,name=input_type,json=inputType" json:"input_type,omitempty"`
+	OutputType *string        `protobuf:"bytes,3,opt,name=output_type,json=outputType" json:"output_type,omitempty"`
+	Options    *MethodOptions `protobuf:"bytes,4,opt,name=options" json:"options,omitempty"`
+	// Identifies if client streams multiple client messages
+	ClientStreaming *bool `protobuf:"varint,5,opt,name=client_streaming,json=clientStreaming,def=0" json:"client_streaming,omitempty"`
+	// Identifies if server streams multiple server messages
+	ServerStreaming      *bool    `protobuf:"varint,6,opt,name=server_streaming,json=serverStreaming,def=0" json:"server_streaming,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MethodDescriptorProto) Reset()         { *m = MethodDescriptorProto{} }
+func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) }
+func (*MethodDescriptorProto) ProtoMessage()    {}
+func (*MethodDescriptorProto) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{9}
+}
+func (m *MethodDescriptorProto) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MethodDescriptorProto.Unmarshal(m, b)
+}
+func (m *MethodDescriptorProto) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MethodDescriptorProto.Marshal(b, m, deterministic)
+}
+func (m *MethodDescriptorProto) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MethodDescriptorProto.Merge(m, src)
+}
+func (m *MethodDescriptorProto) XXX_Size() int {
+	return xxx_messageInfo_MethodDescriptorProto.Size(m)
+}
+func (m *MethodDescriptorProto) XXX_DiscardUnknown() {
+	xxx_messageInfo_MethodDescriptorProto.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MethodDescriptorProto proto.InternalMessageInfo
+
+const Default_MethodDescriptorProto_ClientStreaming bool = false
+const Default_MethodDescriptorProto_ServerStreaming bool = false
+
+func (m *MethodDescriptorProto) GetName() string {
+	if m != nil && m.Name != nil {
+		return *m.Name
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetInputType() string {
+	if m != nil && m.InputType != nil {
+		return *m.InputType
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetOutputType() string {
+	if m != nil && m.OutputType != nil {
+		return *m.OutputType
+	}
+	return ""
+}
+
+func (m *MethodDescriptorProto) GetOptions() *MethodOptions {
+	if m != nil {
+		return m.Options
+	}
+	return nil
+}
+
+func (m *MethodDescriptorProto) GetClientStreaming() bool {
+	if m != nil && m.ClientStreaming != nil {
+		return *m.ClientStreaming
+	}
+	return Default_MethodDescriptorProto_ClientStreaming
+}
+
+func (m *MethodDescriptorProto) GetServerStreaming() bool {
+	if m != nil && m.ServerStreaming != nil {
+		return *m.ServerStreaming
+	}
+	return Default_MethodDescriptorProto_ServerStreaming
+}
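+
+// Illustrative sketch, not part of the generated code; rpcKind is a
+// hypothetical helper. The two streaming flags default to false, so an RPC
+// with neither flag set is a plain unary call.
+//
+//	func rpcKind(md *MethodDescriptorProto) string {
+//		switch {
+//		case md.GetClientStreaming() && md.GetServerStreaming():
+//			return "bidirectional streaming"
+//		case md.GetClientStreaming():
+//			return "client streaming"
+//		case md.GetServerStreaming():
+//			return "server streaming"
+//		default:
+//			return "unary"
+//		}
+//	}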
+
+type FileOptions struct {
+	// Sets the Java package where classes generated from this .proto will be
+	// placed.  By default, the proto package is used, but this is often
+	// inappropriate because proto packages do not normally start with backwards
+	// domain names.
+	JavaPackage *string `protobuf:"bytes,1,opt,name=java_package,json=javaPackage" json:"java_package,omitempty"`
+	// If set, all the classes from the .proto file are wrapped in a single
+	// outer class with the given name.  This applies to both Proto1
+	// (equivalent to the old "--one_java_file" option) and Proto2 (where
+	// a .proto always translates to a single class, but you may want to
+	// explicitly choose the class name).
+	JavaOuterClassname *string `protobuf:"bytes,8,opt,name=java_outer_classname,json=javaOuterClassname" json:"java_outer_classname,omitempty"`
+	// If set true, then the Java code generator will generate a separate .java
+	// file for each top-level message, enum, and service defined in the .proto
+	// file.  Thus, these types will *not* be nested inside the outer class
+	// named by java_outer_classname.  However, the outer class will still be
+	// generated to contain the file's getDescriptor() method as well as any
+	// top-level extensions defined in the file.
+	JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"`
+	// This option does nothing.
+	JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // Deprecated: Do not use.
+	// If set true, then the Java2 code generator will generate code that
+	// throws an exception whenever an attempt is made to assign a non-UTF-8
+	// byte sequence to a string field.
+	// Message reflection will do the same.
+	// However, an extension field still accepts non-UTF-8 byte sequences.
+	// This option has no effect when used with the lite runtime.
+	JavaStringCheckUtf8 *bool                     `protobuf:"varint,27,opt,name=java_string_check_utf8,json=javaStringCheckUtf8,def=0" json:"java_string_check_utf8,omitempty"`
+	OptimizeFor         *FileOptions_OptimizeMode `protobuf:"varint,9,opt,name=optimize_for,json=optimizeFor,enum=google.protobuf.FileOptions_OptimizeMode,def=1" json:"optimize_for,omitempty"`
+	// Sets the Go package where structs generated from this .proto will be
+	// placed. If omitted, the Go package will be derived from the following:
+	//   - The basename of the package import path, if provided.
+	//   - Otherwise, the package statement in the .proto file, if present.
+	//   - Otherwise, the basename of the .proto file, without extension.
+	GoPackage *string `protobuf:"bytes,11,opt,name=go_package,json=goPackage" json:"go_package,omitempty"`
+	// Should generic services be generated in each language?  "Generic" services
+	// are not specific to any particular RPC system.  They are generated by the
+	// main code generators in each language (without additional plugins).
+	// Generic services were the only kind of service generation supported by
+	// early versions of google.protobuf.
+	//
+	// Generic services are now considered deprecated in favor of using plugins
+	// that generate code specific to your particular RPC system.  Therefore,
+	// these default to false.  Old code which depends on generic services should
+	// explicitly set them to true.
+	CcGenericServices   *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"`
+	JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"`
+	PyGenericServices   *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"`
+	PhpGenericServices  *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"`
+	// Is this file deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for everything in the file, or it will be completely ignored; in the very
+	// least, this is a formalization for deprecating files.
+	Deprecated *bool `protobuf:"varint,23,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Enables the use of arenas for the proto messages in this file. This applies
+	// only to generated classes for C++.
+	CcEnableArenas *bool `protobuf:"varint,31,opt,name=cc_enable_arenas,json=ccEnableArenas,def=0" json:"cc_enable_arenas,omitempty"`
+	// Sets the objective c class prefix which is prepended to all objective c
+	// generated classes from this .proto. There is no default.
+	ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"`
+	// Namespace for generated classes; defaults to the package.
+	CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"`
+	// By default Swift generators will take the proto package and CamelCase it
+	// replacing '.' with underscore and use that to prefix the types/symbols
+	// defined. When this option is provided, they will use this value instead
+	// to prefix the types/symbols defined.
+	SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"`
+	// Sets the php class prefix which is prepended to all php generated classes
+	// from this .proto. Default is empty.
+	PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"`
+	// Use this option to change the namespace of php generated classes. Default
+	// is empty. When this option is empty, the package name will be used for
+	// determining the namespace.
+	PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"`
+	// Use this option to change the namespace of php generated metadata classes.
+	// Default is empty. When this option is empty, the proto file name will be
+	// used for determining the namespace.
+	PhpMetadataNamespace *string `protobuf:"bytes,44,opt,name=php_metadata_namespace,json=phpMetadataNamespace" json:"php_metadata_namespace,omitempty"`
+	// Use this option to change the package of ruby generated classes. Default
+	// is empty. When this option is not set, the package name will be used for
+	// determining the ruby package.
+	RubyPackage *string `protobuf:"bytes,45,opt,name=ruby_package,json=rubyPackage" json:"ruby_package,omitempty"`
+	// The parser stores options it doesn't recognize here.
+	// See the documentation for the "Options" section above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *FileOptions) Reset()         { *m = FileOptions{} }
+func (m *FileOptions) String() string { return proto.CompactTextString(m) }
+func (*FileOptions) ProtoMessage()    {}
+func (*FileOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{10}
+}
+
+var extRange_FileOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FileOptions
+}
+
+func (m *FileOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FileOptions.Unmarshal(m, b)
+}
+func (m *FileOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FileOptions.Marshal(b, m, deterministic)
+}
+func (m *FileOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FileOptions.Merge(m, src)
+}
+func (m *FileOptions) XXX_Size() int {
+	return xxx_messageInfo_FileOptions.Size(m)
+}
+func (m *FileOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_FileOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FileOptions proto.InternalMessageInfo
+
+const Default_FileOptions_JavaMultipleFiles bool = false
+const Default_FileOptions_JavaStringCheckUtf8 bool = false
+const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED
+const Default_FileOptions_CcGenericServices bool = false
+const Default_FileOptions_JavaGenericServices bool = false
+const Default_FileOptions_PyGenericServices bool = false
+const Default_FileOptions_PhpGenericServices bool = false
+const Default_FileOptions_Deprecated bool = false
+const Default_FileOptions_CcEnableArenas bool = false
+
+func (m *FileOptions) GetJavaPackage() string {
+	if m != nil && m.JavaPackage != nil {
+		return *m.JavaPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaOuterClassname() string {
+	if m != nil && m.JavaOuterClassname != nil {
+		return *m.JavaOuterClassname
+	}
+	return ""
+}
+
+func (m *FileOptions) GetJavaMultipleFiles() bool {
+	if m != nil && m.JavaMultipleFiles != nil {
+		return *m.JavaMultipleFiles
+	}
+	return Default_FileOptions_JavaMultipleFiles
+}
+
+// Deprecated: Do not use.
+func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool {
+	if m != nil && m.JavaGenerateEqualsAndHash != nil {
+		return *m.JavaGenerateEqualsAndHash
+	}
+	return false
+}
+
+func (m *FileOptions) GetJavaStringCheckUtf8() bool {
+	if m != nil && m.JavaStringCheckUtf8 != nil {
+		return *m.JavaStringCheckUtf8
+	}
+	return Default_FileOptions_JavaStringCheckUtf8
+}
+
+func (m *FileOptions) GetOptimizeFor() FileOptions_OptimizeMode {
+	if m != nil && m.OptimizeFor != nil {
+		return *m.OptimizeFor
+	}
+	return Default_FileOptions_OptimizeFor
+}
+
+func (m *FileOptions) GetGoPackage() string {
+	if m != nil && m.GoPackage != nil {
+		return *m.GoPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetCcGenericServices() bool {
+	if m != nil && m.CcGenericServices != nil {
+		return *m.CcGenericServices
+	}
+	return Default_FileOptions_CcGenericServices
+}
+
+func (m *FileOptions) GetJavaGenericServices() bool {
+	if m != nil && m.JavaGenericServices != nil {
+		return *m.JavaGenericServices
+	}
+	return Default_FileOptions_JavaGenericServices
+}
+
+func (m *FileOptions) GetPyGenericServices() bool {
+	if m != nil && m.PyGenericServices != nil {
+		return *m.PyGenericServices
+	}
+	return Default_FileOptions_PyGenericServices
+}
+
+func (m *FileOptions) GetPhpGenericServices() bool {
+	if m != nil && m.PhpGenericServices != nil {
+		return *m.PhpGenericServices
+	}
+	return Default_FileOptions_PhpGenericServices
+}
+
+func (m *FileOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_FileOptions_Deprecated
+}
+
+func (m *FileOptions) GetCcEnableArenas() bool {
+	if m != nil && m.CcEnableArenas != nil {
+		return *m.CcEnableArenas
+	}
+	return Default_FileOptions_CcEnableArenas
+}
+
+func (m *FileOptions) GetObjcClassPrefix() string {
+	if m != nil && m.ObjcClassPrefix != nil {
+		return *m.ObjcClassPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetCsharpNamespace() string {
+	if m != nil && m.CsharpNamespace != nil {
+		return *m.CsharpNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetSwiftPrefix() string {
+	if m != nil && m.SwiftPrefix != nil {
+		return *m.SwiftPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpClassPrefix() string {
+	if m != nil && m.PhpClassPrefix != nil {
+		return *m.PhpClassPrefix
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpNamespace() string {
+	if m != nil && m.PhpNamespace != nil {
+		return *m.PhpNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetPhpMetadataNamespace() string {
+	if m != nil && m.PhpMetadataNamespace != nil {
+		return *m.PhpMetadataNamespace
+	}
+	return ""
+}
+
+func (m *FileOptions) GetRubyPackage() string {
+	if m != nil && m.RubyPackage != nil {
+		return *m.RubyPackage
+	}
+	return ""
+}
+
+func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
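+
+// Illustrative sketch, not part of the generated code; goImportHint is a
+// hypothetical helper. Unset options fall back to the Default_FileOptions_*
+// constants above (for example, OptimizeFor defaults to SPEED), and every
+// getter is safe to call on a nil *FileOptions.
+//
+//	func goImportHint(opts *FileOptions) string {
+//		if p := opts.GetGoPackage(); p != "" {
+//			return p
+//		}
+//		// Otherwise the Go package is derived from the import path basename,
+//		// the .proto package statement, or the .proto file name, in that order.
+//		return ""
+//	}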
+
+type MessageOptions struct {
+	// Set true to use the old proto1 MessageSet wire format for extensions.
+	// This is provided for backwards-compatibility with the MessageSet wire
+	// format.  You should not use this for any other reason:  It's less
+	// efficient, has fewer features, and is more complicated.
+	//
+	// The message must be defined exactly as follows:
+	//   message Foo {
+	//     option message_set_wire_format = true;
+	//     extensions 4 to max;
+	//   }
+	// Note that the message cannot have any defined fields; MessageSets only
+	// have extensions.
+	//
+	// All extensions of your type must be singular messages; e.g. they cannot
+	// be int32s, enums, or repeated messages.
+	//
+	// Because this is an option, the above two restrictions are not enforced by
+	// the protocol compiler.
+	MessageSetWireFormat *bool `protobuf:"varint,1,opt,name=message_set_wire_format,json=messageSetWireFormat,def=0" json:"message_set_wire_format,omitempty"`
+	// Disables the generation of the standard "descriptor()" accessor, which can
+	// conflict with a field of the same name.  This is meant to make migration
+	// from proto1 easier; new code should avoid fields named "descriptor".
+	NoStandardDescriptorAccessor *bool `protobuf:"varint,2,opt,name=no_standard_descriptor_accessor,json=noStandardDescriptorAccessor,def=0" json:"no_standard_descriptor_accessor,omitempty"`
+	// Is this message deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the message, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating messages.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// Whether the message is an automatically generated map entry type for the
+	// maps field.
+	//
+	// For maps fields:
+	//     map<KeyType, ValueType> map_field = 1;
+	// The parsed descriptor looks like:
+	//     message MapFieldEntry {
+	//         option map_entry = true;
+	//         optional KeyType key = 1;
+	//         optional ValueType value = 2;
+	//     }
+	//     repeated MapFieldEntry map_field = 1;
+	//
+	// Implementations may choose not to generate the map_entry=true message, but
+	// use a native map in the target language to hold the keys and values.
+	// The reflection APIs in such implementations still need to work as
+	// if the field is a repeated message field.
+	//
+	// NOTE: Do not set the option in .proto files. Always use the maps syntax
+	// instead. The option should only be implicitly set by the proto compiler
+	// parser.
+	MapEntry *bool `protobuf:"varint,7,opt,name=map_entry,json=mapEntry" json:"map_entry,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *MessageOptions) Reset()         { *m = MessageOptions{} }
+func (m *MessageOptions) String() string { return proto.CompactTextString(m) }
+func (*MessageOptions) ProtoMessage()    {}
+func (*MessageOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{11}
+}
+
+var extRange_MessageOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MessageOptions
+}
+
+func (m *MessageOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MessageOptions.Unmarshal(m, b)
+}
+func (m *MessageOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MessageOptions.Marshal(b, m, deterministic)
+}
+func (m *MessageOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MessageOptions.Merge(m, src)
+}
+func (m *MessageOptions) XXX_Size() int {
+	return xxx_messageInfo_MessageOptions.Size(m)
+}
+func (m *MessageOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_MessageOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MessageOptions proto.InternalMessageInfo
+
+const Default_MessageOptions_MessageSetWireFormat bool = false
+const Default_MessageOptions_NoStandardDescriptorAccessor bool = false
+const Default_MessageOptions_Deprecated bool = false
+
+func (m *MessageOptions) GetMessageSetWireFormat() bool {
+	if m != nil && m.MessageSetWireFormat != nil {
+		return *m.MessageSetWireFormat
+	}
+	return Default_MessageOptions_MessageSetWireFormat
+}
+
+func (m *MessageOptions) GetNoStandardDescriptorAccessor() bool {
+	if m != nil && m.NoStandardDescriptorAccessor != nil {
+		return *m.NoStandardDescriptorAccessor
+	}
+	return Default_MessageOptions_NoStandardDescriptorAccessor
+}
+
+func (m *MessageOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MessageOptions_Deprecated
+}
+
+func (m *MessageOptions) GetMapEntry() bool {
+	if m != nil && m.MapEntry != nil {
+		return *m.MapEntry
+	}
+	return false
+}
+
+func (m *MessageOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
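+
+// Illustrative sketch, not part of the generated code; isMapEntry is a
+// hypothetical helper. Code generators typically skip synthetic map-entry
+// messages (map_entry=true) and emit a native map type instead.
+//
+//	func isMapEntry(dp *DescriptorProto) bool {
+//		// Only the proto compiler sets map_entry; .proto authors use map syntax.
+//		return dp.GetOptions().GetMapEntry()
+//	}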
+
+type FieldOptions struct {
+	// The ctype option instructs the C++ code generator to use a different
+	// representation of the field than it normally would.  See the specific
+	// options below.  This option is not yet implemented in the open source
+	// release -- sorry, we'll try to include it in a future version!
+	Ctype *FieldOptions_CType `protobuf:"varint,1,opt,name=ctype,enum=google.protobuf.FieldOptions_CType,def=0" json:"ctype,omitempty"`
+	// The packed option can be enabled for repeated primitive fields to enable
+	// a more efficient representation on the wire. Rather than repeatedly
+	// writing the tag and type for each element, the entire array is encoded as
+	// a single length-delimited blob. In proto3, only explicitly setting it to
+	// false will avoid using packed encoding.
+	Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"`
+	// The jstype option determines the JavaScript type used for values of the
+	// field.  The option is permitted only for 64 bit integral and fixed types
+	// (int64, uint64, sint64, fixed64, sfixed64).  A field with jstype JS_STRING
+	// is represented as a JavaScript string, which avoids loss of precision that
+	// can happen when a large value is converted to a floating point JavaScript number.
+	// Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
+	// use the JavaScript "number" type.  The behavior of the default option
+	// JS_NORMAL is implementation dependent.
+	//
+	// This option is an enum to permit additional types to be added, e.g.
+	// goog.math.Integer.
+	Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"`
+	// Should this field be parsed lazily?  Lazy applies only to message-type
+	// fields.  It means that when the outer message is initially parsed, the
+	// inner message's contents will not be parsed but instead stored in encoded
+	// form.  The inner message will actually be parsed when it is first accessed.
+	//
+	// This is only a hint.  Implementations are free to choose whether to use
+	// eager or lazy parsing regardless of the value of this option.  However,
+	// setting this option true suggests that the protocol author believes that
+	// using lazy parsing on this field is worth the additional bookkeeping
+	// overhead typically needed to implement it.
+	//
+	// This option does not affect the public interface of any generated code;
+	// all method signatures remain the same.  Furthermore, thread-safety of the
+	// interface is not affected by this option; const methods remain safe to
+	// call from multiple threads concurrently, while non-const methods continue
+	// to require exclusive access.
+	//
+	//
+	// Note that implementations may choose not to check required fields within
+	// a lazy sub-message.  That is, calling IsInitialized() on the outer message
+	// may return true even if the inner message has missing required fields.
+	// This is necessary because otherwise the inner message would have to be
+	// parsed in order to perform the check, defeating the purpose of lazy
+	// parsing.  An implementation which chooses not to check required fields
+	// must be consistent about it.  That is, for any particular sub-message, the
+	// implementation must either *always* check its required fields, or *never*
+	// check its required fields, regardless of whether or not the message has
+	// been parsed.
+	Lazy *bool `protobuf:"varint,5,opt,name=lazy,def=0" json:"lazy,omitempty"`
+	// Is this field deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for accessors, or it will be completely ignored; in the very least, this
+	// is a formalization for deprecating fields.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// For Google-internal migration only. Do not use.
+	Weak *bool `protobuf:"varint,10,opt,name=weak,def=0" json:"weak,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *FieldOptions) Reset()         { *m = FieldOptions{} }
+func (m *FieldOptions) String() string { return proto.CompactTextString(m) }
+func (*FieldOptions) ProtoMessage()    {}
+func (*FieldOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{12}
+}
+
+var extRange_FieldOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_FieldOptions
+}
+
+func (m *FieldOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FieldOptions.Unmarshal(m, b)
+}
+func (m *FieldOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FieldOptions.Marshal(b, m, deterministic)
+}
+func (m *FieldOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FieldOptions.Merge(m, src)
+}
+func (m *FieldOptions) XXX_Size() int {
+	return xxx_messageInfo_FieldOptions.Size(m)
+}
+func (m *FieldOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_FieldOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FieldOptions proto.InternalMessageInfo
+
+const Default_FieldOptions_Ctype FieldOptions_CType = FieldOptions_STRING
+const Default_FieldOptions_Jstype FieldOptions_JSType = FieldOptions_JS_NORMAL
+const Default_FieldOptions_Lazy bool = false
+const Default_FieldOptions_Deprecated bool = false
+const Default_FieldOptions_Weak bool = false
+
+func (m *FieldOptions) GetCtype() FieldOptions_CType {
+	if m != nil && m.Ctype != nil {
+		return *m.Ctype
+	}
+	return Default_FieldOptions_Ctype
+}
+
+func (m *FieldOptions) GetPacked() bool {
+	if m != nil && m.Packed != nil {
+		return *m.Packed
+	}
+	return false
+}
+
+func (m *FieldOptions) GetJstype() FieldOptions_JSType {
+	if m != nil && m.Jstype != nil {
+		return *m.Jstype
+	}
+	return Default_FieldOptions_Jstype
+}
+
+func (m *FieldOptions) GetLazy() bool {
+	if m != nil && m.Lazy != nil {
+		return *m.Lazy
+	}
+	return Default_FieldOptions_Lazy
+}
+
+func (m *FieldOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_FieldOptions_Deprecated
+}
+
+func (m *FieldOptions) GetWeak() bool {
+	if m != nil && m.Weak != nil {
+		return *m.Weak
+	}
+	return Default_FieldOptions_Weak
+}
+
+func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
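+
+// Illustrative sketch, not part of the generated code; usePackedEncoding is a
+// hypothetical helper for a repeated scalar field. An explicit packed setting
+// always wins; otherwise proto3 packs by default and proto2 does not.
+//
+//	func usePackedEncoding(fo *FieldOptions, proto3 bool) bool {
+//		if fo != nil && fo.Packed != nil {
+//			return *fo.Packed
+//		}
+//		return proto3
+//	}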
+
+type OneofOptions struct {
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *OneofOptions) Reset()         { *m = OneofOptions{} }
+func (m *OneofOptions) String() string { return proto.CompactTextString(m) }
+func (*OneofOptions) ProtoMessage()    {}
+func (*OneofOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{13}
+}
+
+var extRange_OneofOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_OneofOptions
+}
+
+func (m *OneofOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OneofOptions.Unmarshal(m, b)
+}
+func (m *OneofOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OneofOptions.Marshal(b, m, deterministic)
+}
+func (m *OneofOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OneofOptions.Merge(m, src)
+}
+func (m *OneofOptions) XXX_Size() int {
+	return xxx_messageInfo_OneofOptions.Size(m)
+}
+func (m *OneofOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_OneofOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OneofOptions proto.InternalMessageInfo
+
+func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type EnumOptions struct {
+	// Set this option to true to allow mapping different tag names to the same
+	// value.
+	AllowAlias *bool `protobuf:"varint,2,opt,name=allow_alias,json=allowAlias" json:"allow_alias,omitempty"`
+	// Is this enum deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the enum, or it will be completely ignored; in the very least, this
+	// is a formalization for deprecating enums.
+	Deprecated *bool `protobuf:"varint,3,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *EnumOptions) Reset()         { *m = EnumOptions{} }
+func (m *EnumOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumOptions) ProtoMessage()    {}
+func (*EnumOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{14}
+}
+
+var extRange_EnumOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_EnumOptions
+}
+
+func (m *EnumOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumOptions.Unmarshal(m, b)
+}
+func (m *EnumOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumOptions.Marshal(b, m, deterministic)
+}
+func (m *EnumOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumOptions.Merge(m, src)
+}
+func (m *EnumOptions) XXX_Size() int {
+	return xxx_messageInfo_EnumOptions.Size(m)
+}
+func (m *EnumOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumOptions proto.InternalMessageInfo
+
+const Default_EnumOptions_Deprecated bool = false
+
+func (m *EnumOptions) GetAllowAlias() bool {
+	if m != nil && m.AllowAlias != nil {
+		return *m.AllowAlias
+	}
+	return false
+}
+
+func (m *EnumOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_EnumOptions_Deprecated
+}
+
+func (m *EnumOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type EnumValueOptions struct {
+	// Is this enum value deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the enum value, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating enum values.
+	Deprecated *bool `protobuf:"varint,1,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *EnumValueOptions) Reset()         { *m = EnumValueOptions{} }
+func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) }
+func (*EnumValueOptions) ProtoMessage()    {}
+func (*EnumValueOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{15}
+}
+
+var extRange_EnumValueOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_EnumValueOptions
+}
+
+func (m *EnumValueOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EnumValueOptions.Unmarshal(m, b)
+}
+func (m *EnumValueOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EnumValueOptions.Marshal(b, m, deterministic)
+}
+func (m *EnumValueOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EnumValueOptions.Merge(m, src)
+}
+func (m *EnumValueOptions) XXX_Size() int {
+	return xxx_messageInfo_EnumValueOptions.Size(m)
+}
+func (m *EnumValueOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_EnumValueOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EnumValueOptions proto.InternalMessageInfo
+
+const Default_EnumValueOptions_Deprecated bool = false
+
+func (m *EnumValueOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_EnumValueOptions_Deprecated
+}
+
+func (m *EnumValueOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type ServiceOptions struct {
+	// Is this service deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the service, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating services.
+	Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *ServiceOptions) Reset()         { *m = ServiceOptions{} }
+func (m *ServiceOptions) String() string { return proto.CompactTextString(m) }
+func (*ServiceOptions) ProtoMessage()    {}
+func (*ServiceOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{16}
+}
+
+var extRange_ServiceOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_ServiceOptions
+}
+
+func (m *ServiceOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ServiceOptions.Unmarshal(m, b)
+}
+func (m *ServiceOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ServiceOptions.Marshal(b, m, deterministic)
+}
+func (m *ServiceOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ServiceOptions.Merge(m, src)
+}
+func (m *ServiceOptions) XXX_Size() int {
+	return xxx_messageInfo_ServiceOptions.Size(m)
+}
+func (m *ServiceOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_ServiceOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ServiceOptions proto.InternalMessageInfo
+
+const Default_ServiceOptions_Deprecated bool = false
+
+func (m *ServiceOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_ServiceOptions_Deprecated
+}
+
+func (m *ServiceOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
+
+type MethodOptions struct {
+	// Is this method deprecated?
+	// Depending on the target platform, this can emit Deprecated annotations
+	// for the method, or it will be completely ignored; in the very least,
+	// this is a formalization for deprecating methods.
+	Deprecated       *bool                           `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"`
+	IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"`
+	// The parser stores options it doesn't recognize here. See above.
+	UninterpretedOption          []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"`
+	XXX_NoUnkeyedLiteral         struct{}               `json:"-"`
+	proto.XXX_InternalExtensions `json:"-"`
+	XXX_unrecognized             []byte `json:"-"`
+	XXX_sizecache                int32  `json:"-"`
+}
+
+func (m *MethodOptions) Reset()         { *m = MethodOptions{} }
+func (m *MethodOptions) String() string { return proto.CompactTextString(m) }
+func (*MethodOptions) ProtoMessage()    {}
+func (*MethodOptions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{17}
+}
+
+var extRange_MethodOptions = []proto.ExtensionRange{
+	{Start: 1000, End: 536870911},
+}
+
+func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange {
+	return extRange_MethodOptions
+}
+
+func (m *MethodOptions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MethodOptions.Unmarshal(m, b)
+}
+func (m *MethodOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MethodOptions.Marshal(b, m, deterministic)
+}
+func (m *MethodOptions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MethodOptions.Merge(m, src)
+}
+func (m *MethodOptions) XXX_Size() int {
+	return xxx_messageInfo_MethodOptions.Size(m)
+}
+func (m *MethodOptions) XXX_DiscardUnknown() {
+	xxx_messageInfo_MethodOptions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MethodOptions proto.InternalMessageInfo
+
+const Default_MethodOptions_Deprecated bool = false
+const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN
+
+func (m *MethodOptions) GetDeprecated() bool {
+	if m != nil && m.Deprecated != nil {
+		return *m.Deprecated
+	}
+	return Default_MethodOptions_Deprecated
+}
+
+func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel {
+	if m != nil && m.IdempotencyLevel != nil {
+		return *m.IdempotencyLevel
+	}
+	return Default_MethodOptions_IdempotencyLevel
+}
+
+func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption {
+	if m != nil {
+		return m.UninterpretedOption
+	}
+	return nil
+}
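+
+// Illustrative sketch, not part of the generated code; safeToRetry is a
+// hypothetical helper. Both NO_SIDE_EFFECTS and IDEMPOTENT indicate that
+// repeating the call is harmless; only the default IDEMPOTENCY_UNKNOWN gives
+// no such guarantee.
+//
+//	func safeToRetry(mo *MethodOptions) bool {
+//		return mo.GetIdempotencyLevel() != MethodOptions_IDEMPOTENCY_UNKNOWN
+//	}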
+
+// A message representing an option the parser does not recognize. This only
+// appears in options protos created by the compiler::Parser class.
+// DescriptorPool resolves these when building Descriptor objects. Therefore,
+// options protos in descriptor objects (e.g. returned by Descriptor::options(),
+// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions
+// in them.
+type UninterpretedOption struct {
+	Name []*UninterpretedOption_NamePart `protobuf:"bytes,2,rep,name=name" json:"name,omitempty"`
+	// The value of the uninterpreted option, in whatever type the tokenizer
+	// identified it as during parsing. Exactly one of these should be set.
+	IdentifierValue      *string  `protobuf:"bytes,3,opt,name=identifier_value,json=identifierValue" json:"identifier_value,omitempty"`
+	PositiveIntValue     *uint64  `protobuf:"varint,4,opt,name=positive_int_value,json=positiveIntValue" json:"positive_int_value,omitempty"`
+	NegativeIntValue     *int64   `protobuf:"varint,5,opt,name=negative_int_value,json=negativeIntValue" json:"negative_int_value,omitempty"`
+	DoubleValue          *float64 `protobuf:"fixed64,6,opt,name=double_value,json=doubleValue" json:"double_value,omitempty"`
+	StringValue          []byte   `protobuf:"bytes,7,opt,name=string_value,json=stringValue" json:"string_value,omitempty"`
+	AggregateValue       *string  `protobuf:"bytes,8,opt,name=aggregate_value,json=aggregateValue" json:"aggregate_value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UninterpretedOption) Reset()         { *m = UninterpretedOption{} }
+func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption) ProtoMessage()    {}
+func (*UninterpretedOption) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{18}
+}
+func (m *UninterpretedOption) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UninterpretedOption.Unmarshal(m, b)
+}
+func (m *UninterpretedOption) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UninterpretedOption.Marshal(b, m, deterministic)
+}
+func (m *UninterpretedOption) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UninterpretedOption.Merge(m, src)
+}
+func (m *UninterpretedOption) XXX_Size() int {
+	return xxx_messageInfo_UninterpretedOption.Size(m)
+}
+func (m *UninterpretedOption) XXX_DiscardUnknown() {
+	xxx_messageInfo_UninterpretedOption.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UninterpretedOption proto.InternalMessageInfo
+
+func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart {
+	if m != nil {
+		return m.Name
+	}
+	return nil
+}
+
+func (m *UninterpretedOption) GetIdentifierValue() string {
+	if m != nil && m.IdentifierValue != nil {
+		return *m.IdentifierValue
+	}
+	return ""
+}
+
+func (m *UninterpretedOption) GetPositiveIntValue() uint64 {
+	if m != nil && m.PositiveIntValue != nil {
+		return *m.PositiveIntValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetNegativeIntValue() int64 {
+	if m != nil && m.NegativeIntValue != nil {
+		return *m.NegativeIntValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetDoubleValue() float64 {
+	if m != nil && m.DoubleValue != nil {
+		return *m.DoubleValue
+	}
+	return 0
+}
+
+func (m *UninterpretedOption) GetStringValue() []byte {
+	if m != nil {
+		return m.StringValue
+	}
+	return nil
+}
+
+func (m *UninterpretedOption) GetAggregateValue() string {
+	if m != nil && m.AggregateValue != nil {
+		return *m.AggregateValue
+	}
+	return ""
+}
+
+// The name of the uninterpreted option.  Each string represents a segment in
+// a dot-separated name.  is_extension is true iff a segment represents an
+// extension (denoted with parentheses in options specs in .proto files).
+// E.g., { ["foo", false], ["bar.baz", true], ["qux", false] } represents
+// "foo.(bar.baz).qux".
+type UninterpretedOption_NamePart struct {
+	NamePart             *string  `protobuf:"bytes,1,req,name=name_part,json=namePart" json:"name_part,omitempty"`
+	IsExtension          *bool    `protobuf:"varint,2,req,name=is_extension,json=isExtension" json:"is_extension,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *UninterpretedOption_NamePart) Reset()         { *m = UninterpretedOption_NamePart{} }
+func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) }
+func (*UninterpretedOption_NamePart) ProtoMessage()    {}
+func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{18, 0}
+}
+func (m *UninterpretedOption_NamePart) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Unmarshal(m, b)
+}
+func (m *UninterpretedOption_NamePart) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Marshal(b, m, deterministic)
+}
+func (m *UninterpretedOption_NamePart) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_UninterpretedOption_NamePart.Merge(m, src)
+}
+func (m *UninterpretedOption_NamePart) XXX_Size() int {
+	return xxx_messageInfo_UninterpretedOption_NamePart.Size(m)
+}
+func (m *UninterpretedOption_NamePart) XXX_DiscardUnknown() {
+	xxx_messageInfo_UninterpretedOption_NamePart.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_UninterpretedOption_NamePart proto.InternalMessageInfo
+
+func (m *UninterpretedOption_NamePart) GetNamePart() string {
+	if m != nil && m.NamePart != nil {
+		return *m.NamePart
+	}
+	return ""
+}
+
+func (m *UninterpretedOption_NamePart) GetIsExtension() bool {
+	if m != nil && m.IsExtension != nil {
+		return *m.IsExtension
+	}
+	return false
+}
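+
+// formatOptionName is an illustrative helper (a sketch, not part of the
+// protoc-gen-gogo output) showing how the NameParts above combine into the
+// dotted option name described in the comment, e.g.
+// {["foo",false], ["bar.baz",true], ["qux",false]} -> "foo.(bar.baz).qux".
+func formatOptionName(parts []*UninterpretedOption_NamePart) string {
+	out := ""
+	for i, p := range parts {
+		if i > 0 {
+			out += "."
+		}
+		if p.GetIsExtension() {
+			// Extension segments are wrapped in parentheses.
+			out += "(" + p.GetNamePart() + ")"
+		} else {
+			out += p.GetNamePart()
+		}
+	}
+	return out
+}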
+
+// Encapsulates information about the original source file from which a
+// FileDescriptorProto was generated.
+type SourceCodeInfo struct {
+	// A Location identifies a piece of source code in a .proto file which
+	// corresponds to a particular definition.  This information is intended
+	// to be useful to IDEs, code indexers, documentation generators, and similar
+	// tools.
+	//
+	// For example, say we have a file like:
+	//   message Foo {
+	//     optional string foo = 1;
+	//   }
+	// Let's look at just the field definition:
+	//   optional string foo = 1;
+	//   ^       ^^     ^^  ^  ^^^
+	//   a       bc     de  f  ghi
+	// We have the following locations:
+	//   span   path               represents
+	//   [a,i)  [ 4, 0, 2, 0 ]     The whole field definition.
+	//   [a,b)  [ 4, 0, 2, 0, 4 ]  The label (optional).
+	//   [c,d)  [ 4, 0, 2, 0, 5 ]  The type (string).
+	//   [e,f)  [ 4, 0, 2, 0, 1 ]  The name (foo).
+	//   [g,h)  [ 4, 0, 2, 0, 3 ]  The number (1).
+	//
+	// Notes:
+	// - A location may refer to a repeated field itself (i.e. not to any
+	//   particular index within it).  This is used whenever a set of elements are
+	//   logically enclosed in a single code segment.  For example, an entire
+	//   extend block (possibly containing multiple extension definitions) will
+	//   have an outer location whose path refers to the "extensions" repeated
+	//   field without an index.
+	// - Multiple locations may have the same path.  This happens when a single
+	//   logical declaration is spread out across multiple places.  The most
+	//   obvious example is the "extend" block again -- there may be multiple
+	//   extend blocks in the same scope, each of which will have the same path.
+	// - A location's span is not always a subset of its parent's span.  For
+	//   example, the "extendee" of an extension declaration appears at the
+	//   beginning of the "extend" block and is shared by all extensions within
+	//   the block.
+	// - Just because a location's span is a subset of some other location's span
+	//   does not mean that it is a descendant.  For example, a "group" defines
+	//   both a type and a field in a single declaration.  Thus, the locations
+	//   corresponding to the type and field and their components will overlap.
+	// - Code which tries to interpret locations should probably be designed to
+	//   ignore those that it doesn't understand, as more types of locations could
+	//   be recorded in the future.
+	Location             []*SourceCodeInfo_Location `protobuf:"bytes,1,rep,name=location" json:"location,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
+	XXX_unrecognized     []byte                     `json:"-"`
+	XXX_sizecache        int32                      `json:"-"`
+}
+
+func (m *SourceCodeInfo) Reset()         { *m = SourceCodeInfo{} }
+func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo) ProtoMessage()    {}
+func (*SourceCodeInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{19}
+}
+func (m *SourceCodeInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SourceCodeInfo.Unmarshal(m, b)
+}
+func (m *SourceCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SourceCodeInfo.Marshal(b, m, deterministic)
+}
+func (m *SourceCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceCodeInfo.Merge(m, src)
+}
+func (m *SourceCodeInfo) XXX_Size() int {
+	return xxx_messageInfo_SourceCodeInfo.Size(m)
+}
+func (m *SourceCodeInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceCodeInfo proto.InternalMessageInfo
+
+func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location {
+	if m != nil {
+		return m.Location
+	}
+	return nil
+}
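+
+// findLocation is an illustrative helper (a sketch, not part of the
+// protoc-gen-gogo output) showing how a Location is looked up by the path
+// convention documented on SourceCodeInfo above; for example, a target of
+// []int32{4, 0, 2, 0} addresses the first field of the first message.
+func findLocation(info *SourceCodeInfo, target []int32) *SourceCodeInfo_Location {
+	for _, loc := range info.GetLocation() {
+		path := loc.GetPath()
+		if len(path) != len(target) {
+			continue
+		}
+		match := true
+		for i := range path {
+			if path[i] != target[i] {
+				match = false
+				break
+			}
+		}
+		if match {
+			return loc
+		}
+	}
+	return nil
+}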
+
+type SourceCodeInfo_Location struct {
+	// Identifies which part of the FileDescriptorProto was defined at this
+	// location.
+	//
+	// Each element is a field number or an index.  They form a path from
+	// the root FileDescriptorProto to the place where the definition appears.  For
+	// example, this path:
+	//   [ 4, 3, 2, 7, 1 ]
+	// refers to:
+	//   file.message_type(3)  // 4, 3
+	//       .field(7)         // 2, 7
+	//       .name()           // 1
+	// This is because FileDescriptorProto.message_type has field number 4:
+	//   repeated DescriptorProto message_type = 4;
+	// and DescriptorProto.field has field number 2:
+	//   repeated FieldDescriptorProto field = 2;
+	// and FieldDescriptorProto.name has field number 1:
+	//   optional string name = 1;
+	//
+	// Thus, the above path gives the location of a field name.  If we removed
+	// the last element:
+	//   [ 4, 3, 2, 7 ]
+	// this path refers to the whole field declaration (from the beginning
+	// of the label to the terminating semicolon).
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Always has exactly three or four elements: start line, start column,
+	// end line (optional, otherwise assumed same as start line), end column.
+	// These are packed into a single field for efficiency.  Note that line
+	// and column numbers are zero-based -- typically you will want to add
+	// 1 to each before displaying to a user.
+	Span []int32 `protobuf:"varint,2,rep,packed,name=span" json:"span,omitempty"`
+	// If this SourceCodeInfo represents a complete declaration, these are any
+	// comments appearing before and after the declaration which appear to be
+	// attached to the declaration.
+	//
+	// A series of line comments appearing on consecutive lines, with no other
+	// tokens appearing on those lines, will be treated as a single comment.
+	//
+	// leading_detached_comments will keep paragraphs of comments that appear
+	// before (but not connected to) the current element. Each paragraph,
+	// separated by empty lines, will be one comment element in the repeated
+	// field.
+	//
+	// Only the comment content is provided; comment markers (e.g. //) are
+	// stripped out.  For block comments, leading whitespace and an asterisk
+	// will be stripped from the beginning of each line other than the first.
+	// Newlines are included in the output.
+	//
+	// Examples:
+	//
+	//   optional int32 foo = 1;  // Comment attached to foo.
+	//   // Comment attached to bar.
+	//   optional int32 bar = 2;
+	//
+	//   optional string baz = 3;
+	//   // Comment attached to baz.
+	//   // Another line attached to baz.
+	//
+	//   // Comment attached to qux.
+	//   //
+	//   // Another line attached to qux.
+	//   optional double qux = 4;
+	//
+	//   // Detached comment for corge. This is not leading or trailing comments
+	//   // to qux or corge because there are blank lines separating it from
+	//   // both.
+	//
+	//   // Detached comment for corge paragraph 2.
+	//
+	//   optional string corge = 5;
+	//   /* Block comment attached
+	//    * to corge.  Leading asterisks
+	//    * will be removed. */
+	//   /* Block comment attached to
+	//    * grault. */
+	//   optional int32 grault = 6;
+	//
+	//   // ignored detached comments.
+	LeadingComments         *string  `protobuf:"bytes,3,opt,name=leading_comments,json=leadingComments" json:"leading_comments,omitempty"`
+	TrailingComments        *string  `protobuf:"bytes,4,opt,name=trailing_comments,json=trailingComments" json:"trailing_comments,omitempty"`
+	LeadingDetachedComments []string `protobuf:"bytes,6,rep,name=leading_detached_comments,json=leadingDetachedComments" json:"leading_detached_comments,omitempty"`
+	XXX_NoUnkeyedLiteral    struct{} `json:"-"`
+	XXX_unrecognized        []byte   `json:"-"`
+	XXX_sizecache           int32    `json:"-"`
+}
+
+func (m *SourceCodeInfo_Location) Reset()         { *m = SourceCodeInfo_Location{} }
+func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) }
+func (*SourceCodeInfo_Location) ProtoMessage()    {}
+func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{19, 0}
+}
+func (m *SourceCodeInfo_Location) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SourceCodeInfo_Location.Unmarshal(m, b)
+}
+func (m *SourceCodeInfo_Location) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SourceCodeInfo_Location.Marshal(b, m, deterministic)
+}
+func (m *SourceCodeInfo_Location) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SourceCodeInfo_Location.Merge(m, src)
+}
+func (m *SourceCodeInfo_Location) XXX_Size() int {
+	return xxx_messageInfo_SourceCodeInfo_Location.Size(m)
+}
+func (m *SourceCodeInfo_Location) XXX_DiscardUnknown() {
+	xxx_messageInfo_SourceCodeInfo_Location.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SourceCodeInfo_Location proto.InternalMessageInfo
+
+func (m *SourceCodeInfo_Location) GetPath() []int32 {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *SourceCodeInfo_Location) GetSpan() []int32 {
+	if m != nil {
+		return m.Span
+	}
+	return nil
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingComments() string {
+	if m != nil && m.LeadingComments != nil {
+		return *m.LeadingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetTrailingComments() string {
+	if m != nil && m.TrailingComments != nil {
+		return *m.TrailingComments
+	}
+	return ""
+}
+
+func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string {
+	if m != nil {
+		return m.LeadingDetachedComments
+	}
+	return nil
+}
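+
+// spanBounds is an illustrative helper (a sketch, not part of the
+// protoc-gen-gogo output) that converts a Location span to 1-based
+// line/column values for display, following the zero-based three- or
+// four-element layout documented on the Span field above.
+func spanBounds(loc *SourceCodeInfo_Location) (startLine, startCol, endLine, endCol int32) {
+	s := loc.GetSpan()
+	switch len(s) {
+	case 3: // start line, start column, end column; end line equals start line
+		return s[0] + 1, s[1] + 1, s[0] + 1, s[2] + 1
+	case 4: // start line, start column, end line, end column
+		return s[0] + 1, s[1] + 1, s[2] + 1, s[3] + 1
+	}
+	return 0, 0, 0, 0
+}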
+
+// Describes the relationship between generated code and its original source
+// file. A GeneratedCodeInfo message is associated with only one generated
+// source file, but may contain references to different source .proto files.
+type GeneratedCodeInfo struct {
+	// An Annotation connects some span of text in generated code to an element
+	// of its generating .proto file.
+	Annotation           []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
+	XXX_unrecognized     []byte                          `json:"-"`
+	XXX_sizecache        int32                           `json:"-"`
+}
+
+func (m *GeneratedCodeInfo) Reset()         { *m = GeneratedCodeInfo{} }
+func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo) ProtoMessage()    {}
+func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{20}
+}
+func (m *GeneratedCodeInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GeneratedCodeInfo.Unmarshal(m, b)
+}
+func (m *GeneratedCodeInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GeneratedCodeInfo.Marshal(b, m, deterministic)
+}
+func (m *GeneratedCodeInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GeneratedCodeInfo.Merge(m, src)
+}
+func (m *GeneratedCodeInfo) XXX_Size() int {
+	return xxx_messageInfo_GeneratedCodeInfo.Size(m)
+}
+func (m *GeneratedCodeInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_GeneratedCodeInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GeneratedCodeInfo proto.InternalMessageInfo
+
+func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation {
+	if m != nil {
+		return m.Annotation
+	}
+	return nil
+}
+
+type GeneratedCodeInfo_Annotation struct {
+	// Identifies the element in the original source .proto file. This field
+	// is formatted the same as SourceCodeInfo.Location.path.
+	Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"`
+	// Identifies the filesystem path to the original source .proto.
+	SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"`
+	// Identifies the starting offset in bytes in the generated code
+	// that relates to the identified object.
+	Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"`
+	// Identifies the ending offset in bytes in the generated code that
+	// relates to the identified object. The end offset should be one past
+	// the last relevant byte (so the length of the text = end - begin).
+	End                  *int32   `protobuf:"varint,4,opt,name=end" json:"end,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GeneratedCodeInfo_Annotation) Reset()         { *m = GeneratedCodeInfo_Annotation{} }
+func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) }
+func (*GeneratedCodeInfo_Annotation) ProtoMessage()    {}
+func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_308767df5ffe18af, []int{20, 0}
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Unmarshal(m, b)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Marshal(b, m, deterministic)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GeneratedCodeInfo_Annotation.Merge(m, src)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_Size() int {
+	return xxx_messageInfo_GeneratedCodeInfo_Annotation.Size(m)
+}
+func (m *GeneratedCodeInfo_Annotation) XXX_DiscardUnknown() {
+	xxx_messageInfo_GeneratedCodeInfo_Annotation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GeneratedCodeInfo_Annotation proto.InternalMessageInfo
+
+func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 {
+	if m != nil {
+		return m.Path
+	}
+	return nil
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string {
+	if m != nil && m.SourceFile != nil {
+		return *m.SourceFile
+	}
+	return ""
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 {
+	if m != nil && m.Begin != nil {
+		return *m.Begin
+	}
+	return 0
+}
+
+func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 {
+	if m != nil && m.End != nil {
+		return *m.End
+	}
+	return 0
+}
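+
+// annotatedText is an illustrative helper (a sketch, not part of the
+// protoc-gen-gogo output) that extracts the generated-code bytes an
+// Annotation refers to, using the half-open [Begin, End) range documented
+// above (length of the text = end - begin).
+func annotatedText(generated []byte, a *GeneratedCodeInfo_Annotation) []byte {
+	begin, end := int(a.GetBegin()), int(a.GetEnd())
+	if begin < 0 || end < begin || end > len(generated) {
+		return nil
+	}
+	return generated[begin:end]
+}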
+
+func init() {
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value)
+	proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value)
+	proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value)
+	proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value)
+	proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value)
+	proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet")
+	proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto")
+	proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto")
+	proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange")
+	proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange")
+	proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions")
+	proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto")
+	proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto")
+	proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto")
+	proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange")
+	proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto")
+	proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto")
+	proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto")
+	proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions")
+	proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions")
+	proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions")
+	proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions")
+	proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions")
+	proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions")
+	proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions")
+	proto.RegisterType((*MethodOptions)(nil), "google.protobuf.MethodOptions")
+	proto.RegisterType((*UninterpretedOption)(nil), "google.protobuf.UninterpretedOption")
+	proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart")
+	proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo")
+	proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location")
+	proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo")
+	proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation")
+}
+
+func init() { proto.RegisterFile("descriptor.proto", fileDescriptor_308767df5ffe18af) }
+
+var fileDescriptor_308767df5ffe18af = []byte{
+	// 2522 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8,
+	0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0x65, 0x7a, 0xec, 0x75, 0x18, 0xef, 0x47, 0x1c, 0xed, 0x66,
+	0xe3, 0x24, 0xbb, 0xca, 0xc2, 0x49, 0x9c, 0xac, 0x53, 0x6c, 0x2b, 0x4b, 0x8c, 0x57, 0xa9, 0xbe,
+	0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89,
+	0x83, 0x1e, 0x02, 0xf4, 0x54, 0xa0, 0x7f, 0x40, 0x51, 0x14, 0x3d, 0xf4, 0xb2, 0x40, 0xff, 0x80,
+	0x02, 0xed, 0xbd, 0xd7, 0x02, 0xbd, 0xf7, 0x50, 0xa0, 0x05, 0xda, 0x3f, 0xa1, 0xc7, 0x62, 0x66,
+	0x48, 0x8a, 0xd4, 0x47, 0xe2, 0x5d, 0x20, 0xd9, 0x93, 0x3d, 0xef, 0xfd, 0xde, 0x9b, 0x37, 0x8f,
+	0xbf, 0x79, 0xf3, 0x66, 0x04, 0x82, 0x46, 0x5c, 0xd5, 0xd1, 0x6d, 0xcf, 0x72, 0x2a, 0xb6, 0x63,
+	0x79, 0x16, 0x5a, 0x1b, 0x5a, 0xd6, 0xd0, 0x20, 0x7c, 0x74, 0x32, 0x19, 0x94, 0x5b, 0xb0, 0x7e,
+	0x4f, 0x37, 0x48, 0x3d, 0x04, 0xf6, 0x88, 0x87, 0xee, 0x40, 0x7a, 0xa0, 0x1b, 0x44, 0x4c, 0xec,
+	0xa4, 0x76, 0x0b, 0x7b, 0x1f, 0x56, 0x66, 0x8c, 0x2a, 0x71, 0x8b, 0x2e, 0x15, 0xcb, 0xcc, 0xa2,
+	0xfc, 0xef, 0x34, 0x6c, 0x2c, 0xd0, 0x22, 0x04, 0x69, 0x13, 0x8f, 0xa9, 0xc7, 0xc4, 0x6e, 0x5e,
+	0x66, 0xff, 0x23, 0x11, 0x56, 0x6c, 0xac, 0x3e, 0xc6, 0x43, 0x22, 0x26, 0x99, 0x38, 0x18, 0xa2,
+	0xf7, 0x01, 0x34, 0x62, 0x13, 0x53, 0x23, 0xa6, 0x7a, 0x2a, 0xa6, 0x76, 0x52, 0xbb, 0x79, 0x39,
+	0x22, 0x41, 0xd7, 0x60, 0xdd, 0x9e, 0x9c, 0x18, 0xba, 0xaa, 0x44, 0x60, 0xb0, 0x93, 0xda, 0xcd,
+	0xc8, 0x02, 0x57, 0xd4, 0xa7, 0xe0, 0xcb, 0xb0, 0xf6, 0x94, 0xe0, 0xc7, 0x51, 0x68, 0x81, 0x41,
+	0x4b, 0x54, 0x1c, 0x01, 0xd6, 0xa0, 0x38, 0x26, 0xae, 0x8b, 0x87, 0x44, 0xf1, 0x4e, 0x6d, 0x22,
+	0xa6, 0xd9, 0xea, 0x77, 0xe6, 0x56, 0x3f, 0xbb, 0xf2, 0x82, 0x6f, 0xd5, 0x3f, 0xb5, 0x09, 0xaa,
+	0x42, 0x9e, 0x98, 0x93, 0x31, 0xf7, 0x90, 0x59, 0x92, 0x3f, 0xc9, 0x9c, 0x8c, 0x67, 0xbd, 0xe4,
+	0xa8, 0x99, 0xef, 0x62, 0xc5, 0x25, 0xce, 0x13, 0x5d, 0x25, 0x62, 0x96, 0x39, 0xb8, 0x3c, 0xe7,
+	0xa0, 0xc7, 0xf5, 0xb3, 0x3e, 0x02, 0x3b, 0x54, 0x83, 0x3c, 0x79, 0xe6, 0x11, 0xd3, 0xd5, 0x2d,
+	0x53, 0x5c, 0x61, 0x4e, 0x2e, 0x2d, 0xf8, 0x8a, 0xc4, 0xd0, 0x66, 0x5d, 0x4c, 0xed, 0xd0, 0x3e,
+	0xac, 0x58, 0xb6, 0xa7, 0x5b, 0xa6, 0x2b, 0xe6, 0x76, 0x12, 0xbb, 0x85, 0xbd, 0x77, 0x17, 0x12,
+	0xa1, 0xc3, 0x31, 0x72, 0x00, 0x46, 0x0d, 0x10, 0x5c, 0x6b, 0xe2, 0xa8, 0x44, 0x51, 0x2d, 0x8d,
+	0x28, 0xba, 0x39, 0xb0, 0xc4, 0x3c, 0x73, 0x70, 0x61, 0x7e, 0x21, 0x0c, 0x58, 0xb3, 0x34, 0xd2,
+	0x30, 0x07, 0x96, 0x5c, 0x72, 0x63, 0x63, 0xb4, 0x05, 0x59, 0xf7, 0xd4, 0xf4, 0xf0, 0x33, 0xb1,
+	0xc8, 0x18, 0xe2, 0x8f, 0xca, 0x7f, 0xce, 0xc2, 0xda, 0x59, 0x28, 0x76, 0x17, 0x32, 0x03, 0xba,
+	0x4a, 0x31, 0xf9, 0x6d, 0x72, 0xc0, 0x6d, 0xe2, 0x49, 0xcc, 0x7e, 0xc7, 0x24, 0x56, 0xa1, 0x60,
+	0x12, 0xd7, 0x23, 0x1a, 0x67, 0x44, 0xea, 0x8c, 0x9c, 0x02, 0x6e, 0x34, 0x4f, 0xa9, 0xf4, 0x77,
+	0xa2, 0xd4, 0x03, 0x58, 0x0b, 0x43, 0x52, 0x1c, 0x6c, 0x0e, 0x03, 0x6e, 0x5e, 0x7f, 0x55, 0x24,
+	0x15, 0x29, 0xb0, 0x93, 0xa9, 0x99, 0x5c, 0x22, 0xb1, 0x31, 0xaa, 0x03, 0x58, 0x26, 0xb1, 0x06,
+	0x8a, 0x46, 0x54, 0x43, 0xcc, 0x2d, 0xc9, 0x52, 0x87, 0x42, 0xe6, 0xb2, 0x64, 0x71, 0xa9, 0x6a,
+	0xa0, 0xcf, 0xa6, 0x54, 0x5b, 0x59, 0xc2, 0x94, 0x16, 0xdf, 0x64, 0x73, 0x6c, 0x3b, 0x86, 0x92,
+	0x43, 0x28, 0xef, 0x89, 0xe6, 0xaf, 0x2c, 0xcf, 0x82, 0xa8, 0xbc, 0x72, 0x65, 0xb2, 0x6f, 0xc6,
+	0x17, 0xb6, 0xea, 0x44, 0x87, 0xe8, 0x03, 0x08, 0x05, 0x0a, 0xa3, 0x15, 0xb0, 0x2a, 0x54, 0x0c,
+	0x84, 0x6d, 0x3c, 0x26, 0xdb, 0xcf, 0xa1, 0x14, 0x4f, 0x0f, 0xda, 0x84, 0x8c, 0xeb, 0x61, 0xc7,
+	0x63, 0x2c, 0xcc, 0xc8, 0x7c, 0x80, 0x04, 0x48, 0x11, 0x53, 0x63, 0x55, 0x2e, 0x23, 0xd3, 0x7f,
+	0xd1, 0x8f, 0xa6, 0x0b, 0x4e, 0xb1, 0x05, 0x7f, 0x34, 0xff, 0x45, 0x63, 0x9e, 0x67, 0xd7, 0xbd,
+	0x7d, 0x1b, 0x56, 0x63, 0x0b, 0x38, 0xeb, 0xd4, 0xe5, 0x5f, 0xc0, 0xdb, 0x0b, 0x5d, 0xa3, 0x07,
+	0xb0, 0x39, 0x31, 0x75, 0xd3, 0x23, 0x8e, 0xed, 0x10, 0xca, 0x58, 0x3e, 0x95, 0xf8, 0x9f, 0x95,
+	0x25, 0x9c, 0x3b, 0x8e, 0xa2, 0xb9, 0x17, 0x79, 0x63, 0x32, 0x2f, 0xbc, 0x9a, 0xcf, 0xfd, 0x77,
+	0x45, 0x78, 0xf1, 0xe2, 0xc5, 0x8b, 0x64, 0xf9, 0x37, 0x59, 0xd8, 0x5c, 0xb4, 0x67, 0x16, 0x6e,
+	0xdf, 0x2d, 0xc8, 0x9a, 0x93, 0xf1, 0x09, 0x71, 0x58, 0x92, 0x32, 0xb2, 0x3f, 0x42, 0x55, 0xc8,
+	0x18, 0xf8, 0x84, 0x18, 0x62, 0x7a, 0x27, 0xb1, 0x5b, 0xda, 0xbb, 0x76, 0xa6, 0x5d, 0x59, 0x69,
+	0x52, 0x13, 0x99, 0x5b, 0xa2, 0xcf, 0x21, 0xed, 0x97, 0x68, 0xea, 0xe1, 0xea, 0xd9, 0x3c, 0xd0,
+	0xbd, 0x24, 0x33, 0x3b, 0xf4, 0x0e, 0xe4, 0xe9, 0x5f, 0xce, 0x8d, 0x2c, 0x8b, 0x39, 0x47, 0x05,
+	0x94, 0x17, 0x68, 0x1b, 0x72, 0x6c, 0x9b, 0x68, 0x24, 0x38, 0xda, 0xc2, 0x31, 0x25, 0x96, 0x46,
+	0x06, 0x78, 0x62, 0x78, 0xca, 0x13, 0x6c, 0x4c, 0x08, 0x23, 0x7c, 0x5e, 0x2e, 0xfa, 0xc2, 0x9f,
+	0x52, 0x19, 0xba, 0x00, 0x05, 0xbe, 0xab, 0x74, 0x53, 0x23, 0xcf, 0x58, 0xf5, 0xcc, 0xc8, 0x7c,
+	0xa3, 0x35, 0xa8, 0x84, 0x4e, 0xff, 0xc8, 0xb5, 0xcc, 0x80, 0x9a, 0x6c, 0x0a, 0x2a, 0x60, 0xd3,
+	0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x5e, 0xde, 0x2c, 0xa7, 0xca, 0x7f, 0x4a, 0x42, 0x9a, 0xd5,
+	0x8b, 0x35, 0x28, 0xf4, 0x1f, 0x76, 0x25, 0xa5, 0xde, 0x39, 0x3e, 0x6c, 0x4a, 0x42, 0x02, 0x95,
+	0x00, 0x98, 0xe0, 0x5e, 0xb3, 0x53, 0xed, 0x0b, 0xc9, 0x70, 0xdc, 0x68, 0xf7, 0xf7, 0x6f, 0x0a,
+	0xa9, 0xd0, 0xe0, 0x98, 0x0b, 0xd2, 0x51, 0xc0, 0x8d, 0x3d, 0x21, 0x83, 0x04, 0x28, 0x72, 0x07,
+	0x8d, 0x07, 0x52, 0x7d, 0xff, 0xa6, 0x90, 0x8d, 0x4b, 0x6e, 0xec, 0x09, 0x2b, 0x68, 0x15, 0xf2,
+	0x4c, 0x72, 0xd8, 0xe9, 0x34, 0x85, 0x5c, 0xe8, 0xb3, 0xd7, 0x97, 0x1b, 0xed, 0x23, 0x21, 0x1f,
+	0xfa, 0x3c, 0x92, 0x3b, 0xc7, 0x5d, 0x01, 0x42, 0x0f, 0x2d, 0xa9, 0xd7, 0xab, 0x1e, 0x49, 0x42,
+	0x21, 0x44, 0x1c, 0x3e, 0xec, 0x4b, 0x3d, 0xa1, 0x18, 0x0b, 0xeb, 0xc6, 0x9e, 0xb0, 0x1a, 0x4e,
+	0x21, 0xb5, 0x8f, 0x5b, 0x42, 0x09, 0xad, 0xc3, 0x2a, 0x9f, 0x22, 0x08, 0x62, 0x6d, 0x46, 0xb4,
+	0x7f, 0x53, 0x10, 0xa6, 0x81, 0x70, 0x2f, 0xeb, 0x31, 0xc1, 0xfe, 0x4d, 0x01, 0x95, 0x6b, 0x90,
+	0x61, 0xec, 0x42, 0x08, 0x4a, 0xcd, 0xea, 0xa1, 0xd4, 0x54, 0x3a, 0xdd, 0x7e, 0xa3, 0xd3, 0xae,
+	0x36, 0x85, 0xc4, 0x54, 0x26, 0x4b, 0x3f, 0x39, 0x6e, 0xc8, 0x52, 0x5d, 0x48, 0x46, 0x65, 0x5d,
+	0xa9, 0xda, 0x97, 0xea, 0x42, 0xaa, 0xac, 0xc2, 0xe6, 0xa2, 0x3a, 0xb9, 0x70, 0x67, 0x44, 0x3e,
+	0x71, 0x72, 0xc9, 0x27, 0x66, 0xbe, 0xe6, 0x3e, 0xf1, 0xbf, 0x92, 0xb0, 0xb1, 0xe0, 0xac, 0x58,
+	0x38, 0xc9, 0x0f, 0x21, 0xc3, 0x29, 0xca, 0x4f, 0xcf, 0x2b, 0x0b, 0x0f, 0x1d, 0x46, 0xd8, 0xb9,
+	0x13, 0x94, 0xd9, 0x45, 0x3b, 0x88, 0xd4, 0x92, 0x0e, 0x82, 0xba, 0x98, 0xab, 0xe9, 0x3f, 0x9f,
+	0xab, 0xe9, 0xfc, 0xd8, 0xdb, 0x3f, 0xcb, 0xb1, 0xc7, 0x64, 0xdf, 0xae, 0xb6, 0x67, 0x16, 0xd4,
+	0xf6, 0xbb, 0xb0, 0x3e, 0xe7, 0xe8, 0xcc, 0x35, 0xf6, 0x97, 0x09, 0x10, 0x97, 0x25, 0xe7, 0x15,
+	0x95, 0x2e, 0x19, 0xab, 0x74, 0x77, 0x67, 0x33, 0x78, 0x71, 0xf9, 0x47, 0x98, 0xfb, 0xd6, 0xdf,
+	0x24, 0x60, 0x6b, 0x71, 0xa7, 0xb8, 0x30, 0x86, 0xcf, 0x21, 0x3b, 0x26, 0xde, 0xc8, 0x0a, 0xba,
+	0xa5, 0x8f, 0x16, 0x9c, 0xc1, 0x54, 0x3d, 0xfb, 0xb1, 0x7d, 0xab, 0xe8, 0x21, 0x9e, 0x5a, 0xd6,
+	0xee, 0xf1, 0x68, 0xe6, 0x22, 0xfd, 0x55, 0x12, 0xde, 0x5e, 0xe8, 0x7c, 0x61, 0xa0, 0xef, 0x01,
+	0xe8, 0xa6, 0x3d, 0xf1, 0x78, 0x47, 0xc4, 0x0b, 0x6c, 0x9e, 0x49, 0x58, 0xf1, 0xa2, 0xc5, 0x73,
+	0xe2, 0x85, 0xfa, 0x14, 0xd3, 0x03, 0x17, 0x31, 0xc0, 0x9d, 0x69, 0xa0, 0x69, 0x16, 0xe8, 0xfb,
+	0x4b, 0x56, 0x3a, 0x47, 0xcc, 0x4f, 0x41, 0x50, 0x0d, 0x9d, 0x98, 0x9e, 0xe2, 0x7a, 0x0e, 0xc1,
+	0x63, 0xdd, 0x1c, 0xb2, 0x13, 0x24, 0x77, 0x90, 0x19, 0x60, 0xc3, 0x25, 0xf2, 0x1a, 0x57, 0xf7,
+	0x02, 0x2d, 0xb5, 0x60, 0x04, 0x72, 0x22, 0x16, 0xd9, 0x98, 0x05, 0x57, 0x87, 0x16, 0xe5, 0x5f,
+	0xe7, 0xa1, 0x10, 0xe9, 0xab, 0xd1, 0x45, 0x28, 0x3e, 0xc2, 0x4f, 0xb0, 0x12, 0xdc, 0x95, 0x78,
+	0x26, 0x0a, 0x54, 0xd6, 0xf5, 0xef, 0x4b, 0x9f, 0xc2, 0x26, 0x83, 0x58, 0x13, 0x8f, 0x38, 0x8a,
+	0x6a, 0x60, 0xd7, 0x65, 0x49, 0xcb, 0x31, 0x28, 0xa2, 0xba, 0x0e, 0x55, 0xd5, 0x02, 0x0d, 0xba,
+	0x05, 0x1b, 0xcc, 0x62, 0x3c, 0x31, 0x3c, 0xdd, 0x36, 0x88, 0x42, 0x6f, 0x6f, 0x2e, 0x3b, 0x49,
+	0xc2, 0xc8, 0xd6, 0x29, 0xa2, 0xe5, 0x03, 0x68, 0x44, 0x2e, 0xaa, 0xc3, 0x7b, 0xcc, 0x6c, 0x48,
+	0x4c, 0xe2, 0x60, 0x8f, 0x28, 0xe4, 0xeb, 0x09, 0x36, 0x5c, 0x05, 0x9b, 0x9a, 0x32, 0xc2, 0xee,
+	0x48, 0xdc, 0xa4, 0x0e, 0x0e, 0x93, 0x62, 0x42, 0x3e, 0x4f, 0x81, 0x47, 0x3e, 0x4e, 0x62, 0xb0,
+	0xaa, 0xa9, 0x7d, 0x81, 0xdd, 0x11, 0x3a, 0x80, 0x2d, 0xe6, 0xc5, 0xf5, 0x1c, 0xdd, 0x1c, 0x2a,
+	0xea, 0x88, 0xa8, 0x8f, 0x95, 0x89, 0x37, 0xb8, 0x23, 0xbe, 0x13, 0x9d, 0x9f, 0x45, 0xd8, 0x63,
+	0x98, 0x1a, 0x85, 0x1c, 0x7b, 0x83, 0x3b, 0xa8, 0x07, 0x45, 0xfa, 0x31, 0xc6, 0xfa, 0x73, 0xa2,
+	0x0c, 0x2c, 0x87, 0x1d, 0x8d, 0xa5, 0x05, 0xa5, 0x29, 0x92, 0xc1, 0x4a, 0xc7, 0x37, 0x68, 0x59,
+	0x1a, 0x39, 0xc8, 0xf4, 0xba, 0x92, 0x54, 0x97, 0x0b, 0x81, 0x97, 0x7b, 0x96, 0x43, 0x09, 0x35,
+	0xb4, 0xc2, 0x04, 0x17, 0x38, 0xa1, 0x86, 0x56, 0x90, 0xde, 0x5b, 0xb0, 0xa1, 0xaa, 0x7c, 0xcd,
+	0xba, 0xaa, 0xf8, 0x77, 0x2c, 0x57, 0x14, 0x62, 0xc9, 0x52, 0xd5, 0x23, 0x0e, 0xf0, 0x39, 0xee,
+	0xa2, 0xcf, 0xe0, 0xed, 0x69, 0xb2, 0xa2, 0x86, 0xeb, 0x73, 0xab, 0x9c, 0x35, 0xbd, 0x05, 0x1b,
+	0xf6, 0xe9, 0xbc, 0x21, 0x8a, 0xcd, 0x68, 0x9f, 0xce, 0x9a, 0xdd, 0x86, 0x4d, 0x7b, 0x64, 0xcf,
+	0xdb, 0x5d, 0x8d, 0xda, 0x21, 0x7b, 0x64, 0xcf, 0x1a, 0x5e, 0x62, 0x17, 0x6e, 0x87, 0xa8, 0xd8,
+	0x23, 0x9a, 0x78, 0x2e, 0x0a, 0x8f, 0x28, 0xd0, 0x75, 0x10, 0x54, 0x55, 0x21, 0x26, 0x3e, 0x31,
+	0x88, 0x82, 0x1d, 0x62, 0x62, 0x57, 0xbc, 0x10, 0x05, 0x97, 0x54, 0x55, 0x62, 0xda, 0x2a, 0x53,
+	0xa2, 0xab, 0xb0, 0x6e, 0x9d, 0x3c, 0x52, 0x39, 0x25, 0x15, 0xdb, 0x21, 0x03, 0xfd, 0x99, 0xf8,
+	0x21, 0xcb, 0xef, 0x1a, 0x55, 0x30, 0x42, 0x76, 0x99, 0x18, 0x5d, 0x01, 0x41, 0x75, 0x47, 0xd8,
+	0xb1, 0x59, 0x4d, 0x76, 0x6d, 0xac, 0x12, 0xf1, 0x12, 0x87, 0x72, 0x79, 0x3b, 0x10, 0xd3, 0x2d,
+	0xe1, 0x3e, 0xd5, 0x07, 0x5e, 0xe0, 0xf1, 0x32, 0xdf, 0x12, 0x4c, 0xe6, 0x7b, 0xdb, 0x05, 0x81,
+	0xa6, 0x22, 0x36, 0xf1, 0x2e, 0x83, 0x95, 0xec, 0x91, 0x1d, 0x9d, 0xf7, 0x03, 0x58, 0xa5, 0xc8,
+	0xe9, 0xa4, 0x57, 0x78, 0x43, 0x66, 0x8f, 0x22, 0x33, 0xde, 0x84, 0x2d, 0x0a, 0x1a, 0x13, 0x0f,
+	0x6b, 0xd8, 0xc3, 0x11, 0xf4, 0xc7, 0x0c, 0x4d, 0xf3, 0xde, 0xf2, 0x95, 0xb1, 0x38, 0x9d, 0xc9,
+	0xc9, 0x69, 0xc8, 0xac, 0x4f, 0x78, 0x9c, 0x54, 0x16, 0x70, 0xeb, 0xb5, 0x35, 0xdd, 0xe5, 0x03,
+	0x28, 0x46, 0x89, 0x8f, 0xf2, 0xc0, 0xa9, 0x2f, 0x24, 0x68, 0x17, 0x54, 0xeb, 0xd4, 0x69, 0xff,
+	0xf2, 0x95, 0x24, 0x24, 0x69, 0x1f, 0xd5, 0x6c, 0xf4, 0x25, 0x45, 0x3e, 0x6e, 0xf7, 0x1b, 0x2d,
+	0x49, 0x48, 0x45, 0x1b, 0xf6, 0xbf, 0x26, 0xa1, 0x14, 0xbf, 0x7b, 0xa1, 0x1f, 0xc0, 0xb9, 0xe0,
+	0xa1, 0xc4, 0x25, 0x9e, 0xf2, 0x54, 0x77, 0xd8, 0x5e, 0x1c, 0x63, 0x7e, 0x2e, 0x86, 0x6c, 0xd8,
+	0xf4, 0x51, 0x3d, 0xe2, 0x7d, 0xa9, 0x3b, 0x74, 0xa7, 0x8d, 0xb1, 0x87, 0x9a, 0x70, 0xc1, 0xb4,
+	0x14, 0xd7, 0xc3, 0xa6, 0x86, 0x1d, 0x4d, 0x99, 0x3e, 0x51, 0x29, 0x58, 0x55, 0x89, 0xeb, 0x5a,
+	0xfc, 0x0c, 0x0c, 0xbd, 0xbc, 0x6b, 0x5a, 0x3d, 0x1f, 0x3c, 0x3d, 0x1c, 0xaa, 0x3e, 0x74, 0x86,
+	0xb9, 0xa9, 0x65, 0xcc, 0x7d, 0x07, 0xf2, 0x63, 0x6c, 0x2b, 0xc4, 0xf4, 0x9c, 0x53, 0xd6, 0x71,
+	0xe7, 0xe4, 0xdc, 0x18, 0xdb, 0x12, 0x1d, 0xbf, 0x99, 0x8b, 0xcf, 0x3f, 0x52, 0x50, 0x8c, 0x76,
+	0xdd, 0xf4, 0x12, 0xa3, 0xb2, 0x03, 0x2a, 0xc1, 0x4a, 0xd8, 0x07, 0x2f, 0xed, 0xd1, 0x2b, 0x35,
+	0x7a, 0x72, 0x1d, 0x64, 0x79, 0x2f, 0x2c, 0x73, 0x4b, 0xda, 0x35, 0x50, 0x6a, 0x11, 0xde, 0x7b,
+	0xe4, 0x64, 0x7f, 0x84, 0x8e, 0x20, 0xfb, 0xc8, 0x65, 0xbe, 0xb3, 0xcc, 0xf7, 0x87, 0x2f, 0xf7,
+	0x7d, 0xbf, 0xc7, 0x9c, 0xe7, 0xef, 0xf7, 0x94, 0x76, 0x47, 0x6e, 0x55, 0x9b, 0xb2, 0x6f, 0x8e,
+	0xce, 0x43, 0xda, 0xc0, 0xcf, 0x4f, 0xe3, 0x67, 0x1c, 0x13, 0x9d, 0x35, 0xf1, 0xe7, 0x21, 0xfd,
+	0x94, 0xe0, 0xc7, 0xf1, 0x93, 0x85, 0x89, 0x5e, 0x23, 0xf5, 0xaf, 0x43, 0x86, 0xe5, 0x0b, 0x01,
+	0xf8, 0x19, 0x13, 0xde, 0x42, 0x39, 0x48, 0xd7, 0x3a, 0x32, 0xa5, 0xbf, 0x00, 0x45, 0x2e, 0x55,
+	0xba, 0x0d, 0xa9, 0x26, 0x09, 0xc9, 0xf2, 0x2d, 0xc8, 0xf2, 0x24, 0xd0, 0xad, 0x11, 0xa6, 0x41,
+	0x78, 0xcb, 0x1f, 0xfa, 0x3e, 0x12, 0x81, 0xf6, 0xb8, 0x75, 0x28, 0xc9, 0x42, 0x32, 0xfa, 0x79,
+	0x5d, 0x28, 0x46, 0x1b, 0xee, 0x37, 0xc3, 0xa9, 0xbf, 0x24, 0xa0, 0x10, 0x69, 0xa0, 0x69, 0xe7,
+	0x83, 0x0d, 0xc3, 0x7a, 0xaa, 0x60, 0x43, 0xc7, 0xae, 0x4f, 0x0a, 0x60, 0xa2, 0x2a, 0x95, 0x9c,
+	0xf5, 0xa3, 0xbd, 0x91, 0xe0, 0x7f, 0x9f, 0x00, 0x61, 0xb6, 0x77, 0x9d, 0x09, 0x30, 0xf1, 0xbd,
+	0x06, 0xf8, 0xbb, 0x04, 0x94, 0xe2, 0x0d, 0xeb, 0x4c, 0x78, 0x17, 0xbf, 0xd7, 0xf0, 0xfe, 0x99,
+	0x84, 0xd5, 0x58, 0x9b, 0x7a, 0xd6, 0xe8, 0xbe, 0x86, 0x75, 0x5d, 0x23, 0x63, 0xdb, 0xf2, 0x88,
+	0xa9, 0x9e, 0x2a, 0x06, 0x79, 0x42, 0x0c, 0xb1, 0xcc, 0x0a, 0xc5, 0xf5, 0x97, 0x37, 0xc2, 0x95,
+	0xc6, 0xd4, 0xae, 0x49, 0xcd, 0x0e, 0x36, 0x1a, 0x75, 0xa9, 0xd5, 0xed, 0xf4, 0xa5, 0x76, 0xed,
+	0xa1, 0x72, 0xdc, 0xfe, 0x71, 0xbb, 0xf3, 0x65, 0x5b, 0x16, 0xf4, 0x19, 0xd8, 0x6b, 0xdc, 0xea,
+	0x5d, 0x10, 0x66, 0x83, 0x42, 0xe7, 0x60, 0x51, 0x58, 0xc2, 0x5b, 0x68, 0x03, 0xd6, 0xda, 0x1d,
+	0xa5, 0xd7, 0xa8, 0x4b, 0x8a, 0x74, 0xef, 0x9e, 0x54, 0xeb, 0xf7, 0xf8, 0xd3, 0x46, 0x88, 0xee,
+	0xc7, 0x37, 0xf5, 0x6f, 0x53, 0xb0, 0xb1, 0x20, 0x12, 0x54, 0xf5, 0x2f, 0x25, 0xfc, 0x9e, 0xf4,
+	0xc9, 0x59, 0xa2, 0xaf, 0xd0, 0xae, 0xa0, 0x8b, 0x1d, 0xcf, 0xbf, 0xc3, 0x5c, 0x01, 0x9a, 0x25,
+	0xd3, 0xd3, 0x07, 0x3a, 0x71, 0xfc, 0x97, 0x20, 0x7e, 0x53, 0x59, 0x9b, 0xca, 0xf9, 0x63, 0xd0,
+	0xc7, 0x80, 0x6c, 0xcb, 0xd5, 0x3d, 0xfd, 0x09, 0x51, 0x74, 0x33, 0x78, 0x36, 0xa2, 0x37, 0x97,
+	0xb4, 0x2c, 0x04, 0x9a, 0x86, 0xe9, 0x85, 0x68, 0x93, 0x0c, 0xf1, 0x0c, 0x9a, 0x16, 0xf0, 0x94,
+	0x2c, 0x04, 0x9a, 0x10, 0x7d, 0x11, 0x8a, 0x9a, 0x35, 0xa1, 0xed, 0x1c, 0xc7, 0xd1, 0xf3, 0x22,
+	0x21, 0x17, 0xb8, 0x2c, 0x84, 0xf8, 0x8d, 0xfa, 0xf4, 0xbd, 0xaa, 0x28, 0x17, 0xb8, 0x8c, 0x43,
+	0x2e, 0xc3, 0x1a, 0x1e, 0x0e, 0x1d, 0xea, 0x3c, 0x70, 0xc4, 0xaf, 0x1e, 0xa5, 0x50, 0xcc, 0x80,
+	0xdb, 0xf7, 0x21, 0x17, 0xe4, 0x81, 0x1e, 0xc9, 0x34, 0x13, 0x8a, 0xcd, 0xef, 0xd3, 0xc9, 0xdd,
+	0xbc, 0x9c, 0x33, 0x03, 0xe5, 0x45, 0x28, 0xea, 0xae, 0x32, 0x7d, 0x7e, 0x4f, 0xee, 0x24, 0x77,
+	0x73, 0x72, 0x41, 0x77, 0xc3, 0xa7, 0xcb, 0xf2, 0x37, 0x49, 0x28, 0xc5, 0x7f, 0x3e, 0x40, 0x75,
+	0xc8, 0x19, 0x96, 0x8a, 0x19, 0xb5, 0xf8, 0x6f, 0x57, 0xbb, 0xaf, 0xf8, 0xc5, 0xa1, 0xd2, 0xf4,
+	0xf1, 0x72, 0x68, 0xb9, 0xfd, 0xb7, 0x04, 0xe4, 0x02, 0x31, 0xda, 0x82, 0xb4, 0x8d, 0xbd, 0x11,
+	0x73, 0x97, 0x39, 0x4c, 0x0a, 0x09, 0x99, 0x8d, 0xa9, 0xdc, 0xb5, 0xb1, 0xc9, 0x28, 0xe0, 0xcb,
+	0xe9, 0x98, 0x7e, 0x57, 0x83, 0x60, 0x8d, 0xdd, 0x6b, 0xac, 0xf1, 0x98, 0x98, 0x9e, 0x1b, 0x7c,
+	0x57, 0x5f, 0x5e, 0xf3, 0xc5, 0xe8, 0x1a, 0xac, 0x7b, 0x0e, 0xd6, 0x8d, 0x18, 0x36, 0xcd, 0xb0,
+	0x42, 0xa0, 0x08, 0xc1, 0x07, 0x70, 0x3e, 0xf0, 0xab, 0x11, 0x0f, 0xab, 0x23, 0xa2, 0x4d, 0x8d,
+	0xb2, 0xec, 0xfd, 0xe2, 0x9c, 0x0f, 0xa8, 0xfb, 0xfa, 0xc0, 0xb6, 0xfc, 0xf7, 0x04, 0xac, 0x07,
+	0x37, 0x31, 0x2d, 0x4c, 0x56, 0x0b, 0x00, 0x9b, 0xa6, 0xe5, 0x45, 0xd3, 0x35, 0x4f, 0xe5, 0x39,
+	0xbb, 0x4a, 0x35, 0x34, 0x92, 0x23, 0x0e, 0xb6, 0xc7, 0x00, 0x53, 0xcd, 0xd2, 0xb4, 0x5d, 0x80,
+	0x82, 0xff, 0xdb, 0x10, 0xfb, 0x81, 0x91, 0xdf, 0xdd, 0x81, 0x8b, 0xe8, 0x95, 0x0d, 0x6d, 0x42,
+	0xe6, 0x84, 0x0c, 0x75, 0xd3, 0x7f, 0xf1, 0xe5, 0x83, 0xe0, 0x85, 0x25, 0x1d, 0xbe, 0xb0, 0x1c,
+	0xfe, 0x0c, 0x36, 0x54, 0x6b, 0x3c, 0x1b, 0xee, 0xa1, 0x30, 0xf3, 0x7e, 0xe0, 0x7e, 0x91, 0xf8,
+	0x0a, 0xa6, 0x2d, 0xe6, 0xff, 0x12, 0x89, 0x3f, 0x24, 0x53, 0x47, 0xdd, 0xc3, 0x3f, 0x26, 0xb7,
+	0x8f, 0xb8, 0x69, 0x37, 0x58, 0xa9, 0x4c, 0x06, 0x06, 0x51, 0x69, 0xf4, 0xff, 0x0f, 0x00, 0x00,
+	0xff, 0xff, 0x88, 0x17, 0xc1, 0xbe, 0x38, 0x1d, 0x00, 0x00,
+}
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
new file mode 100644
index 0000000..165b211
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go
@@ -0,0 +1,752 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: descriptor.proto
+
+package descriptor
+
+import (
+	fmt "fmt"
+	github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto"
+	proto "github.com/gogo/protobuf/proto"
+	math "math"
+	reflect "reflect"
+	sort "sort"
+	strconv "strconv"
+	strings "strings"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+func (this *FileDescriptorSet) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&descriptor.FileDescriptorSet{")
+	if this.File != nil {
+		s = append(s, "File: "+fmt.Sprintf("%#v", this.File)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *FileDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 16)
+	s = append(s, "&descriptor.FileDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Package != nil {
+		s = append(s, "Package: "+valueToGoStringDescriptor(this.Package, "string")+",\n")
+	}
+	if this.Dependency != nil {
+		s = append(s, "Dependency: "+fmt.Sprintf("%#v", this.Dependency)+",\n")
+	}
+	if this.PublicDependency != nil {
+		s = append(s, "PublicDependency: "+fmt.Sprintf("%#v", this.PublicDependency)+",\n")
+	}
+	if this.WeakDependency != nil {
+		s = append(s, "WeakDependency: "+fmt.Sprintf("%#v", this.WeakDependency)+",\n")
+	}
+	if this.MessageType != nil {
+		s = append(s, "MessageType: "+fmt.Sprintf("%#v", this.MessageType)+",\n")
+	}
+	if this.EnumType != nil {
+		s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
+	}
+	if this.Service != nil {
+		s = append(s, "Service: "+fmt.Sprintf("%#v", this.Service)+",\n")
+	}
+	if this.Extension != nil {
+		s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.SourceCodeInfo != nil {
+		s = append(s, "SourceCodeInfo: "+fmt.Sprintf("%#v", this.SourceCodeInfo)+",\n")
+	}
+	if this.Syntax != nil {
+		s = append(s, "Syntax: "+valueToGoStringDescriptor(this.Syntax, "string")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *DescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 14)
+	s = append(s, "&descriptor.DescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Field != nil {
+		s = append(s, "Field: "+fmt.Sprintf("%#v", this.Field)+",\n")
+	}
+	if this.Extension != nil {
+		s = append(s, "Extension: "+fmt.Sprintf("%#v", this.Extension)+",\n")
+	}
+	if this.NestedType != nil {
+		s = append(s, "NestedType: "+fmt.Sprintf("%#v", this.NestedType)+",\n")
+	}
+	if this.EnumType != nil {
+		s = append(s, "EnumType: "+fmt.Sprintf("%#v", this.EnumType)+",\n")
+	}
+	if this.ExtensionRange != nil {
+		s = append(s, "ExtensionRange: "+fmt.Sprintf("%#v", this.ExtensionRange)+",\n")
+	}
+	if this.OneofDecl != nil {
+		s = append(s, "OneofDecl: "+fmt.Sprintf("%#v", this.OneofDecl)+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.ReservedRange != nil {
+		s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
+	}
+	if this.ReservedName != nil {
+		s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *DescriptorProto_ExtensionRange) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 7)
+	s = append(s, "&descriptor.DescriptorProto_ExtensionRange{")
+	if this.Start != nil {
+		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
+	}
+	if this.End != nil {
+		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *DescriptorProto_ReservedRange) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 6)
+	s = append(s, "&descriptor.DescriptorProto_ReservedRange{")
+	if this.Start != nil {
+		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
+	}
+	if this.End != nil {
+		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *ExtensionRangeOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&descriptor.ExtensionRangeOptions{")
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *FieldDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 14)
+	s = append(s, "&descriptor.FieldDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Number != nil {
+		s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
+	}
+	if this.Label != nil {
+		s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n")
+	}
+	if this.Type != nil {
+		s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n")
+	}
+	if this.TypeName != nil {
+		s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n")
+	}
+	if this.Extendee != nil {
+		s = append(s, "Extendee: "+valueToGoStringDescriptor(this.Extendee, "string")+",\n")
+	}
+	if this.DefaultValue != nil {
+		s = append(s, "DefaultValue: "+valueToGoStringDescriptor(this.DefaultValue, "string")+",\n")
+	}
+	if this.OneofIndex != nil {
+		s = append(s, "OneofIndex: "+valueToGoStringDescriptor(this.OneofIndex, "int32")+",\n")
+	}
+	if this.JsonName != nil {
+		s = append(s, "JsonName: "+valueToGoStringDescriptor(this.JsonName, "string")+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *OneofDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 6)
+	s = append(s, "&descriptor.OneofDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *EnumDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 9)
+	s = append(s, "&descriptor.EnumDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Value != nil {
+		s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.ReservedRange != nil {
+		s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n")
+	}
+	if this.ReservedName != nil {
+		s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *EnumDescriptorProto_EnumReservedRange) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 6)
+	s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{")
+	if this.Start != nil {
+		s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n")
+	}
+	if this.End != nil {
+		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *EnumValueDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 7)
+	s = append(s, "&descriptor.EnumValueDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Number != nil {
+		s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *ServiceDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 7)
+	s = append(s, "&descriptor.ServiceDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.Method != nil {
+		s = append(s, "Method: "+fmt.Sprintf("%#v", this.Method)+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *MethodDescriptorProto) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 10)
+	s = append(s, "&descriptor.MethodDescriptorProto{")
+	if this.Name != nil {
+		s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n")
+	}
+	if this.InputType != nil {
+		s = append(s, "InputType: "+valueToGoStringDescriptor(this.InputType, "string")+",\n")
+	}
+	if this.OutputType != nil {
+		s = append(s, "OutputType: "+valueToGoStringDescriptor(this.OutputType, "string")+",\n")
+	}
+	if this.Options != nil {
+		s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n")
+	}
+	if this.ClientStreaming != nil {
+		s = append(s, "ClientStreaming: "+valueToGoStringDescriptor(this.ClientStreaming, "bool")+",\n")
+	}
+	if this.ServerStreaming != nil {
+		s = append(s, "ServerStreaming: "+valueToGoStringDescriptor(this.ServerStreaming, "bool")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *FileOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 25)
+	s = append(s, "&descriptor.FileOptions{")
+	if this.JavaPackage != nil {
+		s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n")
+	}
+	if this.JavaOuterClassname != nil {
+		s = append(s, "JavaOuterClassname: "+valueToGoStringDescriptor(this.JavaOuterClassname, "string")+",\n")
+	}
+	if this.JavaMultipleFiles != nil {
+		s = append(s, "JavaMultipleFiles: "+valueToGoStringDescriptor(this.JavaMultipleFiles, "bool")+",\n")
+	}
+	if this.JavaGenerateEqualsAndHash != nil {
+		s = append(s, "JavaGenerateEqualsAndHash: "+valueToGoStringDescriptor(this.JavaGenerateEqualsAndHash, "bool")+",\n")
+	}
+	if this.JavaStringCheckUtf8 != nil {
+		s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n")
+	}
+	if this.OptimizeFor != nil {
+		s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n")
+	}
+	if this.GoPackage != nil {
+		s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n")
+	}
+	if this.CcGenericServices != nil {
+		s = append(s, "CcGenericServices: "+valueToGoStringDescriptor(this.CcGenericServices, "bool")+",\n")
+	}
+	if this.JavaGenericServices != nil {
+		s = append(s, "JavaGenericServices: "+valueToGoStringDescriptor(this.JavaGenericServices, "bool")+",\n")
+	}
+	if this.PyGenericServices != nil {
+		s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n")
+	}
+	if this.PhpGenericServices != nil {
+		s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n")
+	}
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.CcEnableArenas != nil {
+		s = append(s, "CcEnableArenas: "+valueToGoStringDescriptor(this.CcEnableArenas, "bool")+",\n")
+	}
+	if this.ObjcClassPrefix != nil {
+		s = append(s, "ObjcClassPrefix: "+valueToGoStringDescriptor(this.ObjcClassPrefix, "string")+",\n")
+	}
+	if this.CsharpNamespace != nil {
+		s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n")
+	}
+	if this.SwiftPrefix != nil {
+		s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n")
+	}
+	if this.PhpClassPrefix != nil {
+		s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n")
+	}
+	if this.PhpNamespace != nil {
+		s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n")
+	}
+	if this.PhpMetadataNamespace != nil {
+		s = append(s, "PhpMetadataNamespace: "+valueToGoStringDescriptor(this.PhpMetadataNamespace, "string")+",\n")
+	}
+	if this.RubyPackage != nil {
+		s = append(s, "RubyPackage: "+valueToGoStringDescriptor(this.RubyPackage, "string")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *MessageOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 9)
+	s = append(s, "&descriptor.MessageOptions{")
+	if this.MessageSetWireFormat != nil {
+		s = append(s, "MessageSetWireFormat: "+valueToGoStringDescriptor(this.MessageSetWireFormat, "bool")+",\n")
+	}
+	if this.NoStandardDescriptorAccessor != nil {
+		s = append(s, "NoStandardDescriptorAccessor: "+valueToGoStringDescriptor(this.NoStandardDescriptorAccessor, "bool")+",\n")
+	}
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.MapEntry != nil {
+		s = append(s, "MapEntry: "+valueToGoStringDescriptor(this.MapEntry, "bool")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *FieldOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 11)
+	s = append(s, "&descriptor.FieldOptions{")
+	if this.Ctype != nil {
+		s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n")
+	}
+	if this.Packed != nil {
+		s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n")
+	}
+	if this.Jstype != nil {
+		s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n")
+	}
+	if this.Lazy != nil {
+		s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n")
+	}
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.Weak != nil {
+		s = append(s, "Weak: "+valueToGoStringDescriptor(this.Weak, "bool")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *OneofOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&descriptor.OneofOptions{")
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *EnumOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 7)
+	s = append(s, "&descriptor.EnumOptions{")
+	if this.AllowAlias != nil {
+		s = append(s, "AllowAlias: "+valueToGoStringDescriptor(this.AllowAlias, "bool")+",\n")
+	}
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *EnumValueOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 6)
+	s = append(s, "&descriptor.EnumValueOptions{")
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *ServiceOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 6)
+	s = append(s, "&descriptor.ServiceOptions{")
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *MethodOptions) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 7)
+	s = append(s, "&descriptor.MethodOptions{")
+	if this.Deprecated != nil {
+		s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n")
+	}
+	if this.IdempotencyLevel != nil {
+		s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n")
+	}
+	if this.UninterpretedOption != nil {
+		s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n")
+	}
+	s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n")
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *UninterpretedOption) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 11)
+	s = append(s, "&descriptor.UninterpretedOption{")
+	if this.Name != nil {
+		s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n")
+	}
+	if this.IdentifierValue != nil {
+		s = append(s, "IdentifierValue: "+valueToGoStringDescriptor(this.IdentifierValue, "string")+",\n")
+	}
+	if this.PositiveIntValue != nil {
+		s = append(s, "PositiveIntValue: "+valueToGoStringDescriptor(this.PositiveIntValue, "uint64")+",\n")
+	}
+	if this.NegativeIntValue != nil {
+		s = append(s, "NegativeIntValue: "+valueToGoStringDescriptor(this.NegativeIntValue, "int64")+",\n")
+	}
+	if this.DoubleValue != nil {
+		s = append(s, "DoubleValue: "+valueToGoStringDescriptor(this.DoubleValue, "float64")+",\n")
+	}
+	if this.StringValue != nil {
+		s = append(s, "StringValue: "+valueToGoStringDescriptor(this.StringValue, "byte")+",\n")
+	}
+	if this.AggregateValue != nil {
+		s = append(s, "AggregateValue: "+valueToGoStringDescriptor(this.AggregateValue, "string")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *UninterpretedOption_NamePart) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 6)
+	s = append(s, "&descriptor.UninterpretedOption_NamePart{")
+	if this.NamePart != nil {
+		s = append(s, "NamePart: "+valueToGoStringDescriptor(this.NamePart, "string")+",\n")
+	}
+	if this.IsExtension != nil {
+		s = append(s, "IsExtension: "+valueToGoStringDescriptor(this.IsExtension, "bool")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *SourceCodeInfo) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&descriptor.SourceCodeInfo{")
+	if this.Location != nil {
+		s = append(s, "Location: "+fmt.Sprintf("%#v", this.Location)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *SourceCodeInfo_Location) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 9)
+	s = append(s, "&descriptor.SourceCodeInfo_Location{")
+	if this.Path != nil {
+		s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
+	}
+	if this.Span != nil {
+		s = append(s, "Span: "+fmt.Sprintf("%#v", this.Span)+",\n")
+	}
+	if this.LeadingComments != nil {
+		s = append(s, "LeadingComments: "+valueToGoStringDescriptor(this.LeadingComments, "string")+",\n")
+	}
+	if this.TrailingComments != nil {
+		s = append(s, "TrailingComments: "+valueToGoStringDescriptor(this.TrailingComments, "string")+",\n")
+	}
+	if this.LeadingDetachedComments != nil {
+		s = append(s, "LeadingDetachedComments: "+fmt.Sprintf("%#v", this.LeadingDetachedComments)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *GeneratedCodeInfo) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 5)
+	s = append(s, "&descriptor.GeneratedCodeInfo{")
+	if this.Annotation != nil {
+		s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func (this *GeneratedCodeInfo_Annotation) GoString() string {
+	if this == nil {
+		return "nil"
+	}
+	s := make([]string, 0, 8)
+	s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{")
+	if this.Path != nil {
+		s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n")
+	}
+	if this.SourceFile != nil {
+		s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n")
+	}
+	if this.Begin != nil {
+		s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n")
+	}
+	if this.End != nil {
+		s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n")
+	}
+	if this.XXX_unrecognized != nil {
+		s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n")
+	}
+	s = append(s, "}")
+	return strings.Join(s, "")
+}
+func valueToGoStringDescriptor(v interface{}, typ string) string {
+	rv := reflect.ValueOf(v)
+	if rv.IsNil() {
+		return "nil"
+	}
+	pv := reflect.Indirect(rv).Interface()
+	return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
+}
+func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string {
+	e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m)
+	if e == nil {
+		return "nil"
+	}
+	s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"
+	keys := make([]int, 0, len(e))
+	for k := range e {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+	ss := []string{}
+	for _, k := range keys {
+		ss = append(ss, strconv.Itoa(k)+": "+e[int32(k)].GoString())
+	}
+	s += strings.Join(ss, ",") + "})"
+	return s
+}
diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
new file mode 100644
index 0000000..e0846a3
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
@@ -0,0 +1,390 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package descriptor
+
+import (
+	"strings"
+)
+
+func (msg *DescriptorProto) GetMapFields() (*FieldDescriptorProto, *FieldDescriptorProto) {
+	if !msg.GetOptions().GetMapEntry() {
+		return nil, nil
+	}
+	return msg.GetField()[0], msg.GetField()[1]
+}
+
+func dotToUnderscore(r rune) rune {
+	if r == '.' {
+		return '_'
+	}
+	return r
+}
+
+func (field *FieldDescriptorProto) WireType() (wire int) {
+	switch *field.Type {
+	case FieldDescriptorProto_TYPE_DOUBLE:
+		return 1
+	case FieldDescriptorProto_TYPE_FLOAT:
+		return 5
+	case FieldDescriptorProto_TYPE_INT64:
+		return 0
+	case FieldDescriptorProto_TYPE_UINT64:
+		return 0
+	case FieldDescriptorProto_TYPE_INT32:
+		return 0
+	case FieldDescriptorProto_TYPE_UINT32:
+		return 0
+	case FieldDescriptorProto_TYPE_FIXED64:
+		return 1
+	case FieldDescriptorProto_TYPE_FIXED32:
+		return 5
+	case FieldDescriptorProto_TYPE_BOOL:
+		return 0
+	case FieldDescriptorProto_TYPE_STRING:
+		return 2
+	case FieldDescriptorProto_TYPE_GROUP:
+		return 2
+	case FieldDescriptorProto_TYPE_MESSAGE:
+		return 2
+	case FieldDescriptorProto_TYPE_BYTES:
+		return 2
+	case FieldDescriptorProto_TYPE_ENUM:
+		return 0
+	case FieldDescriptorProto_TYPE_SFIXED32:
+		return 5
+	case FieldDescriptorProto_TYPE_SFIXED64:
+		return 1
+	case FieldDescriptorProto_TYPE_SINT32:
+		return 0
+	case FieldDescriptorProto_TYPE_SINT64:
+		return 0
+	}
+	panic("unreachable")
+}
+
+func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) {
+	packed := field.IsPacked()
+	wireType := field.WireType()
+	fieldNumber := field.GetNumber()
+	if packed {
+		wireType = 2
+	}
+	x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
+	return x
+}
+
+func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) {
+	packed := field.IsPacked3()
+	wireType := field.WireType()
+	fieldNumber := field.GetNumber()
+	if packed {
+		wireType = 2
+	}
+	x = uint64(uint32(fieldNumber)<<3 | uint32(wireType))
+	return x
+}
+
+func (field *FieldDescriptorProto) GetKey() []byte {
+	x := field.GetKeyUint64()
+	i := 0
+	keybuf := make([]byte, 0)
+	for i = 0; x > 127; i++ {
+		keybuf = append(keybuf, 0x80|uint8(x&0x7F))
+		x >>= 7
+	}
+	keybuf = append(keybuf, uint8(x))
+	return keybuf
+}
+
+func (field *FieldDescriptorProto) GetKey3() []byte {
+	x := field.GetKey3Uint64()
+	i := 0
+	keybuf := make([]byte, 0)
+	for i = 0; x > 127; i++ {
+		keybuf = append(keybuf, 0x80|uint8(x&0x7F))
+		x >>= 7
+	}
+	keybuf = append(keybuf, uint8(x))
+	return keybuf
+}
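+
+// Example (illustrative): for a field with number 16 and wire type 2
+// (length-delimited), the key is 16<<3|2 = 130 (0x82), which the varint
+// loop above encodes as []byte{0x82, 0x01}.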
+
+func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto {
+	msg := desc.GetMessage(packageName, messageName)
+	if msg == nil {
+		return nil
+	}
+	for _, field := range msg.GetField() {
+		if field.GetName() == fieldName {
+			return field
+		}
+	}
+	return nil
+}
+
+func (file *FileDescriptorProto) GetMessage(typeName string) *DescriptorProto {
+	for _, msg := range file.GetMessageType() {
+		if msg.GetName() == typeName {
+			return msg
+		}
+		nes := file.GetNestedMessage(msg, strings.TrimPrefix(typeName, msg.GetName()+"."))
+		if nes != nil {
+			return nes
+		}
+	}
+	return nil
+}
+
+func (file *FileDescriptorProto) GetNestedMessage(msg *DescriptorProto, typeName string) *DescriptorProto {
+	for _, nes := range msg.GetNestedType() {
+		if nes.GetName() == typeName {
+			return nes
+		}
+		res := file.GetNestedMessage(nes, strings.TrimPrefix(typeName, nes.GetName()+"."))
+		if res != nil {
+			return res
+		}
+	}
+	return nil
+}
+
+func (desc *FileDescriptorSet) GetMessage(packageName string, typeName string) *DescriptorProto {
+	for _, file := range desc.GetFile() {
+		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
+			continue
+		}
+		for _, msg := range file.GetMessageType() {
+			if msg.GetName() == typeName {
+				return msg
+			}
+		}
+		for _, msg := range file.GetMessageType() {
+			for _, nes := range msg.GetNestedType() {
+				if nes.GetName() == typeName {
+					return nes
+				}
+				if msg.GetName()+"."+nes.GetName() == typeName {
+					return nes
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (desc *FileDescriptorSet) IsProto3(packageName string, typeName string) bool {
+	for _, file := range desc.GetFile() {
+		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
+			continue
+		}
+		for _, msg := range file.GetMessageType() {
+			if msg.GetName() == typeName {
+				return file.GetSyntax() == "proto3"
+			}
+		}
+		for _, msg := range file.GetMessageType() {
+			for _, nes := range msg.GetNestedType() {
+				if nes.GetName() == typeName {
+					return file.GetSyntax() == "proto3"
+				}
+				if msg.GetName()+"."+nes.GetName() == typeName {
+					return file.GetSyntax() == "proto3"
+				}
+			}
+		}
+	}
+	return false
+}
+
+func (msg *DescriptorProto) IsExtendable() bool {
+	return len(msg.GetExtensionRange()) > 0
+}
+
+func (desc *FileDescriptorSet) FindExtension(packageName string, typeName string, fieldName string) (extPackageName string, field *FieldDescriptorProto) {
+	parent := desc.GetMessage(packageName, typeName)
+	if parent == nil {
+		return "", nil
+	}
+	if !parent.IsExtendable() {
+		return "", nil
+	}
+	extendee := "." + packageName + "." + typeName
+	for _, file := range desc.GetFile() {
+		for _, ext := range file.GetExtension() {
+			if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
+				if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
+					continue
+				}
+			} else {
+				if ext.GetExtendee() != extendee {
+					continue
+				}
+			}
+			if ext.GetName() == fieldName {
+				return file.GetPackage(), ext
+			}
+		}
+	}
+	return "", nil
+}
+
+func (desc *FileDescriptorSet) FindExtensionByFieldNumber(packageName string, typeName string, fieldNum int32) (extPackageName string, field *FieldDescriptorProto) {
+	parent := desc.GetMessage(packageName, typeName)
+	if parent == nil {
+		return "", nil
+	}
+	if !parent.IsExtendable() {
+		return "", nil
+	}
+	extendee := "." + packageName + "." + typeName
+	for _, file := range desc.GetFile() {
+		for _, ext := range file.GetExtension() {
+			if strings.Map(dotToUnderscore, file.GetPackage()) == strings.Map(dotToUnderscore, packageName) {
+				if !(ext.GetExtendee() == typeName || ext.GetExtendee() == extendee) {
+					continue
+				}
+			} else {
+				if ext.GetExtendee() != extendee {
+					continue
+				}
+			}
+			if ext.GetNumber() == fieldNum {
+				return file.GetPackage(), ext
+			}
+		}
+	}
+	return "", nil
+}
+
+func (desc *FileDescriptorSet) FindMessage(packageName string, typeName string, fieldName string) (msgPackageName string, msgName string) {
+	parent := desc.GetMessage(packageName, typeName)
+	if parent == nil {
+		return "", ""
+	}
+	field := parent.GetFieldDescriptor(fieldName)
+	if field == nil {
+		var extPackageName string
+		extPackageName, field = desc.FindExtension(packageName, typeName, fieldName)
+		if field == nil {
+			return "", ""
+		}
+		packageName = extPackageName
+	}
+	typeNames := strings.Split(field.GetTypeName(), ".")
+	if len(typeNames) == 1 {
+		msg := desc.GetMessage(packageName, typeName)
+		if msg == nil {
+			return "", ""
+		}
+		return packageName, msg.GetName()
+	}
+	if len(typeNames) > 2 {
+		for i := 1; i < len(typeNames)-1; i++ {
+			packageName = strings.Join(typeNames[1:len(typeNames)-i], ".")
+			typeName = strings.Join(typeNames[len(typeNames)-i:], ".")
+			msg := desc.GetMessage(packageName, typeName)
+			if msg != nil {
+				typeNames := strings.Split(msg.GetName(), ".")
+				if len(typeNames) == 1 {
+					return packageName, msg.GetName()
+				}
+				return strings.Join(typeNames[1:len(typeNames)-1], "."), typeNames[len(typeNames)-1]
+			}
+		}
+	}
+	return "", ""
+}
+
+func (msg *DescriptorProto) GetFieldDescriptor(fieldName string) *FieldDescriptorProto {
+	for _, field := range msg.GetField() {
+		if field.GetName() == fieldName {
+			return field
+		}
+	}
+	return nil
+}
+
+func (desc *FileDescriptorSet) GetEnum(packageName string, typeName string) *EnumDescriptorProto {
+	for _, file := range desc.GetFile() {
+		if strings.Map(dotToUnderscore, file.GetPackage()) != strings.Map(dotToUnderscore, packageName) {
+			continue
+		}
+		for _, enum := range file.GetEnumType() {
+			if enum.GetName() == typeName {
+				return enum
+			}
+		}
+	}
+	return nil
+}
+
+func (f *FieldDescriptorProto) IsEnum() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_ENUM
+}
+
+func (f *FieldDescriptorProto) IsMessage() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_MESSAGE
+}
+
+func (f *FieldDescriptorProto) IsBytes() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_BYTES
+}
+
+func (f *FieldDescriptorProto) IsRepeated() bool {
+	return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REPEATED
+}
+
+func (f *FieldDescriptorProto) IsString() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_STRING
+}
+
+func (f *FieldDescriptorProto) IsBool() bool {
+	return *f.Type == FieldDescriptorProto_TYPE_BOOL
+}
+
+func (f *FieldDescriptorProto) IsRequired() bool {
+	return f.Label != nil && *f.Label == FieldDescriptorProto_LABEL_REQUIRED
+}
+
+func (f *FieldDescriptorProto) IsPacked() bool {
+	return f.Options != nil && f.GetOptions().GetPacked()
+}
+
+func (f *FieldDescriptorProto) IsPacked3() bool {
+	if f.IsRepeated() && f.IsScalar() {
+		if f.Options == nil || f.GetOptions().Packed == nil {
+			return true
+		}
+		return f.Options != nil && f.GetOptions().GetPacked()
+	}
+	return false
+}
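+
+// Note (illustrative): proto3 packs repeated scalar fields by default, so
+// IsPacked3 treats a missing "packed" option as packed, whereas IsPacked
+// (proto2 semantics) requires the option to be set explicitly.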
+
+func (m *DescriptorProto) HasExtension() bool {
+	return len(m.ExtensionRange) > 0
+}
diff --git a/vendor/github.com/golang/protobuf/AUTHORS b/vendor/github.com/golang/protobuf/AUTHORS
new file mode 100644
index 0000000..15167cd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/vendor/github.com/golang/protobuf/CONTRIBUTORS b/vendor/github.com/golang/protobuf/CONTRIBUTORS
new file mode 100644
index 0000000..1c4577e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/vendor/github.com/golang/protobuf/LICENSE b/vendor/github.com/golang/protobuf/LICENSE
new file mode 100644
index 0000000..0f64693
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2010 The Go Authors.  All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+    * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+    * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/vendor/github.com/golang/protobuf/proto/buffer.go b/vendor/github.com/golang/protobuf/proto/buffer.go
new file mode 100644
index 0000000..e810e6f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/buffer.go
@@ -0,0 +1,324 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+
+	"google.golang.org/protobuf/encoding/prototext"
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	WireVarint     = 0
+	WireFixed32    = 5
+	WireFixed64    = 1
+	WireBytes      = 2
+	WireStartGroup = 3
+	WireEndGroup   = 4
+)
+
+// EncodeVarint returns the varint encoded bytes of v.
+func EncodeVarint(v uint64) []byte {
+	return protowire.AppendVarint(nil, v)
+}
+
+// SizeVarint returns the length of the varint encoded bytes of v.
+// This is equal to len(EncodeVarint(v)).
+func SizeVarint(v uint64) int {
+	return protowire.SizeVarint(v)
+}
+
+// DecodeVarint parses a varint encoded integer from b,
+// returning the integer value and the length of the varint.
+// It returns (0, 0) if there is a parse error.
+func DecodeVarint(b []byte) (uint64, int) {
+	v, n := protowire.ConsumeVarint(b)
+	if n < 0 {
+		return 0, 0
+	}
+	return v, n
+}
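+
+// Example (illustrative): EncodeVarint(300) returns []byte{0xac, 0x02}, and
+// DecodeVarint on those bytes returns (300, 2). Each byte carries seven value
+// bits, least-significant group first, with the high bit set on every byte
+// except the last.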
+
+// Buffer is a buffer for encoding and decoding the protobuf wire format.
+// It may be reused between invocations to reduce memory usage.
+type Buffer struct {
+	buf           []byte
+	idx           int
+	deterministic bool
+}
+
+// NewBuffer allocates a new Buffer initialized with buf,
+// where the contents of buf are considered the unread portion of the buffer.
+func NewBuffer(buf []byte) *Buffer {
+	return &Buffer{buf: buf}
+}
+
+// SetDeterministic specifies whether to use deterministic serialization.
+//
+// Deterministic serialization guarantees that for a given binary, equal
+// messages will always be serialized to the same bytes. This implies:
+//
+//   - Repeated serialization of a message will return the same bytes.
+//   - Different processes of the same binary (which may be executing on
+//     different machines) will serialize equal messages to the same bytes.
+//
+// Note that the deterministic serialization is NOT canonical across
+// languages. It is not guaranteed to remain stable over time. It is unstable
+// across different builds with schema changes due to unknown fields.
+// Users who need canonical serialization (e.g., persistent storage in a
+// canonical form, fingerprinting, etc.) should define their own
+// canonicalization specification and implement their own serializer rather
+// than relying on this API.
+//
+// If deterministic serialization is requested, map entries will be sorted
+// by keys in lexicographical order. This is an implementation detail and
+// subject to change.
+func (b *Buffer) SetDeterministic(deterministic bool) {
+	b.deterministic = deterministic
+}
+
+// SetBuf sets buf as the internal buffer,
+// where the contents of buf are considered the unread portion of the buffer.
+func (b *Buffer) SetBuf(buf []byte) {
+	b.buf = buf
+	b.idx = 0
+}
+
+// Reset clears the internal buffer of all written and unread data.
+func (b *Buffer) Reset() {
+	b.buf = b.buf[:0]
+	b.idx = 0
+}
+
+// Bytes returns the internal buffer.
+func (b *Buffer) Bytes() []byte {
+	return b.buf
+}
+
+// Unread returns the unread portion of the buffer.
+func (b *Buffer) Unread() []byte {
+	return b.buf[b.idx:]
+}
+
+// Marshal appends the wire-format encoding of m to the buffer.
+func (b *Buffer) Marshal(m Message) error {
+	var err error
+	b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+	return err
+}
+
+// Unmarshal parses the wire-format message in the buffer and
+// places the decoded results in m.
+// It does not reset m before unmarshaling.
+func (b *Buffer) Unmarshal(m Message) error {
+	err := UnmarshalMerge(b.Unread(), m)
+	b.idx = len(b.buf)
+	return err
+}
+
+type unknownFields struct{ XXX_unrecognized protoimpl.UnknownFields }
+
+func (m *unknownFields) String() string { panic("not implemented") }
+func (m *unknownFields) Reset()         { panic("not implemented") }
+func (m *unknownFields) ProtoMessage()  { panic("not implemented") }
+
+// DebugPrint dumps the encoded bytes of b with a header and footer including s
+// to stdout. This is only intended for debugging.
+func (*Buffer) DebugPrint(s string, b []byte) {
+	m := MessageReflect(new(unknownFields))
+	m.SetUnknown(b)
+	b, _ = prototext.MarshalOptions{AllowPartial: true, Indent: "\t"}.Marshal(m.Interface())
+	fmt.Printf("==== %s ====\n%s==== %s ====\n", s, b, s)
+}
+
+// EncodeVarint appends an unsigned varint encoding to the buffer.
+func (b *Buffer) EncodeVarint(v uint64) error {
+	b.buf = protowire.AppendVarint(b.buf, v)
+	return nil
+}
+
+// EncodeZigzag32 appends a 32-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag32(v uint64) error {
+	return b.EncodeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31))))
+}
+
+// EncodeZigzag64 appends a 64-bit zig-zag varint encoding to the buffer.
+func (b *Buffer) EncodeZigzag64(v uint64) error {
+	return b.EncodeVarint(uint64((uint64(v) << 1) ^ uint64((int64(v) >> 63))))
+}
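+
+// Illustrative mapping for the zig-zag encodings above: 0 -> 0, -1 -> 1,
+// 1 -> 2, -2 -> 3, and so on, so values of small magnitude stay small when
+// subsequently encoded as varints.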
+
+// EncodeFixed32 appends a 32-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed32(v uint64) error {
+	b.buf = protowire.AppendFixed32(b.buf, uint32(v))
+	return nil
+}
+
+// EncodeFixed64 appends a 64-bit little-endian integer to the buffer.
+func (b *Buffer) EncodeFixed64(v uint64) error {
+	b.buf = protowire.AppendFixed64(b.buf, uint64(v))
+	return nil
+}
+
+// EncodeRawBytes appends length-prefixed raw bytes to the buffer.
+func (b *Buffer) EncodeRawBytes(v []byte) error {
+	b.buf = protowire.AppendBytes(b.buf, v)
+	return nil
+}
+
+// EncodeStringBytes appends length-prefixed raw bytes to the buffer.
+// It does not validate whether v contains valid UTF-8.
+func (b *Buffer) EncodeStringBytes(v string) error {
+	b.buf = protowire.AppendString(b.buf, v)
+	return nil
+}
+
+// EncodeMessage appends a length-prefixed encoded message to the buffer.
+func (b *Buffer) EncodeMessage(m Message) error {
+	var err error
+	b.buf = protowire.AppendVarint(b.buf, uint64(Size(m)))
+	b.buf, err = marshalAppend(b.buf, m, b.deterministic)
+	return err
+}
+
+// DecodeVarint consumes an encoded unsigned varint from the buffer.
+func (b *Buffer) DecodeVarint() (uint64, error) {
+	v, n := protowire.ConsumeVarint(b.buf[b.idx:])
+	if n < 0 {
+		return 0, protowire.ParseError(n)
+	}
+	b.idx += n
+	return uint64(v), nil
+}
+
+// DecodeZigzag32 consumes an encoded 32-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag32() (uint64, error) {
+	v, err := b.DecodeVarint()
+	if err != nil {
+		return 0, err
+	}
+	return uint64((uint32(v) >> 1) ^ uint32((int32(v&1)<<31)>>31)), nil
+}
+
+// DecodeZigzag64 consumes an encoded 64-bit zig-zag varint from the buffer.
+func (b *Buffer) DecodeZigzag64() (uint64, error) {
+	v, err := b.DecodeVarint()
+	if err != nil {
+		return 0, err
+	}
+	return uint64((uint64(v) >> 1) ^ uint64((int64(v&1)<<63)>>63)), nil
+}
+
+// DecodeFixed32 consumes a 32-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed32() (uint64, error) {
+	v, n := protowire.ConsumeFixed32(b.buf[b.idx:])
+	if n < 0 {
+		return 0, protowire.ParseError(n)
+	}
+	b.idx += n
+	return uint64(v), nil
+}
+
+// DecodeFixed64 consumes a 64-bit little-endian integer from the buffer.
+func (b *Buffer) DecodeFixed64() (uint64, error) {
+	v, n := protowire.ConsumeFixed64(b.buf[b.idx:])
+	if n < 0 {
+		return 0, protowire.ParseError(n)
+	}
+	b.idx += n
+	return uint64(v), nil
+}
+
+// DecodeRawBytes consumes length-prefixed raw bytes from the buffer.
+// If alloc is specified, it returns a copy of the raw bytes
+// rather than a sub-slice of the buffer.
+func (b *Buffer) DecodeRawBytes(alloc bool) ([]byte, error) {
+	v, n := protowire.ConsumeBytes(b.buf[b.idx:])
+	if n < 0 {
+		return nil, protowire.ParseError(n)
+	}
+	b.idx += n
+	if alloc {
+		v = append([]byte(nil), v...)
+	}
+	return v, nil
+}
+
+// DecodeStringBytes consumes length-prefixed raw bytes from the buffer.
+// It does not validate whether the raw bytes contain valid UTF-8.
+func (b *Buffer) DecodeStringBytes() (string, error) {
+	v, n := protowire.ConsumeString(b.buf[b.idx:])
+	if n < 0 {
+		return "", protowire.ParseError(n)
+	}
+	b.idx += n
+	return v, nil
+}
+
+// DecodeMessage consumes a length-prefixed message from the buffer.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeMessage(m Message) error {
+	v, err := b.DecodeRawBytes(false)
+	if err != nil {
+		return err
+	}
+	return UnmarshalMerge(v, m)
+}
+
+// DecodeGroup consumes a message group from the buffer.
+// It assumes that the start group marker has already been consumed and
+// consumes all bytes until (and including) the end group marker.
+// It does not reset m before unmarshaling.
+func (b *Buffer) DecodeGroup(m Message) error {
+	v, n, err := consumeGroup(b.buf[b.idx:])
+	if err != nil {
+		return err
+	}
+	b.idx += n
+	return UnmarshalMerge(v, m)
+}
+
+// consumeGroup parses b until it finds an end group marker, returning
+// the raw bytes of the message (excluding the end group marker) and
+// the total length of the message (including the end group marker).
+func consumeGroup(b []byte) ([]byte, int, error) {
+	b0 := b
+	depth := 1 // assume this follows a start group marker
+	for {
+		_, wtyp, tagLen := protowire.ConsumeTag(b)
+		if tagLen < 0 {
+			return nil, 0, protowire.ParseError(tagLen)
+		}
+		b = b[tagLen:]
+
+		var valLen int
+		switch wtyp {
+		case protowire.VarintType:
+			_, valLen = protowire.ConsumeVarint(b)
+		case protowire.Fixed32Type:
+			_, valLen = protowire.ConsumeFixed32(b)
+		case protowire.Fixed64Type:
+			_, valLen = protowire.ConsumeFixed64(b)
+		case protowire.BytesType:
+			_, valLen = protowire.ConsumeBytes(b)
+		case protowire.StartGroupType:
+			depth++
+		case protowire.EndGroupType:
+			depth--
+		default:
+			return nil, 0, errors.New("proto: cannot parse reserved wire type")
+		}
+		if valLen < 0 {
+			return nil, 0, protowire.ParseError(valLen)
+		}
+		b = b[valLen:]
+
+		if depth == 0 {
+			return b0[:len(b0)-len(b)-tagLen], len(b0) - len(b), nil
+		}
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/defaults.go b/vendor/github.com/golang/protobuf/proto/defaults.go
new file mode 100644
index 0000000..d399bf0
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/defaults.go
@@ -0,0 +1,63 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// SetDefaults sets unpopulated scalar fields to their default values.
+// Fields within a oneof are not set even if they have a default value.
+// SetDefaults is recursively called upon any populated message fields.
+func SetDefaults(m Message) {
+	if m != nil {
+		setDefaults(MessageReflect(m))
+	}
+}
+
+func setDefaults(m protoreflect.Message) {
+	fds := m.Descriptor().Fields()
+	for i := 0; i < fds.Len(); i++ {
+		fd := fds.Get(i)
+		if !m.Has(fd) {
+			if fd.HasDefault() && fd.ContainingOneof() == nil {
+				v := fd.Default()
+				if fd.Kind() == protoreflect.BytesKind {
+					v = protoreflect.ValueOf(append([]byte(nil), v.Bytes()...)) // copy the default bytes
+				}
+				m.Set(fd, v)
+			}
+			continue
+		}
+	}
+
+	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+		switch {
+		// Handle singular message.
+		case fd.Cardinality() != protoreflect.Repeated:
+			if fd.Message() != nil {
+				setDefaults(m.Get(fd).Message())
+			}
+		// Handle list of messages.
+		case fd.IsList():
+			if fd.Message() != nil {
+				ls := m.Get(fd).List()
+				for i := 0; i < ls.Len(); i++ {
+					setDefaults(ls.Get(i).Message())
+				}
+			}
+		// Handle map of messages.
+		case fd.IsMap():
+			if fd.MapValue().Message() != nil {
+				ms := m.Get(fd).Map()
+				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+					setDefaults(v.Message())
+					return true
+				})
+			}
+		}
+		return true
+	})
+}
diff --git a/vendor/github.com/golang/protobuf/proto/deprecated.go b/vendor/github.com/golang/protobuf/proto/deprecated.go
new file mode 100644
index 0000000..e8db57e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/deprecated.go
@@ -0,0 +1,113 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"strconv"
+
+	protoV2 "google.golang.org/protobuf/proto"
+)
+
+var (
+	// Deprecated: No longer returned.
+	ErrNil = errors.New("proto: Marshal called with nil")
+
+	// Deprecated: No longer returned.
+	ErrTooLarge = errors.New("proto: message encodes to over 2 GB")
+
+	// Deprecated: No longer returned.
+	ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof")
+)
+
+// Deprecated: Do not use.
+type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 }
+
+// Deprecated: Do not use.
+func GetStats() Stats { return Stats{} }
+
+// Deprecated: Do not use.
+func MarshalMessageSet(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func UnmarshalMessageSet([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func MarshalMessageSetJSON(interface{}) ([]byte, error) {
+	return nil, errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func UnmarshalMessageSetJSON([]byte, interface{}) error {
+	return errors.New("proto: not implemented")
+}
+
+// Deprecated: Do not use.
+func RegisterMessageSetType(Message, int32, string) {}
+
+// Deprecated: Do not use.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// Deprecated: Do not use.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
+
+// Deprecated: Do not use; this type existed for internal use only.
+type InternalMessageInfo struct{}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) DiscardUnknown(m Message) {
+	DiscardUnknown(m)
+}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) Marshal(b []byte, m Message, deterministic bool) ([]byte, error) {
+	return protoV2.MarshalOptions{Deterministic: deterministic}.MarshalAppend(b, MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) Merge(dst, src Message) {
+	protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) Size(m Message) int {
+	return protoV2.Size(MessageV2(m))
+}
+
+// Deprecated: Do not use; this method existed for internal use only.
+func (*InternalMessageInfo) Unmarshal(m Message, b []byte) error {
+	return protoV2.UnmarshalOptions{Merge: true}.Unmarshal(b, MessageV2(m))
+}
diff --git a/vendor/github.com/golang/protobuf/proto/discard.go b/vendor/github.com/golang/protobuf/proto/discard.go
new file mode 100644
index 0000000..2187e87
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/discard.go
@@ -0,0 +1,58 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"google.golang.org/protobuf/reflect/protoreflect"
+)
+
+// DiscardUnknown recursively discards all unknown fields from this message
+// and all embedded messages.
+//
+// When unmarshaling a message with unrecognized fields, the tags and values
+// of such fields are preserved in the Message. This allows a later call to
+// marshal to be able to produce a message that continues to have those
+// unrecognized fields. To avoid this, DiscardUnknown is used to
+// explicitly clear the unknown fields after unmarshaling.
+func DiscardUnknown(m Message) {
+	if m != nil {
+		discardUnknown(MessageReflect(m))
+	}
+}
+
+func discardUnknown(m protoreflect.Message) {
+	m.Range(func(fd protoreflect.FieldDescriptor, val protoreflect.Value) bool {
+		switch {
+		// Handle singular message.
+		case fd.Cardinality() != protoreflect.Repeated:
+			if fd.Message() != nil {
+				discardUnknown(m.Get(fd).Message())
+			}
+		// Handle list of messages.
+		case fd.IsList():
+			if fd.Message() != nil {
+				ls := m.Get(fd).List()
+				for i := 0; i < ls.Len(); i++ {
+					discardUnknown(ls.Get(i).Message())
+				}
+			}
+		// Handle map of messages.
+		case fd.IsMap():
+			if fd.MapValue().Message() != nil {
+				ms := m.Get(fd).Map()
+				ms.Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
+					discardUnknown(v.Message())
+					return true
+				})
+			}
+		}
+		return true
+	})
+
+	// Discard unknown fields.
+	if len(m.GetUnknown()) > 0 {
+		m.SetUnknown(nil)
+	}
+}
diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 0000000..42fc120
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,356 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/runtime/protoiface"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+type (
+	// ExtensionDesc represents an extension descriptor and
+	// is used to interact with an extension field in a message.
+	//
+	// Variables of this type are generated in code by protoc-gen-go.
+	ExtensionDesc = protoimpl.ExtensionInfo
+
+	// ExtensionRange represents a range of message extensions.
+	// Used in code generated by protoc-gen-go.
+	ExtensionRange = protoiface.ExtensionRangeV1
+
+	// Deprecated: Do not use; this is an internal type.
+	Extension = protoimpl.ExtensionFieldV1
+
+	// Deprecated: Do not use; this is an internal type.
+	XXX_InternalExtensions = protoimpl.ExtensionFields
+)
+
+// ErrMissingExtension is returned by GetExtension when the extension is not present.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+var errNotExtendable = errors.New("proto: not an extendable proto.Message")
+
+// HasExtension reports whether the extension field is present in m
+// either as an explicitly populated field or as an unknown field.
+func HasExtension(m Message, xt *ExtensionDesc) (has bool) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
+		return false
+	}
+
+	// Check whether any populated known field matches the field number.
+	xtd := xt.TypeDescriptor()
+	if isValidExtension(mr.Descriptor(), xtd) {
+		has = mr.Has(xtd)
+	} else {
+		mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+			has = int32(fd.Number()) == xt.Field
+			return !has
+		})
+	}
+
+	// Check whether any unknown field matches the field number.
+	for b := mr.GetUnknown(); !has && len(b) > 0; {
+		num, _, n := protowire.ConsumeField(b)
+		has = int32(num) == xt.Field
+		b = b[n:]
+	}
+	return has
+}
+
+// ClearExtension removes the extension field from m
+// either as an explicitly populated field or as an unknown field.
+func ClearExtension(m Message, xt *ExtensionDesc) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
+		return
+	}
+
+	xtd := xt.TypeDescriptor()
+	if isValidExtension(mr.Descriptor(), xtd) {
+		mr.Clear(xtd)
+	} else {
+		mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+			if int32(fd.Number()) == xt.Field {
+				mr.Clear(fd)
+				return false
+			}
+			return true
+		})
+	}
+	clearUnknown(mr, fieldNum(xt.Field))
+}
+
+// ClearAllExtensions clears all extensions from m.
+// This includes populated fields and unknown fields in the extension range.
+func ClearAllExtensions(m Message) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
+		return
+	}
+
+	mr.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
+		if fd.IsExtension() {
+			mr.Clear(fd)
+		}
+		return true
+	})
+	clearUnknown(mr, mr.Descriptor().ExtensionRanges())
+}
+
+// GetExtension retrieves a proto2 extended field from m.
+//
+// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil),
+// then GetExtension parses the encoded field and returns a Go value of the specified type.
+// If the field is not present, then the default value is returned (if one is specified),
+// otherwise ErrMissingExtension is reported.
+//
+// If the descriptor is type incomplete (i.e., ExtensionDesc.ExtensionType is nil),
+// then GetExtension returns the raw encoded bytes for the extension field.
+func GetExtension(m Message, xt *ExtensionDesc) (interface{}, error) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+		return nil, errNotExtendable
+	}
+
+	// Retrieve the unknown fields for this extension field.
+	var bo protoreflect.RawFields
+	for bi := mr.GetUnknown(); len(bi) > 0; {
+		num, _, n := protowire.ConsumeField(bi)
+		if int32(num) == xt.Field {
+			bo = append(bo, bi[:n]...)
+		}
+		bi = bi[n:]
+	}
+
+	// For type incomplete descriptors, only retrieve the unknown fields.
+	if xt.ExtensionType == nil {
+		return []byte(bo), nil
+	}
+
+	// If the extension field only exists as unknown fields, unmarshal it.
+	// This is rarely done since proto.Unmarshal eagerly unmarshals extensions.
+	xtd := xt.TypeDescriptor()
+	if !isValidExtension(mr.Descriptor(), xtd) {
+		return nil, fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
+	}
+	if !mr.Has(xtd) && len(bo) > 0 {
+		m2 := mr.New()
+		if err := (proto.UnmarshalOptions{
+			Resolver: extensionResolver{xt},
+		}.Unmarshal(bo, m2.Interface())); err != nil {
+			return nil, err
+		}
+		if m2.Has(xtd) {
+			mr.Set(xtd, m2.Get(xtd))
+			clearUnknown(mr, fieldNum(xt.Field))
+		}
+	}
+
+	// Check whether the message has the extension field set or a default.
+	var pv protoreflect.Value
+	switch {
+	case mr.Has(xtd):
+		pv = mr.Get(xtd)
+	case xtd.HasDefault():
+		pv = xtd.Default()
+	default:
+		return nil, ErrMissingExtension
+	}
+
+	v := xt.InterfaceOf(pv)
+	rv := reflect.ValueOf(v)
+	if isScalarKind(rv.Kind()) {
+		rv2 := reflect.New(rv.Type())
+		rv2.Elem().Set(rv)
+		v = rv2.Interface()
+	}
+	return v, nil
+}
+
+// extensionResolver is a custom extension resolver that stores a single
+// extension type that takes precedence over the global registry.
+type extensionResolver struct{ xt protoreflect.ExtensionType }
+
+func (r extensionResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
+	if xtd := r.xt.TypeDescriptor(); xtd.FullName() == field {
+		return r.xt, nil
+	}
+	return protoregistry.GlobalTypes.FindExtensionByName(field)
+}
+
+func (r extensionResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
+	if xtd := r.xt.TypeDescriptor(); xtd.ContainingMessage().FullName() == message && xtd.Number() == field {
+		return r.xt, nil
+	}
+	return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
+}
+
+// GetExtensions returns a list of the extension values present in m,
+// corresponding with the provided list of extension descriptors, xts.
+// If an extension is missing in m, the corresponding value is nil.
+func GetExtensions(m Message, xts []*ExtensionDesc) ([]interface{}, error) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
+		return nil, errNotExtendable
+	}
+
+	vs := make([]interface{}, len(xts))
+	for i, xt := range xts {
+		v, err := GetExtension(m, xt)
+		if err != nil {
+			if err == ErrMissingExtension {
+				continue
+			}
+			return vs, err
+		}
+		vs[i] = v
+	}
+	return vs, nil
+}
+
+// SetExtension sets an extension field in m to the provided value.
+func SetExtension(m Message, xt *ExtensionDesc, v interface{}) error {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+		return errNotExtendable
+	}
+
+	rv := reflect.ValueOf(v)
+	if reflect.TypeOf(v) != reflect.TypeOf(xt.ExtensionType) {
+		return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", v, xt.ExtensionType)
+	}
+	if rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			return fmt.Errorf("proto: SetExtension called with nil value of type %T", v)
+		}
+		if isScalarKind(rv.Elem().Kind()) {
+			v = rv.Elem().Interface()
+		}
+	}
+
+	xtd := xt.TypeDescriptor()
+	if !isValidExtension(mr.Descriptor(), xtd) {
+		return fmt.Errorf("proto: bad extended type; %T does not extend %T", xt.ExtendedType, m)
+	}
+	mr.Set(xtd, xt.ValueOf(v))
+	clearUnknown(mr, fieldNum(xt.Field))
+	return nil
+}
+
+// SetRawExtension inserts b into the unknown fields of m.
+//
+// Deprecated: Use Message.ProtoReflect.SetUnknown instead.
+func SetRawExtension(m Message, fnum int32, b []byte) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
+		return
+	}
+
+	// Verify that the raw field is valid.
+	for b0 := b; len(b0) > 0; {
+		num, _, n := protowire.ConsumeField(b0)
+		if int32(num) != fnum {
+			panic(fmt.Sprintf("mismatching field number: got %d, want %d", num, fnum))
+		}
+		b0 = b0[n:]
+	}
+
+	ClearExtension(m, &ExtensionDesc{Field: fnum})
+	mr.SetUnknown(append(mr.GetUnknown(), b...))
+}
+
+// ExtensionDescs returns a list of extension descriptors found in m,
+// containing descriptors for both populated extension fields in m and
+// also unknown fields of m that are in the extension range.
+// For the latter case, a type-incomplete descriptor is provided where only
+// the ExtensionDesc.Field field is populated.
+// The order of the extension descriptors is undefined.
+func ExtensionDescs(m Message) ([]*ExtensionDesc, error) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() || mr.Descriptor().ExtensionRanges().Len() == 0 {
+		return nil, errNotExtendable
+	}
+
+	// Collect a set of known extension descriptors.
+	extDescs := make(map[protoreflect.FieldNumber]*ExtensionDesc)
+	mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+		if fd.IsExtension() {
+			xt := fd.(protoreflect.ExtensionTypeDescriptor)
+			if xd, ok := xt.Type().(*ExtensionDesc); ok {
+				extDescs[fd.Number()] = xd
+			}
+		}
+		return true
+	})
+
+	// Collect a set of unknown extension descriptors.
+	extRanges := mr.Descriptor().ExtensionRanges()
+	for b := mr.GetUnknown(); len(b) > 0; {
+		num, _, n := protowire.ConsumeField(b)
+		if extRanges.Has(num) && extDescs[num] == nil {
+			extDescs[num] = nil
+		}
+		b = b[n:]
+	}
+
+	// Transpose the set of descriptors into a list.
+	var xts []*ExtensionDesc
+	for num, xt := range extDescs {
+		if xt == nil {
+			xt = &ExtensionDesc{Field: int32(num)}
+		}
+		xts = append(xts, xt)
+	}
+	return xts, nil
+}
+
+// isValidExtension reports whether xtd is a valid extension descriptor for md.
+func isValidExtension(md protoreflect.MessageDescriptor, xtd protoreflect.ExtensionTypeDescriptor) bool {
+	return xtd.ContainingMessage() == md && md.ExtensionRanges().Has(xtd.Number())
+}
+
+// isScalarKind reports whether k is a protobuf scalar kind (except bytes).
+// This function exists for historical reasons since the representation of
+// scalars differs between v1 and v2, where v1 uses *T and v2 uses T.
+func isScalarKind(k reflect.Kind) bool {
+	switch k {
+	case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String:
+		return true
+	default:
+		return false
+	}
+}
+
+// clearUnknown removes unknown fields from m where remover.Has reports true.
+func clearUnknown(m protoreflect.Message, remover interface {
+	Has(protoreflect.FieldNumber) bool
+}) {
+	var bo protoreflect.RawFields
+	for bi := m.GetUnknown(); len(bi) > 0; {
+		num, _, n := protowire.ConsumeField(bi)
+		if !remover.Has(num) {
+			bo = append(bo, bi[:n]...)
+		}
+		bi = bi[n:]
+	}
+	if bi := m.GetUnknown(); len(bi) != len(bo) {
+		m.SetUnknown(bo)
+	}
+}
+
+type fieldNum protoreflect.FieldNumber
+
+func (n1 fieldNum) Has(n2 protoreflect.FieldNumber) bool {
+	return protoreflect.FieldNumber(n1) == n2
+}
diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 0000000..dcdc220
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,306 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"sync"
+
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// StructProperties represents protocol buffer type information for a
+// generated protobuf message in the open-struct API.
+//
+// Deprecated: Do not use.
+type StructProperties struct {
+	// Prop are the properties for each field.
+	//
+	// Fields belonging to a oneof are stored in OneofTypes instead, with a
+	// single Properties representing the parent oneof held here.
+	//
+	// The order of Prop matches the order of fields in the Go struct.
+	// Struct fields that are not related to protobufs have a "XXX_" prefix
+	// in the Properties.Name and must be ignored by the user.
+	Prop []*Properties
+
+	// OneofTypes contains information about the oneof fields in this message.
+	// It is keyed by the protobuf field name.
+	OneofTypes map[string]*OneofProperties
+}
+
+// Properties represents the type information for a protobuf message field.
+//
+// Deprecated: Do not use.
+type Properties struct {
+	// Name is a placeholder name with little meaningful semantic value.
+	// If the name has an "XXX_" prefix, the entire Properties must be ignored.
+	Name string
+	// OrigName is the protobuf field name or oneof name.
+	OrigName string
+	// JSONName is the JSON name for the protobuf field.
+	JSONName string
+	// Enum is a placeholder name for enums.
+	// For historical reasons, this is neither the Go name for the enum,
+	// nor the protobuf name for the enum.
+	Enum string // Deprecated: Do not use.
+	// Weak contains the full name of the weakly referenced message.
+	Weak string
+	// Wire is a string representation of the wire type.
+	Wire string
+	// WireType is the protobuf wire type for the field.
+	WireType int
+	// Tag is the protobuf field number.
+	Tag int
+	// Required reports whether this is a required field.
+	Required bool
+	// Optional reports whether this is an optional field.
+	Optional bool
+	// Repeated reports whether this is a repeated field.
+	Repeated bool
+	// Packed reports whether this is a packed repeated field of scalars.
+	Packed bool
+	// Proto3 reports whether this field operates under the proto3 syntax.
+	Proto3 bool
+	// Oneof reports whether this field belongs within a oneof.
+	Oneof bool
+
+	// Default is the default value in string form.
+	Default string
+	// HasDefault reports whether the field has a default value.
+	HasDefault bool
+
+	// MapKeyProp is the properties for the key field for a map field.
+	MapKeyProp *Properties
+	// MapValProp is the properties for the value field for a map field.
+	MapValProp *Properties
+}
+
+// OneofProperties represents the type information for a protobuf oneof.
+//
+// Deprecated: Do not use.
+type OneofProperties struct {
+	// Type is a pointer to the generated wrapper type for the field value.
+	// This is nil for messages that are not in the open-struct API.
+	Type reflect.Type
+	// Field is the index into StructProperties.Prop for the containing oneof.
+	Field int
+	// Prop is the properties for the field.
+	Prop *Properties
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += "," + strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != "" {
+		s += ",json=" + p.JSONName
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if len(p.Weak) > 0 {
+		s += ",weak=" + p.Weak
+	}
+	if p.Proto3 {
+		s += ",proto3"
+	}
+	if p.Oneof {
+		s += ",oneof"
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(tag string) {
+	// For example: "bytes,49,opt,name=foo,def=hello!"
+	for len(tag) > 0 {
+		i := strings.IndexByte(tag, ',')
+		if i < 0 {
+			i = len(tag)
+		}
+		switch s := tag[:i]; {
+		case strings.HasPrefix(s, "name="):
+			p.OrigName = s[len("name="):]
+		case strings.HasPrefix(s, "json="):
+			p.JSONName = s[len("json="):]
+		case strings.HasPrefix(s, "enum="):
+			p.Enum = s[len("enum="):]
+		case strings.HasPrefix(s, "weak="):
+			p.Weak = s[len("weak="):]
+		case strings.Trim(s, "0123456789") == "":
+			n, _ := strconv.ParseUint(s, 10, 32)
+			p.Tag = int(n)
+		case s == "opt":
+			p.Optional = true
+		case s == "req":
+			p.Required = true
+		case s == "rep":
+			p.Repeated = true
+		case s == "varint" || s == "zigzag32" || s == "zigzag64":
+			p.Wire = s
+			p.WireType = WireVarint
+		case s == "fixed32":
+			p.Wire = s
+			p.WireType = WireFixed32
+		case s == "fixed64":
+			p.Wire = s
+			p.WireType = WireFixed64
+		case s == "bytes":
+			p.Wire = s
+			p.WireType = WireBytes
+		case s == "group":
+			p.Wire = s
+			p.WireType = WireStartGroup
+		case s == "packed":
+			p.Packed = true
+		case s == "proto3":
+			p.Proto3 = true
+		case s == "oneof":
+			p.Oneof = true
+		case strings.HasPrefix(s, "def="):
+			// The default tag is special in that everything afterwards is the
+			// default regardless of the presence of commas.
+			p.HasDefault = true
+			p.Default, i = tag[len("def="):], len(tag)
+		}
+		tag = strings.TrimPrefix(tag[i:], ",")
+	}
+}
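+
+// Example (illustrative): parsing the tag "bytes,49,opt,name=foo,def=hello!"
+// sets Wire="bytes", WireType=WireBytes, Tag=49, Optional=true,
+// OrigName="foo", HasDefault=true and Default="hello!".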
+
+// Init populates the properties from a protocol buffer struct tag.
+//
+// Deprecated: Do not use.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+	p.Name = name
+	p.OrigName = name
+	if tag == "" {
+		return
+	}
+	p.Parse(tag)
+
+	if typ != nil && typ.Kind() == reflect.Map {
+		p.MapKeyProp = new(Properties)
+		p.MapKeyProp.Init(nil, "Key", f.Tag.Get("protobuf_key"), nil)
+		p.MapValProp = new(Properties)
+		p.MapValProp.Init(nil, "Value", f.Tag.Get("protobuf_val"), nil)
+	}
+}
+
+var propertiesCache sync.Map // map[reflect.Type]*StructProperties
+
+// GetProperties returns the list of properties for the type represented by t,
+// which must be a generated protocol buffer message in the open-struct API,
+// where protobuf message fields are represented by exported Go struct fields.
+//
+// Deprecated: Use protobuf reflection instead.
+func GetProperties(t reflect.Type) *StructProperties {
+	if p, ok := propertiesCache.Load(t); ok {
+		return p.(*StructProperties)
+	}
+	p, _ := propertiesCache.LoadOrStore(t, newProperties(t))
+	return p.(*StructProperties)
+}
+
+func newProperties(t reflect.Type) *StructProperties {
+	if t.Kind() != reflect.Struct {
+		panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+	}
+
+	var hasOneof bool
+	prop := new(StructProperties)
+
+	// Construct a list of properties for each field in the struct.
+	for i := 0; i < t.NumField(); i++ {
+		p := new(Properties)
+		f := t.Field(i)
+		tagField := f.Tag.Get("protobuf")
+		p.Init(f.Type, f.Name, tagField, &f)
+
+		tagOneof := f.Tag.Get("protobuf_oneof")
+		if tagOneof != "" {
+			hasOneof = true
+			p.OrigName = tagOneof
+		}
+
+		// Rename unrelated struct fields with the "XXX_" prefix since so much
+		// user code simply checks for this to exclude special fields.
+		if tagField == "" && tagOneof == "" && !strings.HasPrefix(p.Name, "XXX_") {
+			p.Name = "XXX_" + p.Name
+			p.OrigName = "XXX_" + p.OrigName
+		} else if p.Weak != "" {
+			p.Name = p.OrigName // avoid possible "XXX_" prefix on weak field
+		}
+
+		prop.Prop = append(prop.Prop, p)
+	}
+
+	// Construct a mapping of oneof field names to properties.
+	if hasOneof {
+		var oneofWrappers []interface{}
+		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofFuncs"); ok {
+			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[3].Interface().([]interface{})
+		}
+		if fn, ok := reflect.PtrTo(t).MethodByName("XXX_OneofWrappers"); ok {
+			oneofWrappers = fn.Func.Call([]reflect.Value{reflect.Zero(fn.Type.In(0))})[0].Interface().([]interface{})
+		}
+		if m, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(protoreflect.ProtoMessage); ok {
+			if m, ok := m.ProtoReflect().(interface{ ProtoMessageInfo() *protoimpl.MessageInfo }); ok {
+				oneofWrappers = m.ProtoMessageInfo().OneofWrappers
+			}
+		}
+
+		prop.OneofTypes = make(map[string]*OneofProperties)
+		for _, wrapper := range oneofWrappers {
+			p := &OneofProperties{
+				Type: reflect.ValueOf(wrapper).Type(), // *T
+				Prop: new(Properties),
+			}
+			f := p.Type.Elem().Field(0)
+			p.Prop.Name = f.Name
+			p.Prop.Parse(f.Tag.Get("protobuf"))
+
+			// Determine the struct field that contains this oneof.
+			// Each wrapper is assignable to exactly one parent field.
+			var foundOneof bool
+			for i := 0; i < t.NumField() && !foundOneof; i++ {
+				if p.Type.AssignableTo(t.Field(i).Type) {
+					p.Field = i
+					foundOneof = true
+				}
+			}
+			if !foundOneof {
+				panic(fmt.Sprintf("%v is not a generated message in the open-struct API", t))
+			}
+			prop.OneofTypes[p.Prop.OrigName] = p
+		}
+	}
+
+	return prop
+}
+
+func (sp *StructProperties) Len() int           { return len(sp.Prop) }
+func (sp *StructProperties) Less(i, j int) bool { return false }
+func (sp *StructProperties) Swap(i, j int)      { return }
diff --git a/vendor/github.com/golang/protobuf/proto/proto.go b/vendor/github.com/golang/protobuf/proto/proto.go
new file mode 100644
index 0000000..5aee89c
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/proto.go
@@ -0,0 +1,167 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package proto provides functionality for handling protocol buffer messages.
+// In particular, it provides marshaling and unmarshaling between a protobuf
+// message and the binary wire format.
+//
+// See https://developers.google.com/protocol-buffers/docs/gotutorial for
+// more information.
+//
+// Deprecated: Use the "google.golang.org/protobuf/proto" package instead.
+package proto
+
+import (
+	protoV2 "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/runtime/protoiface"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+const (
+	ProtoPackageIsVersion1 = true
+	ProtoPackageIsVersion2 = true
+	ProtoPackageIsVersion3 = true
+	ProtoPackageIsVersion4 = true
+)
+
+// GeneratedEnum is any enum type generated by protoc-gen-go
+// which is a named int32 kind.
+// This type exists for documentation purposes.
+type GeneratedEnum interface{}
+
+// GeneratedMessage is any message type generated by protoc-gen-go
+// which is a pointer to a named struct kind.
+// This type exists for documentation purposes.
+type GeneratedMessage interface{}
+
+// Message is a protocol buffer message.
+//
+// This is the v1 version of the message interface and is marginally better
+// than an empty interface, although it still lacks any method to
+// programmatically interact with the contents of the message.
+//
+// A v2 message is declared in "google.golang.org/protobuf/proto".Message and
+// exposes protobuf reflection as a first-class feature of the interface.
+//
+// To convert a v1 message to a v2 message, use the MessageV2 function.
+// To convert a v2 message to a v1 message, use the MessageV1 function.
+type Message = protoiface.MessageV1
+
+// MessageV1 converts either a v1 or v2 message to a v1 message.
+// It returns nil if m is nil.
+func MessageV1(m GeneratedMessage) protoiface.MessageV1 {
+	return protoimpl.X.ProtoMessageV1Of(m)
+}
+
+// MessageV2 converts either a v1 or v2 message to a v2 message.
+// It returns nil if m is nil.
+func MessageV2(m GeneratedMessage) protoV2.Message {
+	return protoimpl.X.ProtoMessageV2Of(m)
+}
+
+// MessageReflect returns a reflective view for a message.
+// It returns nil if m is nil.
+func MessageReflect(m Message) protoreflect.Message {
+	return protoimpl.X.MessageOf(m)
+}
+
+// Marshaler is implemented by messages that can marshal themselves.
+// This interface is used by the following functions: Size, Marshal,
+// Buffer.Marshal, and Buffer.EncodeMessage.
+//
+// Deprecated: Do not implement.
+type Marshaler interface {
+	// Marshal formats the encoded bytes of the message.
+	// It should be deterministic and emit valid protobuf wire data.
+	// The caller takes ownership of the returned buffer.
+	Marshal() ([]byte, error)
+}
+
+// Unmarshaler is implemented by messages that can unmarshal themselves.
+// This interface is used by the following functions: Unmarshal, UnmarshalMerge,
+// Buffer.Unmarshal, Buffer.DecodeMessage, and Buffer.DecodeGroup.
+//
+// Deprecated: Do not implement.
+type Unmarshaler interface {
+	// Unmarshal parses the encoded bytes of the protobuf wire input.
+	// The provided buffer is only valid for the duration of the method call.
+	// It should not reset the receiver message.
+	Unmarshal([]byte) error
+}
+
+// Merger is implemented by messages that can merge themselves.
+// This interface is used by the following functions: Clone and Merge.
+//
+// Deprecated: Do not implement.
+type Merger interface {
+	// Merge merges the contents of src into the receiver message.
+	// It clones all data structures in src such that it aliases no mutable
+	// memory referenced by src.
+	Merge(src Message)
+}
+
+// RequiredNotSetError is an error type returned when
+// marshaling or unmarshaling a message with missing required fields.
+type RequiredNotSetError struct {
+	err error
+}
+
+func (e *RequiredNotSetError) Error() string {
+	if e.err != nil {
+		return e.err.Error()
+	}
+	return "proto: required field not set"
+}
+func (e *RequiredNotSetError) RequiredNotSet() bool {
+	return true
+}
+
+func checkRequiredNotSet(m protoV2.Message) error {
+	if err := protoV2.CheckInitialized(m); err != nil {
+		return &RequiredNotSetError{err: err}
+	}
+	return nil
+}
+
+// Clone returns a deep copy of src.
+func Clone(src Message) Message {
+	return MessageV1(protoV2.Clone(MessageV2(src)))
+}
+
+// Merge merges src into dst, which must be messages of the same type.
+//
+// Populated scalar fields in src are copied to dst, while populated
+// singular messages in src are merged into dst by recursively calling Merge.
+// The elements of every list field in src are appended to the corresponding
+// list fields in dst. The entries of every map field in src are copied into
+// the corresponding map field in dst, possibly replacing existing entries.
+// The unknown fields of src are appended to the unknown fields of dst.
+func Merge(dst, src Message) {
+	protoV2.Merge(MessageV2(dst), MessageV2(src))
+}
+
+// Equal reports whether two messages are equal.
+// If two messages marshal to the same bytes under deterministic serialization,
+// then Equal is guaranteed to report true.
+//
+// Two messages are equal if they are the same protobuf message type,
+// have the same set of populated known and extension field values,
+// and the same set of unknown field values.
+//
+// Scalar values are compared with the equivalent of the == operator in Go,
+// except bytes values which are compared using bytes.Equal and
+// floating point values which specially treat NaNs as equal.
+// Message values are compared by recursively calling Equal.
+// Lists are equal if each element value is also equal.
+// Maps are equal if they have the same set of keys, where the pair of values
+// for each key is also equal.
+func Equal(x, y Message) bool {
+	return protoV2.Equal(MessageV2(x), MessageV2(y))
+}
+
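+// Illustrative sketch: how Clone, Merge, and Equal compose, assuming a
+// hypothetical proto2-style generated type pb.Example with an optional
+// string field Name.
+//
+//	dst := &pb.Example{Name: proto.String("a")}
+//	src := &pb.Example{Name: proto.String("b")}
+//	cp := proto.Clone(src)   // deep copy of src
+//	proto.Merge(dst, src)    // dst.Name is now "b"
+//	_ = proto.Equal(cp, src) // true: same type and same field values
+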
+func isMessageSet(md protoreflect.MessageDescriptor) bool {
+	ms, ok := md.(interface{ IsMessageSet() bool })
+	return ok && ms.IsMessageSet()
+}
diff --git a/vendor/github.com/golang/protobuf/proto/registry.go b/vendor/github.com/golang/protobuf/proto/registry.go
new file mode 100644
index 0000000..066b432
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/registry.go
@@ -0,0 +1,317 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"bytes"
+	"compress/gzip"
+	"fmt"
+	"io/ioutil"
+	"reflect"
+	"strings"
+	"sync"
+
+	"google.golang.org/protobuf/reflect/protodesc"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+	"google.golang.org/protobuf/runtime/protoimpl"
+)
+
+// filePath is the path to the proto source file.
+type filePath = string // e.g., "google/protobuf/descriptor.proto"
+
+// fileDescGZIP is the compressed contents of the encoded FileDescriptorProto.
+type fileDescGZIP = []byte
+
+var fileCache sync.Map // map[filePath]fileDescGZIP
+
+// RegisterFile is called from generated code to register the compressed
+// FileDescriptorProto with the file path for a proto source file.
+//
+// Deprecated: Use protoregistry.GlobalFiles.RegisterFile instead.
+func RegisterFile(s filePath, d fileDescGZIP) {
+	// Decompress the descriptor.
+	zr, err := gzip.NewReader(bytes.NewReader(d))
+	if err != nil {
+		panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+	}
+	b, err := ioutil.ReadAll(zr)
+	if err != nil {
+		panic(fmt.Sprintf("proto: invalid compressed file descriptor: %v", err))
+	}
+
+	// Construct a protoreflect.FileDescriptor from the raw descriptor.
+	// Note that DescBuilder.Build automatically registers the constructed
+	// file descriptor with the v2 registry.
+	protoimpl.DescBuilder{RawDescriptor: b}.Build()
+
+	// Locally cache the raw descriptor form for the file.
+	fileCache.Store(s, d)
+}
+
+// FileDescriptor returns the compressed FileDescriptorProto given the file path
+// for a proto source file. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalFiles.FindFileByPath instead.
+func FileDescriptor(s filePath) fileDescGZIP {
+	if v, ok := fileCache.Load(s); ok {
+		return v.(fileDescGZIP)
+	}
+
+	// Find the descriptor in the v2 registry.
+	var b []byte
+	if fd, _ := protoregistry.GlobalFiles.FindFileByPath(s); fd != nil {
+		b, _ = Marshal(protodesc.ToFileDescriptorProto(fd))
+	}
+
+	// Locally cache the raw descriptor form for the file.
+	if len(b) > 0 {
+		v, _ := fileCache.LoadOrStore(s, protoimpl.X.CompressGZIP(b))
+		return v.(fileDescGZIP)
+	}
+	return nil
+}
+
+// enumName is the name of an enum. For historical reasons, the enum name is
+// neither the full Go name nor the full protobuf name of the enum.
+// The name is the dot-separated combination of the proto package in which the
+// enum is declared and the Go type name of the generated enum.
+type enumName = string // e.g., "my.proto.package.GoMessage_GoEnum"
+
+// enumsByName maps enum values by name to their numeric counterpart.
+type enumsByName = map[string]int32
+
+// enumsByNumber maps enum values by number to their name counterpart.
+type enumsByNumber = map[int32]string
+
+var enumCache sync.Map     // map[enumName]enumsByName
+var numFilesCache sync.Map // map[protoreflect.FullName]int
+
+// RegisterEnum is called from the generated code to register the mapping of
+// enum value names to enum numbers for the enum identified by s.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterEnum instead.
+func RegisterEnum(s enumName, _ enumsByNumber, m enumsByName) {
+	if _, ok := enumCache.Load(s); ok {
+		panic("proto: duplicate enum registered: " + s)
+	}
+	enumCache.Store(s, m)
+
+	// This does not forward registration to the v2 registry since this API
+	// lacks sufficient information to construct a complete v2 enum descriptor.
+}
+
+// EnumValueMap returns the mapping from enum value names to enum numbers for
+// the enum of the given name. It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindEnumByName instead.
+func EnumValueMap(s enumName) enumsByName {
+	if v, ok := enumCache.Load(s); ok {
+		return v.(enumsByName)
+	}
+
+	// Check whether the cache is stale. If the number of files in the current
+	// package differs, then it means that some enums may have been recently
+	// registered upstream that we do not know about.
+	var protoPkg protoreflect.FullName
+	if i := strings.LastIndexByte(s, '.'); i >= 0 {
+		protoPkg = protoreflect.FullName(s[:i])
+	}
+	v, _ := numFilesCache.Load(protoPkg)
+	numFiles, _ := v.(int)
+	if protoregistry.GlobalFiles.NumFilesByPackage(protoPkg) == numFiles {
+		return nil // cache is up-to-date; was not found earlier
+	}
+
+	// Update the enum cache for all enums declared in the given proto package.
+	numFiles = 0
+	protoregistry.GlobalFiles.RangeFilesByPackage(protoPkg, func(fd protoreflect.FileDescriptor) bool {
+		walkEnums(fd, func(ed protoreflect.EnumDescriptor) {
+			name := protoimpl.X.LegacyEnumName(ed)
+			if _, ok := enumCache.Load(name); !ok {
+				m := make(enumsByName)
+				evs := ed.Values()
+				for i := evs.Len() - 1; i >= 0; i-- {
+					ev := evs.Get(i)
+					m[string(ev.Name())] = int32(ev.Number())
+				}
+				enumCache.LoadOrStore(name, m)
+			}
+		})
+		numFiles++
+		return true
+	})
+	numFilesCache.Store(protoPkg, numFiles)
+
+	// Check cache again for enum map.
+	if v, ok := enumCache.Load(s); ok {
+		return v.(enumsByName)
+	}
+	return nil
+}
+
+// walkEnums recursively walks all enums declared in d.
+func walkEnums(d interface {
+	Enums() protoreflect.EnumDescriptors
+	Messages() protoreflect.MessageDescriptors
+}, f func(protoreflect.EnumDescriptor)) {
+	eds := d.Enums()
+	for i := eds.Len() - 1; i >= 0; i-- {
+		f(eds.Get(i))
+	}
+	mds := d.Messages()
+	for i := mds.Len() - 1; i >= 0; i-- {
+		walkEnums(mds.Get(i), f)
+	}
+}
+
+// messageName is the full name of a protobuf message.
+type messageName = string
+
+var messageTypeCache sync.Map // map[messageName]reflect.Type
+
+// RegisterType is called from generated code to register the message Go type
+// for a message of the given name.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterMessage instead.
+func RegisterType(m Message, s messageName) {
+	mt := protoimpl.X.LegacyMessageTypeOf(m, protoreflect.FullName(s))
+	if err := protoregistry.GlobalTypes.RegisterMessage(mt); err != nil {
+		panic(err)
+	}
+	messageTypeCache.Store(s, reflect.TypeOf(m))
+}
+
+// RegisterMapType is called from generated code to register the Go map type
+// for a protobuf message representing a map entry.
+//
+// Deprecated: Do not use.
+func RegisterMapType(m interface{}, s messageName) {
+	t := reflect.TypeOf(m)
+	if t.Kind() != reflect.Map {
+		panic(fmt.Sprintf("invalid map kind: %v", t))
+	}
+	if _, ok := messageTypeCache.Load(s); ok {
+		panic(fmt.Errorf("proto: duplicate proto message registered: %s", s))
+	}
+	messageTypeCache.Store(s, t)
+}
+
+// MessageType returns the message type for a named message.
+// It returns nil if not found.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead.
+func MessageType(s messageName) reflect.Type {
+	if v, ok := messageTypeCache.Load(s); ok {
+		return v.(reflect.Type)
+	}
+
+	// Derive the message type from the v2 registry.
+	var t reflect.Type
+	if mt, _ := protoregistry.GlobalTypes.FindMessageByName(protoreflect.FullName(s)); mt != nil {
+		t = messageGoType(mt)
+	}
+
+	// If we could not get a concrete type, it is possible that it is a
+	// pseudo-message for a map entry.
+	if t == nil {
+		d, _ := protoregistry.GlobalFiles.FindDescriptorByName(protoreflect.FullName(s))
+		if md, _ := d.(protoreflect.MessageDescriptor); md != nil && md.IsMapEntry() {
+			kt := goTypeForField(md.Fields().ByNumber(1))
+			vt := goTypeForField(md.Fields().ByNumber(2))
+			t = reflect.MapOf(kt, vt)
+		}
+	}
+
+	// Locally cache the message type for the given name.
+	if t != nil {
+		v, _ := messageTypeCache.LoadOrStore(s, t)
+		return v.(reflect.Type)
+	}
+	return nil
+}
+
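+// Illustrative sketch: resolving the registered Go type for a message by its
+// full protobuf name, assuming the generated package for that message has
+// been linked in so that its descriptor is registered.
+//
+//	if t := proto.MessageType("google.protobuf.Duration"); t != nil {
+//		m := reflect.New(t.Elem()).Interface().(proto.Message)
+//		_ = m // a freshly allocated, empty Duration message
+//	}
+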
+func goTypeForField(fd protoreflect.FieldDescriptor) reflect.Type {
+	switch k := fd.Kind(); k {
+	case protoreflect.EnumKind:
+		if et, _ := protoregistry.GlobalTypes.FindEnumByName(fd.Enum().FullName()); et != nil {
+			return enumGoType(et)
+		}
+		return reflect.TypeOf(protoreflect.EnumNumber(0))
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		if mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()); mt != nil {
+			return messageGoType(mt)
+		}
+		return reflect.TypeOf((*protoreflect.Message)(nil)).Elem()
+	default:
+		return reflect.TypeOf(fd.Default().Interface())
+	}
+}
+
+func enumGoType(et protoreflect.EnumType) reflect.Type {
+	return reflect.TypeOf(et.New(0))
+}
+
+func messageGoType(mt protoreflect.MessageType) reflect.Type {
+	return reflect.TypeOf(MessageV1(mt.Zero().Interface()))
+}
+
+// MessageName returns the full protobuf name for the given message type.
+//
+// Deprecated: Use protoreflect.MessageDescriptor.FullName instead.
+func MessageName(m Message) messageName {
+	if m == nil {
+		return ""
+	}
+	if m, ok := m.(interface{ XXX_MessageName() messageName }); ok {
+		return m.XXX_MessageName()
+	}
+	return messageName(protoimpl.X.MessageDescriptorOf(m).FullName())
+}
+
+// RegisterExtension is called from the generated code to register
+// the extension descriptor.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RegisterExtension instead.
+func RegisterExtension(d *ExtensionDesc) {
+	if err := protoregistry.GlobalTypes.RegisterExtension(d); err != nil {
+		panic(err)
+	}
+}
+
+type extensionsByNumber = map[int32]*ExtensionDesc
+
+var extensionCache sync.Map // map[messageName]extensionsByNumber
+
+// RegisteredExtensions returns a map of the registered extensions for the
+// provided protobuf message, indexed by the extension field number.
+//
+// Deprecated: Use protoregistry.GlobalTypes.RangeExtensionsByMessage instead.
+func RegisteredExtensions(m Message) extensionsByNumber {
+	// Check whether the cache is stale. If the number of extensions for
+	// the given message differs, then it means that some extensions were
+	// recently registered upstream that we do not know about.
+	s := MessageName(m)
+	v, _ := extensionCache.Load(s)
+	xs, _ := v.(extensionsByNumber)
+	if protoregistry.GlobalTypes.NumExtensionsByMessage(protoreflect.FullName(s)) == len(xs) {
+		return xs // cache is up-to-date
+	}
+
+	// Cache is stale, re-compute the extensions map.
+	xs = make(extensionsByNumber)
+	protoregistry.GlobalTypes.RangeExtensionsByMessage(protoreflect.FullName(s), func(xt protoreflect.ExtensionType) bool {
+		if xd, ok := xt.(*ExtensionDesc); ok {
+			xs[int32(xt.TypeDescriptor().Number())] = xd
+		} else {
+			// TODO: This implies that the protoreflect.ExtensionType is a
+			// custom type not generated by protoc-gen-go. We could try and
+			// convert the type to an ExtensionDesc.
+		}
+		return true
+	})
+	extensionCache.Store(s, xs)
+	return xs
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_decode.go b/vendor/github.com/golang/protobuf/proto/text_decode.go
new file mode 100644
index 0000000..47eb3e4
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_decode.go
@@ -0,0 +1,801 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"encoding"
+	"errors"
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"unicode/utf8"
+
+	"google.golang.org/protobuf/encoding/prototext"
+	protoV2 "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextUnmarshalV2 = false
+
+// ParseError is returned by UnmarshalText.
+type ParseError struct {
+	Message string
+
+	// Deprecated: Do not use.
+	Line, Offset int
+}
+
+func (e *ParseError) Error() string {
+	if wrapTextUnmarshalV2 {
+		return e.Message
+	}
+	if e.Line == 1 {
+		return fmt.Sprintf("line 1.%d: %v", e.Offset, e.Message)
+	}
+	return fmt.Sprintf("line %d: %v", e.Line, e.Message)
+}
+
+// UnmarshalText parses a proto text formatted string into m.
+func UnmarshalText(s string, m Message) error {
+	if u, ok := m.(encoding.TextUnmarshaler); ok {
+		return u.UnmarshalText([]byte(s))
+	}
+
+	m.Reset()
+	mi := MessageV2(m)
+
+	if wrapTextUnmarshalV2 {
+		err := prototext.UnmarshalOptions{
+			AllowPartial: true,
+		}.Unmarshal([]byte(s), mi)
+		if err != nil {
+			return &ParseError{Message: err.Error()}
+		}
+		return checkRequiredNotSet(mi)
+	} else {
+		if err := newTextParser(s).unmarshalMessage(mi.ProtoReflect(), ""); err != nil {
+			return err
+		}
+		return checkRequiredNotSet(mi)
+	}
+}
+
+type textParser struct {
+	s            string // remaining input
+	done         bool   // whether the parsing is finished (success or error)
+	backed       bool   // whether back() was called
+	offset, line int
+	cur          token
+}
+
+type token struct {
+	value    string
+	err      *ParseError
+	line     int    // line number
+	offset   int    // byte number from start of input, not start of line
+	unquoted string // the unquoted version of value, if it was a quoted string
+}
+
+func newTextParser(s string) *textParser {
+	p := new(textParser)
+	p.s = s
+	p.line = 1
+	p.cur.line = 1
+	return p
+}
+
+func (p *textParser) unmarshalMessage(m protoreflect.Message, terminator string) (err error) {
+	md := m.Descriptor()
+	fds := md.Fields()
+
+	// A struct is a sequence of "name: value", terminated by one of
+	// '>' or '}', or the end of the input.  A name may also be
+	// "[extension]" or "[type/url]".
+	//
+	// The whole struct can also be an expanded Any message, like:
+	// [type/url] < ... struct contents ... >
+	seen := make(map[protoreflect.FieldNumber]bool)
+	for {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		if tok.value == terminator {
+			break
+		}
+		if tok.value == "[" {
+			if err := p.unmarshalExtensionOrAny(m, seen); err != nil {
+				return err
+			}
+			continue
+		}
+
+		// This is a normal, non-extension field.
+		name := protoreflect.Name(tok.value)
+		fd := fds.ByName(name)
+		switch {
+		case fd == nil:
+			gd := fds.ByName(protoreflect.Name(strings.ToLower(string(name))))
+			if gd != nil && gd.Kind() == protoreflect.GroupKind && gd.Message().Name() == name {
+				fd = gd
+			}
+		case fd.Kind() == protoreflect.GroupKind && fd.Message().Name() != name:
+			fd = nil
+		case fd.IsWeak() && fd.Message().IsPlaceholder():
+			fd = nil
+		}
+		if fd == nil {
+			typeName := string(md.FullName())
+			if m, ok := m.Interface().(Message); ok {
+				t := reflect.TypeOf(m)
+				if t.Kind() == reflect.Ptr {
+					typeName = t.Elem().String()
+				}
+			}
+			return p.errorf("unknown field name %q in %v", name, typeName)
+		}
+		if od := fd.ContainingOneof(); od != nil && m.WhichOneof(od) != nil {
+			return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, od.Name())
+		}
+		if fd.Cardinality() != protoreflect.Repeated && seen[fd.Number()] {
+			return p.errorf("non-repeated field %q was repeated", fd.Name())
+		}
+		seen[fd.Number()] = true
+
+		// Consume any colon.
+		if err := p.checkForColon(fd); err != nil {
+			return err
+		}
+
+		// Parse into the field.
+		v := m.Get(fd)
+		if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+			v = m.Mutable(fd)
+		}
+		if v, err = p.unmarshalValue(v, fd); err != nil {
+			return err
+		}
+		m.Set(fd, v)
+
+		if err := p.consumeOptionalSeparator(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (p *textParser) unmarshalExtensionOrAny(m protoreflect.Message, seen map[protoreflect.FieldNumber]bool) error {
+	name, err := p.consumeExtensionOrAnyName()
+	if err != nil {
+		return err
+	}
+
+	// If it contains a slash, it's an Any type URL.
+	if slashIdx := strings.LastIndex(name, "/"); slashIdx >= 0 {
+		tok := p.next()
+		if tok.err != nil {
+			return tok.err
+		}
+		// consume an optional colon
+		if tok.value == ":" {
+			tok = p.next()
+			if tok.err != nil {
+				return tok.err
+			}
+		}
+
+		var terminator string
+		switch tok.value {
+		case "<":
+			terminator = ">"
+		case "{":
+			terminator = "}"
+		default:
+			return p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+
+		mt, err := protoregistry.GlobalTypes.FindMessageByURL(name)
+		if err != nil {
+			return p.errorf("unrecognized message %q in google.protobuf.Any", name[slashIdx+len("/"):])
+		}
+		m2 := mt.New()
+		if err := p.unmarshalMessage(m2, terminator); err != nil {
+			return err
+		}
+		b, err := protoV2.Marshal(m2.Interface())
+		if err != nil {
+			return p.errorf("failed to marshal message of type %q: %v", name[slashIdx+len("/"):], err)
+		}
+
+		urlFD := m.Descriptor().Fields().ByName("type_url")
+		valFD := m.Descriptor().Fields().ByName("value")
+		if seen[urlFD.Number()] {
+			return p.errorf("Any message unpacked multiple times, or %q already set", urlFD.Name())
+		}
+		if seen[valFD.Number()] {
+			return p.errorf("Any message unpacked multiple times, or %q already set", valFD.Name())
+		}
+		m.Set(urlFD, protoreflect.ValueOfString(name))
+		m.Set(valFD, protoreflect.ValueOfBytes(b))
+		seen[urlFD.Number()] = true
+		seen[valFD.Number()] = true
+		return nil
+	}
+
+	xname := protoreflect.FullName(name)
+	xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
+	if xt == nil && isMessageSet(m.Descriptor()) {
+		xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
+	}
+	if xt == nil {
+		return p.errorf("unrecognized extension %q", name)
+	}
+	fd := xt.TypeDescriptor()
+	if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
+		return p.errorf("extension field %q does not extend message %q", name, m.Descriptor().FullName())
+	}
+
+	if err := p.checkForColon(fd); err != nil {
+		return err
+	}
+
+	v := m.Get(fd)
+	if !m.Has(fd) && (fd.IsList() || fd.IsMap() || fd.Message() != nil) {
+		v = m.Mutable(fd)
+	}
+	v, err = p.unmarshalValue(v, fd)
+	if err != nil {
+		return err
+	}
+	m.Set(fd, v)
+	return p.consumeOptionalSeparator()
+}
+
+func (p *textParser) unmarshalValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return v, tok.err
+	}
+	if tok.value == "" {
+		return v, p.errorf("unexpected EOF")
+	}
+
+	switch {
+	case fd.IsList():
+		lv := v.List()
+		var err error
+		if tok.value == "[" {
+			// Repeated field with list notation, like [1,2,3].
+			for {
+				vv := lv.NewElement()
+				vv, err = p.unmarshalSingularValue(vv, fd)
+				if err != nil {
+					return v, err
+				}
+				lv.Append(vv)
+
+				tok := p.next()
+				if tok.err != nil {
+					return v, tok.err
+				}
+				if tok.value == "]" {
+					break
+				}
+				if tok.value != "," {
+					return v, p.errorf("Expected ']' or ',' found %q", tok.value)
+				}
+			}
+			return v, nil
+		}
+
+		// One value of the repeated field.
+		p.back()
+		vv := lv.NewElement()
+		vv, err = p.unmarshalSingularValue(vv, fd)
+		if err != nil {
+			return v, err
+		}
+		lv.Append(vv)
+		return v, nil
+	case fd.IsMap():
+		// The map entry should be this sequence of tokens:
+		//	< key : KEY value : VALUE >
+		// However, implementations may omit key or value, and technically
+		// we should support them in any order.
+		var terminator string
+		switch tok.value {
+		case "<":
+			terminator = ">"
+		case "{":
+			terminator = "}"
+		default:
+			return v, p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+
+		keyFD := fd.MapKey()
+		valFD := fd.MapValue()
+
+		mv := v.Map()
+		kv := keyFD.Default()
+		vv := mv.NewValue()
+		for {
+			tok := p.next()
+			if tok.err != nil {
+				return v, tok.err
+			}
+			if tok.value == terminator {
+				break
+			}
+			var err error
+			switch tok.value {
+			case "key":
+				if err := p.consumeToken(":"); err != nil {
+					return v, err
+				}
+				if kv, err = p.unmarshalSingularValue(kv, keyFD); err != nil {
+					return v, err
+				}
+				if err := p.consumeOptionalSeparator(); err != nil {
+					return v, err
+				}
+			case "value":
+				if err := p.checkForColon(valFD); err != nil {
+					return v, err
+				}
+				if vv, err = p.unmarshalSingularValue(vv, valFD); err != nil {
+					return v, err
+				}
+				if err := p.consumeOptionalSeparator(); err != nil {
+					return v, err
+				}
+			default:
+				p.back()
+				return v, p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value)
+			}
+		}
+		mv.Set(kv.MapKey(), vv)
+		return v, nil
+	default:
+		p.back()
+		return p.unmarshalSingularValue(v, fd)
+	}
+}
+
+func (p *textParser) unmarshalSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return v, tok.err
+	}
+	if tok.value == "" {
+		return v, p.errorf("unexpected EOF")
+	}
+
+	switch fd.Kind() {
+	case protoreflect.BoolKind:
+		switch tok.value {
+		case "true", "1", "t", "True":
+			return protoreflect.ValueOfBool(true), nil
+		case "false", "0", "f", "False":
+			return protoreflect.ValueOfBool(false), nil
+		}
+	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			return protoreflect.ValueOfInt32(int32(x)), nil
+		}
+
+		// The C++ parser accepts large positive hex numbers that use
+		// two's complement arithmetic to represent negative numbers.
+		// This feature is here for backwards compatibility with C++.
+		if strings.HasPrefix(tok.value, "0x") {
+			if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+				return protoreflect.ValueOfInt32(int32(-(int64(^x) + 1))), nil
+			}
+		}
+	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
+			return protoreflect.ValueOfInt64(int64(x)), nil
+		}
+
+		// The C++ parser accepts large positive hex numbers that use
+		// two's complement arithmetic to represent negative numbers.
+		// This feature is here for backwards compatibility with C++.
+		if strings.HasPrefix(tok.value, "0x") {
+			if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+				return protoreflect.ValueOfInt64(int64(-(int64(^x) + 1))), nil
+			}
+		}
+	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
+		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
+			return protoreflect.ValueOfUint32(uint32(x)), nil
+		}
+	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
+			return protoreflect.ValueOfUint64(uint64(x)), nil
+		}
+	case protoreflect.FloatKind:
+		// Ignore 'f' for compatibility with output generated by C++,
+		// but don't remove 'f' when the value is "-inf" or "inf".
+		v := tok.value
+		if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+			v = v[:len(v)-len("f")]
+		}
+		if x, err := strconv.ParseFloat(v, 32); err == nil {
+			return protoreflect.ValueOfFloat32(float32(x)), nil
+		}
+	case protoreflect.DoubleKind:
+		// Ignore 'f' for compatibility with output generated by C++,
+		// but don't remove 'f' when the value is "-inf" or "inf".
+		v := tok.value
+		if strings.HasSuffix(v, "f") && v != "-inf" && v != "inf" {
+			v = v[:len(v)-len("f")]
+		}
+		if x, err := strconv.ParseFloat(v, 64); err == nil {
+			return protoreflect.ValueOfFloat64(float64(x)), nil
+		}
+	case protoreflect.StringKind:
+		if isQuote(tok.value[0]) {
+			return protoreflect.ValueOfString(tok.unquoted), nil
+		}
+	case protoreflect.BytesKind:
+		if isQuote(tok.value[0]) {
+			return protoreflect.ValueOfBytes([]byte(tok.unquoted)), nil
+		}
+	case protoreflect.EnumKind:
+		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
+			return protoreflect.ValueOfEnum(protoreflect.EnumNumber(x)), nil
+		}
+		vd := fd.Enum().Values().ByName(protoreflect.Name(tok.value))
+		if vd != nil {
+			return protoreflect.ValueOfEnum(vd.Number()), nil
+		}
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		var terminator string
+		switch tok.value {
+		case "{":
+			terminator = "}"
+		case "<":
+			terminator = ">"
+		default:
+			return v, p.errorf("expected '{' or '<', found %q", tok.value)
+		}
+		err := p.unmarshalMessage(v.Message(), terminator)
+		return v, err
+	default:
+		panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
+	}
+	return v, p.errorf("invalid %v: %v", fd.Kind(), tok.value)
+}
+
+// Consume a ':' from the input stream (if the next token is a colon),
+// returning an error if a colon is needed but not present.
+func (p *textParser) checkForColon(fd protoreflect.FieldDescriptor) *ParseError {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ":" {
+		if fd.Message() == nil {
+			return p.errorf("expected ':', found %q", tok.value)
+		}
+		p.back()
+	}
+	return nil
+}
+
+// consumeExtensionOrAnyName consumes an extension name or an Any type URL and
+// the following ']'. It returns the name or URL consumed.
+func (p *textParser) consumeExtensionOrAnyName() (string, error) {
+	tok := p.next()
+	if tok.err != nil {
+		return "", tok.err
+	}
+
+	// If the extension name or type URL is quoted, it's a single token.
+	if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] {
+		name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0]))
+		if err != nil {
+			return "", err
+		}
+		return name, p.consumeToken("]")
+	}
+
+	// Consume everything up to "]"
+	var parts []string
+	for tok.value != "]" {
+		parts = append(parts, tok.value)
+		tok = p.next()
+		if tok.err != nil {
+			return "", p.errorf("unrecognized type_url or extension name: %s", tok.err)
+		}
+		if p.done && tok.value != "]" {
+			return "", p.errorf("unclosed type_url or extension name")
+		}
+	}
+	return strings.Join(parts, ""), nil
+}
+
+// consumeOptionalSeparator consumes an optional semicolon or comma.
+// It is used in unmarshalMessage to provide backward compatibility.
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+	pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+	p.cur.err = pe
+	p.done = true
+	return pe
+}
+
+func (p *textParser) skipWhitespace() {
+	i := 0
+	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
+		if p.s[i] == '#' {
+			// comment; skip to end of line or input
+			for i < len(p.s) && p.s[i] != '\n' {
+				i++
+			}
+			if i == len(p.s) {
+				break
+			}
+		}
+		if p.s[i] == '\n' {
+			p.line++
+		}
+		i++
+	}
+	p.offset += i
+	p.s = p.s[i:len(p.s)]
+	if len(p.s) == 0 {
+		p.done = true
+	}
+}
+
+func (p *textParser) advance() {
+	// Skip whitespace
+	p.skipWhitespace()
+	if p.done {
+		return
+	}
+
+	// Start of non-whitespace
+	p.cur.err = nil
+	p.cur.offset, p.cur.line = p.offset, p.line
+	p.cur.unquoted = ""
+	switch p.s[0] {
+	case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/':
+		// Single symbol
+		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
+	case '"', '\'':
+		// Quoted string
+		i := 1
+		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
+			if p.s[i] == '\\' && i+1 < len(p.s) {
+				// skip escaped char
+				i++
+			}
+			i++
+		}
+		if i >= len(p.s) || p.s[i] != p.s[0] {
+			p.errorf("unmatched quote")
+			return
+		}
+		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
+		if err != nil {
+			p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err)
+			return
+		}
+		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
+		p.cur.unquoted = unq
+	default:
+		i := 0
+		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
+			i++
+		}
+		if i == 0 {
+			p.errorf("unexpected byte %#x", p.s[0])
+			return
+		}
+		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
+	}
+	p.offset += len(p.cur.value)
+}
+
+// Back off the parser by one token. Can only be done between calls to next().
+// It makes the next advance() a no-op.
+func (p *textParser) back() { p.backed = true }
+
+// Advances the parser and returns the new current token.
+func (p *textParser) next() *token {
+	if p.backed || p.done {
+		p.backed = false
+		return &p.cur
+	}
+	p.advance()
+	if p.done {
+		p.cur.value = ""
+	} else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) {
+		// Look for multiple quoted strings separated by whitespace,
+		// and concatenate them.
+		cat := p.cur
+		for {
+			p.skipWhitespace()
+			if p.done || !isQuote(p.s[0]) {
+				break
+			}
+			p.advance()
+			if p.cur.err != nil {
+				return &p.cur
+			}
+			cat.value += " " + p.cur.value
+			cat.unquoted += p.cur.unquoted
+		}
+		p.done = false // parser may have seen EOF, but we want to return cat
+		p.cur = cat
+	}
+	return &p.cur
+}
+
+func (p *textParser) consumeToken(s string) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != s {
+		p.back()
+		return p.errorf("expected %q, found %q", s, tok.value)
+	}
+	return nil
+}
+
+var errBadUTF8 = errors.New("proto: bad UTF-8")
+
+func unquoteC(s string, quote rune) (string, error) {
+	// This is based on C++'s tokenizer.cc.
+	// Despite its name, this is *not* parsing C syntax.
+	// For instance, "\0" is an invalid quoted string.
+
+	// Avoid allocation in trivial cases.
+	simple := true
+	for _, r := range s {
+		if r == '\\' || r == quote {
+			simple = false
+			break
+		}
+	}
+	if simple {
+		return s, nil
+	}
+
+	buf := make([]byte, 0, 3*len(s)/2)
+	for len(s) > 0 {
+		r, n := utf8.DecodeRuneInString(s)
+		if r == utf8.RuneError && n == 1 {
+			return "", errBadUTF8
+		}
+		s = s[n:]
+		if r != '\\' {
+			if r < utf8.RuneSelf {
+				buf = append(buf, byte(r))
+			} else {
+				buf = append(buf, string(r)...)
+			}
+			continue
+		}
+
+		ch, tail, err := unescape(s)
+		if err != nil {
+			return "", err
+		}
+		buf = append(buf, ch...)
+		s = tail
+	}
+	return string(buf), nil
+}
+
+func unescape(s string) (ch string, tail string, err error) {
+	r, n := utf8.DecodeRuneInString(s)
+	if r == utf8.RuneError && n == 1 {
+		return "", "", errBadUTF8
+	}
+	s = s[n:]
+	switch r {
+	case 'a':
+		return "\a", s, nil
+	case 'b':
+		return "\b", s, nil
+	case 'f':
+		return "\f", s, nil
+	case 'n':
+		return "\n", s, nil
+	case 'r':
+		return "\r", s, nil
+	case 't':
+		return "\t", s, nil
+	case 'v':
+		return "\v", s, nil
+	case '?':
+		return "?", s, nil // trigraph workaround
+	case '\'', '"', '\\':
+		return string(r), s, nil
+	case '0', '1', '2', '3', '4', '5', '6', '7':
+		if len(s) < 2 {
+			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
+		}
+		ss := string(r) + s[:2]
+		s = s[2:]
+		i, err := strconv.ParseUint(ss, 8, 8)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss)
+		}
+		return string([]byte{byte(i)}), s, nil
+	case 'x', 'X', 'u', 'U':
+		var n int
+		switch r {
+		case 'x', 'X':
+			n = 2
+		case 'u':
+			n = 4
+		case 'U':
+			n = 8
+		}
+		if len(s) < n {
+			return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n)
+		}
+		ss := s[:n]
+		s = s[n:]
+		i, err := strconv.ParseUint(ss, 16, 64)
+		if err != nil {
+			return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss)
+		}
+		if r == 'x' || r == 'X' {
+			return string([]byte{byte(i)}), s, nil
+		}
+		if i > utf8.MaxRune {
+			return "", "", fmt.Errorf(`\%c%s is not a valid Unicode code point`, r, ss)
+		}
+		return string(rune(i)), s, nil
+	}
+	return "", "", fmt.Errorf(`unknown escape \%c`, r)
+}
+
+func isIdentOrNumberChar(c byte) bool {
+	switch {
+	case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z':
+		return true
+	case '0' <= c && c <= '9':
+		return true
+	}
+	switch c {
+	case '-', '+', '.', '_':
+		return true
+	}
+	return false
+}
+
+func isWhitespace(c byte) bool {
+	switch c {
+	case ' ', '\t', '\n', '\r':
+		return true
+	}
+	return false
+}
+
+func isQuote(c byte) bool {
+	switch c {
+	case '"', '\'':
+		return true
+	}
+	return false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/text_encode.go b/vendor/github.com/golang/protobuf/proto/text_encode.go
new file mode 100644
index 0000000..a31134e
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/text_encode.go
@@ -0,0 +1,560 @@
+// Copyright 2010 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	"bytes"
+	"encoding"
+	"fmt"
+	"io"
+	"math"
+	"sort"
+	"strings"
+
+	"google.golang.org/protobuf/encoding/prototext"
+	"google.golang.org/protobuf/encoding/protowire"
+	"google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+)
+
+const wrapTextMarshalV2 = false
+
+// TextMarshaler is a configurable text format marshaler.
+type TextMarshaler struct {
+	Compact   bool // use compact text format (one line)
+	ExpandAny bool // expand google.protobuf.Any messages of known types
+}
+
+// Marshal writes the proto text format of m to w.
+func (tm *TextMarshaler) Marshal(w io.Writer, m Message) error {
+	b, err := tm.marshal(m)
+	if len(b) > 0 {
+		if _, err := w.Write(b); err != nil {
+			return err
+		}
+	}
+	return err
+}
+
+// Text returns a proto text formatted string of m.
+func (tm *TextMarshaler) Text(m Message) string {
+	b, _ := tm.marshal(m)
+	return string(b)
+}
+
+func (tm *TextMarshaler) marshal(m Message) ([]byte, error) {
+	mr := MessageReflect(m)
+	if mr == nil || !mr.IsValid() {
+		return []byte("<nil>"), nil
+	}
+
+	if wrapTextMarshalV2 {
+		if m, ok := m.(encoding.TextMarshaler); ok {
+			return m.MarshalText()
+		}
+
+		opts := prototext.MarshalOptions{
+			AllowPartial: true,
+			EmitUnknown:  true,
+		}
+		if !tm.Compact {
+			opts.Indent = "  "
+		}
+		if !tm.ExpandAny {
+			opts.Resolver = (*protoregistry.Types)(nil)
+		}
+		return opts.Marshal(mr.Interface())
+	} else {
+		w := &textWriter{
+			compact:   tm.Compact,
+			expandAny: tm.ExpandAny,
+			complete:  true,
+		}
+
+		if m, ok := m.(encoding.TextMarshaler); ok {
+			b, err := m.MarshalText()
+			if err != nil {
+				return nil, err
+			}
+			w.Write(b)
+			return w.buf, nil
+		}
+
+		err := w.writeMessage(mr)
+		return w.buf, err
+	}
+}
+
+var (
+	defaultTextMarshaler = TextMarshaler{}
+	compactTextMarshaler = TextMarshaler{Compact: true}
+)
+
+// MarshalText writes the proto text format of m to w.
+func MarshalText(w io.Writer, m Message) error { return defaultTextMarshaler.Marshal(w, m) }
+
+// MarshalTextString returns a proto text formatted string of m.
+func MarshalTextString(m Message) string { return defaultTextMarshaler.Text(m) }
+
+// CompactText writes the compact proto text format of m to w.
+func CompactText(w io.Writer, m Message) error { return compactTextMarshaler.Marshal(w, m) }
+
+// CompactTextString returns a compact proto text formatted string of m.
+func CompactTextString(m Message) string { return compactTextMarshaler.Text(m) }
+
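+// Illustrative sketch: a text-format round trip through the helpers above,
+// assuming a hypothetical generated type pb.Example with an optional string
+// field Name.
+//
+//	s := proto.MarshalTextString(&pb.Example{Name: proto.String("demo")})
+//	out := &pb.Example{}
+//	if err := proto.UnmarshalText(s, out); err != nil {
+//		// handle parse error
+//	}
+//	_ = proto.CompactTextString(out) // same content on a single line
+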
+var (
+	newline         = []byte("\n")
+	endBraceNewline = []byte("}\n")
+	posInf          = []byte("inf")
+	negInf          = []byte("-inf")
+	nan             = []byte("nan")
+)
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+	compact   bool // same as TextMarshaler.Compact
+	expandAny bool // same as TextMarshaler.ExpandAny
+	complete  bool // whether the current position is a complete line
+	indent    int  // indentation level; never negative
+	buf       []byte
+}
+
+func (w *textWriter) Write(p []byte) (n int, _ error) {
+	newlines := bytes.Count(p, newline)
+	if newlines == 0 {
+		if !w.compact && w.complete {
+			w.writeIndent()
+		}
+		w.buf = append(w.buf, p...)
+		w.complete = false
+		return len(p), nil
+	}
+
+	frags := bytes.SplitN(p, newline, newlines+1)
+	if w.compact {
+		for i, frag := range frags {
+			if i > 0 {
+				w.buf = append(w.buf, ' ')
+				n++
+			}
+			w.buf = append(w.buf, frag...)
+			n += len(frag)
+		}
+		return n, nil
+	}
+
+	for i, frag := range frags {
+		if w.complete {
+			w.writeIndent()
+		}
+		w.buf = append(w.buf, frag...)
+		n += len(frag)
+		if i+1 < len(frags) {
+			w.buf = append(w.buf, '\n')
+			n++
+		}
+	}
+	w.complete = len(frags[len(frags)-1]) == 0
+	return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+	if w.compact && c == '\n' {
+		c = ' '
+	}
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	w.buf = append(w.buf, c)
+	w.complete = c == '\n'
+	return nil
+}
+
+func (w *textWriter) writeName(fd protoreflect.FieldDescriptor) {
+	if !w.compact && w.complete {
+		w.writeIndent()
+	}
+	w.complete = false
+
+	if fd.Kind() != protoreflect.GroupKind {
+		w.buf = append(w.buf, fd.Name()...)
+		w.WriteByte(':')
+	} else {
+		// Use message type name for group field name.
+		w.buf = append(w.buf, fd.Message().Name()...)
+	}
+
+	if !w.compact {
+		w.WriteByte(' ')
+	}
+}
+
+func requiresQuotes(u string) bool {
+	// When the type URL contains any character outside [0-9A-Za-z._/], it must be quoted.
+	for _, ch := range u {
+		switch {
+		case ch == '.' || ch == '/' || ch == '_':
+			continue
+		case '0' <= ch && ch <= '9':
+			continue
+		case 'A' <= ch && ch <= 'Z':
+			continue
+		case 'a' <= ch && ch <= 'z':
+			continue
+		default:
+			return true
+		}
+	}
+	return false
+}
+
+// writeProto3Any writes an expanded google.protobuf.Any message.
+//
+// It returns (false, nil) if the value in m can't be unmarshaled (e.g. because
+// the required message types are not linked in).
+//
+// It returns (true, error) when m was written in expanded format or an error
+// was encountered.
+func (w *textWriter) writeProto3Any(m protoreflect.Message) (bool, error) {
+	md := m.Descriptor()
+	fdURL := md.Fields().ByName("type_url")
+	fdVal := md.Fields().ByName("value")
+
+	url := m.Get(fdURL).String()
+	mt, err := protoregistry.GlobalTypes.FindMessageByURL(url)
+	if err != nil {
+		return false, nil
+	}
+
+	b := m.Get(fdVal).Bytes()
+	m2 := mt.New()
+	if err := proto.Unmarshal(b, m2.Interface()); err != nil {
+		return false, nil
+	}
+	w.Write([]byte("["))
+	if requiresQuotes(url) {
+		w.writeQuotedString(url)
+	} else {
+		w.Write([]byte(url))
+	}
+	if w.compact {
+		w.Write([]byte("]:<"))
+	} else {
+		w.Write([]byte("]: <\n"))
+		w.indent++
+	}
+	if err := w.writeMessage(m2); err != nil {
+		return true, err
+	}
+	if w.compact {
+		w.Write([]byte("> "))
+	} else {
+		w.indent--
+		w.Write([]byte(">\n"))
+	}
+	return true, nil
+}
+
+func (w *textWriter) writeMessage(m protoreflect.Message) error {
+	md := m.Descriptor()
+	if w.expandAny && md.FullName() == "google.protobuf.Any" {
+		if canExpand, err := w.writeProto3Any(m); canExpand {
+			return err
+		}
+	}
+
+	fds := md.Fields()
+	for i := 0; i < fds.Len(); {
+		fd := fds.Get(i)
+		if od := fd.ContainingOneof(); od != nil {
+			fd = m.WhichOneof(od)
+			i += od.Fields().Len()
+		} else {
+			i++
+		}
+		if fd == nil || !m.Has(fd) {
+			continue
+		}
+
+		switch {
+		case fd.IsList():
+			lv := m.Get(fd).List()
+			for j := 0; j < lv.Len(); j++ {
+				w.writeName(fd)
+				v := lv.Get(j)
+				if err := w.writeSingularValue(v, fd); err != nil {
+					return err
+				}
+				w.WriteByte('\n')
+			}
+		case fd.IsMap():
+			kfd := fd.MapKey()
+			vfd := fd.MapValue()
+			mv := m.Get(fd).Map()
+
+			type entry struct{ key, val protoreflect.Value }
+			var entries []entry
+			mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
+				entries = append(entries, entry{k.Value(), v})
+				return true
+			})
+			sort.Slice(entries, func(i, j int) bool {
+				switch kfd.Kind() {
+				case protoreflect.BoolKind:
+					return !entries[i].key.Bool() && entries[j].key.Bool()
+				case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
+					return entries[i].key.Int() < entries[j].key.Int()
+				case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
+					return entries[i].key.Uint() < entries[j].key.Uint()
+				case protoreflect.StringKind:
+					return entries[i].key.String() < entries[j].key.String()
+				default:
+					panic("invalid kind")
+				}
+			})
+			for _, entry := range entries {
+				w.writeName(fd)
+				w.WriteByte('<')
+				if !w.compact {
+					w.WriteByte('\n')
+				}
+				w.indent++
+				w.writeName(kfd)
+				if err := w.writeSingularValue(entry.key, kfd); err != nil {
+					return err
+				}
+				w.WriteByte('\n')
+				w.writeName(vfd)
+				if err := w.writeSingularValue(entry.val, vfd); err != nil {
+					return err
+				}
+				w.WriteByte('\n')
+				w.indent--
+				w.WriteByte('>')
+				w.WriteByte('\n')
+			}
+		default:
+			w.writeName(fd)
+			if err := w.writeSingularValue(m.Get(fd), fd); err != nil {
+				return err
+			}
+			w.WriteByte('\n')
+		}
+	}
+
+	if b := m.GetUnknown(); len(b) > 0 {
+		w.writeUnknownFields(b)
+	}
+	return w.writeExtensions(m)
+}
+
+func (w *textWriter) writeSingularValue(v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+	switch fd.Kind() {
+	case protoreflect.FloatKind, protoreflect.DoubleKind:
+		switch vf := v.Float(); {
+		case math.IsInf(vf, +1):
+			w.Write(posInf)
+		case math.IsInf(vf, -1):
+			w.Write(negInf)
+		case math.IsNaN(vf):
+			w.Write(nan)
+		default:
+			fmt.Fprint(w, v.Interface())
+		}
+	case protoreflect.StringKind:
+		// NOTE: This does not validate UTF-8 for historical reasons.
+		w.writeQuotedString(string(v.String()))
+	case protoreflect.BytesKind:
+		w.writeQuotedString(string(v.Bytes()))
+	case protoreflect.MessageKind, protoreflect.GroupKind:
+		var bra, ket byte = '<', '>'
+		if fd.Kind() == protoreflect.GroupKind {
+			bra, ket = '{', '}'
+		}
+		w.WriteByte(bra)
+		if !w.compact {
+			w.WriteByte('\n')
+		}
+		w.indent++
+		m := v.Message()
+		if m2, ok := m.Interface().(encoding.TextMarshaler); ok {
+			b, err := m2.MarshalText()
+			if err != nil {
+				return err
+			}
+			w.Write(b)
+		} else {
+			w.writeMessage(m)
+		}
+		w.indent--
+		w.WriteByte(ket)
+	case protoreflect.EnumKind:
+		if ev := fd.Enum().Values().ByNumber(v.Enum()); ev != nil {
+			fmt.Fprint(w, ev.Name())
+		} else {
+			fmt.Fprint(w, v.Enum())
+		}
+	default:
+		fmt.Fprint(w, v.Interface())
+	}
+	return nil
+}
+
+// writeQuotedString writes a quoted string in the protocol buffer text format.
+func (w *textWriter) writeQuotedString(s string) {
+	w.WriteByte('"')
+	for i := 0; i < len(s); i++ {
+		switch c := s[i]; c {
+		case '\n':
+			w.buf = append(w.buf, `\n`...)
+		case '\r':
+			w.buf = append(w.buf, `\r`...)
+		case '\t':
+			w.buf = append(w.buf, `\t`...)
+		case '"':
+			w.buf = append(w.buf, `\"`...)
+		case '\\':
+			w.buf = append(w.buf, `\\`...)
+		default:
+			if isPrint := c >= 0x20 && c < 0x7f; isPrint {
+				w.buf = append(w.buf, c)
+			} else {
+				w.buf = append(w.buf, fmt.Sprintf(`\%03o`, c)...)
+			}
+		}
+	}
+	w.WriteByte('"')
+}
+
+func (w *textWriter) writeUnknownFields(b []byte) {
+	if !w.compact {
+		fmt.Fprintf(w, "/* %d unknown bytes */\n", len(b))
+	}
+
+	for len(b) > 0 {
+		num, wtyp, n := protowire.ConsumeTag(b)
+		if n < 0 {
+			return
+		}
+		b = b[n:]
+
+		if wtyp == protowire.EndGroupType {
+			w.indent--
+			w.Write(endBraceNewline)
+			continue
+		}
+		fmt.Fprint(w, num)
+		if wtyp != protowire.StartGroupType {
+			w.WriteByte(':')
+		}
+		if !w.compact || wtyp == protowire.StartGroupType {
+			w.WriteByte(' ')
+		}
+		switch wtyp {
+		case protowire.VarintType:
+			v, n := protowire.ConsumeVarint(b)
+			if n < 0 {
+				return
+			}
+			b = b[n:]
+			fmt.Fprint(w, v)
+		case protowire.Fixed32Type:
+			v, n := protowire.ConsumeFixed32(b)
+			if n < 0 {
+				return
+			}
+			b = b[n:]
+			fmt.Fprint(w, v)
+		case protowire.Fixed64Type:
+			v, n := protowire.ConsumeFixed64(b)
+			if n < 0 {
+				return
+			}
+			b = b[n:]
+			fmt.Fprint(w, v)
+		case protowire.BytesType:
+			v, n := protowire.ConsumeBytes(b)
+			if n < 0 {
+				return
+			}
+			b = b[n:]
+			fmt.Fprintf(w, "%q", v)
+		case protowire.StartGroupType:
+			w.WriteByte('{')
+			w.indent++
+		default:
+			fmt.Fprintf(w, "/* unknown wire type %d */", wtyp)
+		}
+		w.WriteByte('\n')
+	}
+}
+
+// writeExtensions writes all the extensions in m.
+func (w *textWriter) writeExtensions(m protoreflect.Message) error {
+	md := m.Descriptor()
+	if md.ExtensionRanges().Len() == 0 {
+		return nil
+	}
+
+	type ext struct {
+		desc protoreflect.FieldDescriptor
+		val  protoreflect.Value
+	}
+	var exts []ext
+	m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
+		if fd.IsExtension() {
+			exts = append(exts, ext{fd, v})
+		}
+		return true
+	})
+	sort.Slice(exts, func(i, j int) bool {
+		return exts[i].desc.Number() < exts[j].desc.Number()
+	})
+
+	for _, ext := range exts {
+		// For a message set, use the name of the message as the extension name.
+		name := string(ext.desc.FullName())
+		if isMessageSet(ext.desc.ContainingMessage()) {
+			name = strings.TrimSuffix(name, ".message_set_extension")
+		}
+
+		if !ext.desc.IsList() {
+			if err := w.writeSingularExtension(name, ext.val, ext.desc); err != nil {
+				return err
+			}
+		} else {
+			lv := ext.val.List()
+			for i := 0; i < lv.Len(); i++ {
+				if err := w.writeSingularExtension(name, lv.Get(i), ext.desc); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	return nil
+}
+
+func (w *textWriter) writeSingularExtension(name string, v protoreflect.Value, fd protoreflect.FieldDescriptor) error {
+	fmt.Fprintf(w, "[%s]:", name)
+	if !w.compact {
+		w.WriteByte(' ')
+	}
+	if err := w.writeSingularValue(v, fd); err != nil {
+		return err
+	}
+	w.WriteByte('\n')
+	return nil
+}
+
+func (w *textWriter) writeIndent() {
+	if !w.complete {
+		return
+	}
+	for i := 0; i < w.indent*2; i++ {
+		w.buf = append(w.buf, ' ')
+	}
+	w.complete = false
+}
diff --git a/vendor/github.com/golang/protobuf/proto/wire.go b/vendor/github.com/golang/protobuf/proto/wire.go
new file mode 100644
index 0000000..d7c28da
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wire.go
@@ -0,0 +1,78 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+import (
+	protoV2 "google.golang.org/protobuf/proto"
+	"google.golang.org/protobuf/runtime/protoiface"
+)
+
+// Size returns the size in bytes of the wire-format encoding of m.
+func Size(m Message) int {
+	if m == nil {
+		return 0
+	}
+	mi := MessageV2(m)
+	return protoV2.Size(mi)
+}
+
+// Marshal returns the wire-format encoding of m.
+func Marshal(m Message) ([]byte, error) {
+	b, err := marshalAppend(nil, m, false)
+	if b == nil {
+		b = zeroBytes
+	}
+	return b, err
+}
+
+var zeroBytes = make([]byte, 0, 0)
+
+func marshalAppend(buf []byte, m Message, deterministic bool) ([]byte, error) {
+	if m == nil {
+		return nil, ErrNil
+	}
+	mi := MessageV2(m)
+	nbuf, err := protoV2.MarshalOptions{
+		Deterministic: deterministic,
+		AllowPartial:  true,
+	}.MarshalAppend(buf, mi)
+	if err != nil {
+		return buf, err
+	}
+	if len(buf) == len(nbuf) {
+		if !mi.ProtoReflect().IsValid() {
+			return buf, ErrNil
+		}
+	}
+	return nbuf, checkRequiredNotSet(mi)
+}
+
+// Unmarshal parses a wire-format message in b and places the decoded results in m.
+//
+// Unmarshal resets m before starting to unmarshal, so any existing data in m is always
+// removed. Use UnmarshalMerge to preserve and append to existing data.
+func Unmarshal(b []byte, m Message) error {
+	m.Reset()
+	return UnmarshalMerge(b, m)
+}
+
+// UnmarshalMerge parses a wire-format message in b and places the decoded results in m.
+func UnmarshalMerge(b []byte, m Message) error {
+	mi := MessageV2(m)
+	out, err := protoV2.UnmarshalOptions{
+		AllowPartial: true,
+		Merge:        true,
+	}.UnmarshalState(protoiface.UnmarshalInput{
+		Buf:     b,
+		Message: mi.ProtoReflect(),
+	})
+	if err != nil {
+		return err
+	}
+	if out.Flags&protoiface.UnmarshalInitialized > 0 {
+		return nil
+	}
+	return checkRequiredNotSet(mi)
+}
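+
+// Illustrative sketch: a wire-format round trip through Marshal and Unmarshal,
+// assuming a hypothetical generated type pb.Example with an optional string
+// field Name.
+//
+//	in := &pb.Example{Name: proto.String("demo")}
+//	b, err := proto.Marshal(in)
+//	if err != nil {
+//		// handle marshal error
+//	}
+//	out := &pb.Example{}
+//	if err := proto.Unmarshal(b, out); err != nil {
+//		// handle unmarshal error
+//	}
+//	// proto.Size(in) == len(b), and proto.Equal(in, out) is true.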
diff --git a/vendor/github.com/golang/protobuf/proto/wrappers.go b/vendor/github.com/golang/protobuf/proto/wrappers.go
new file mode 100644
index 0000000..398e348
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/proto/wrappers.go
@@ -0,0 +1,34 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package proto
+
+// Bool stores v in a new bool value and returns a pointer to it.
+func Bool(v bool) *bool { return &v }
+
+// Int stores v in a new int32 value and returns a pointer to it.
+//
+// Deprecated: Use Int32 instead.
+func Int(v int) *int32 { return Int32(int32(v)) }
+
+// Int32 stores v in a new int32 value and returns a pointer to it.
+func Int32(v int32) *int32 { return &v }
+
+// Int64 stores v in a new int64 value and returns a pointer to it.
+func Int64(v int64) *int64 { return &v }
+
+// Uint32 stores v in a new uint32 value and returns a pointer to it.
+func Uint32(v uint32) *uint32 { return &v }
+
+// Uint64 stores v in a new uint64 value and returns a pointer to it.
+func Uint64(v uint64) *uint64 { return &v }
+
+// Float32 stores v in a new float32 value and returns a pointer to it.
+func Float32(v float32) *float32 { return &v }
+
+// Float64 stores v in a new float64 value and returns a pointer to it.
+func Float64(v float64) *float64 { return &v }
+
+// String stores v in a new string value and returns a pointer to it.
+func String(v string) *string { return &v }
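+
+// Illustrative sketch: the helpers above are typically used to populate
+// optional scalar fields of proto2-style generated messages, which are
+// represented as pointers. pb.Example here is a hypothetical generated type.
+//
+//	msg := &pb.Example{
+//		Name:    proto.String("demo"),
+//		Count:   proto.Int32(3),
+//		Enabled: proto.Bool(true),
+//	}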
diff --git a/vendor/github.com/golang/protobuf/ptypes/any.go b/vendor/github.com/golang/protobuf/ptypes/any.go
new file mode 100644
index 0000000..85f9f57
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any.go
@@ -0,0 +1,179 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ptypes
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/protobuf/reflect/protoreflect"
+	"google.golang.org/protobuf/reflect/protoregistry"
+
+	anypb "github.com/golang/protobuf/ptypes/any"
+)
+
+const urlPrefix = "type.googleapis.com/"
+
+// AnyMessageName returns the message name contained in an anypb.Any message.
+// Most type assertions should use the Is function instead.
+//
+// Deprecated: Call the any.MessageName method instead.
+func AnyMessageName(any *anypb.Any) (string, error) {
+	name, err := anyMessageName(any)
+	return string(name), err
+}
+func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
+	if any == nil {
+		return "", fmt.Errorf("message is nil")
+	}
+	name := protoreflect.FullName(any.TypeUrl)
+	if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
+		name = name[i+len("/"):]
+	}
+	if !name.IsValid() {
+		return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
+	}
+	return name, nil
+}
+
+// MarshalAny marshals the given message m into an anypb.Any message.
+//
+// Deprecated: Call the anypb.New function instead.
+func MarshalAny(m proto.Message) (*anypb.Any, error) {
+	switch dm := m.(type) {
+	case DynamicAny:
+		m = dm.Message
+	case *DynamicAny:
+		if dm == nil {
+			return nil, proto.ErrNil
+		}
+		m = dm.Message
+	}
+	b, err := proto.Marshal(m)
+	if err != nil {
+		return nil, err
+	}
+	return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
+}
+
+// Empty returns a new message of the type specified in an anypb.Any message.
+// It returns protoregistry.NotFound if the corresponding message type could not
+// be resolved in the global registry.
+//
+// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
+// to resolve the message name and create a new instance of it.
+func Empty(any *anypb.Any) (proto.Message, error) {
+	name, err := anyMessageName(any)
+	if err != nil {
+		return nil, err
+	}
+	mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
+	if err != nil {
+		return nil, err
+	}
+	return proto.MessageV1(mt.New().Interface()), nil
+}
+
+// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
+// into the provided message m. It returns an error if the target message
+// does not match the type in the Any message or if an unmarshal error occurs.
+//
+// The target message m may be a *DynamicAny message. If the underlying message
+// type could not be resolved, then this returns protoregistry.NotFound.
+//
+// Deprecated: Call the any.UnmarshalTo method instead.
+func UnmarshalAny(any *anypb.Any, m proto.Message) error {
+	if dm, ok := m.(*DynamicAny); ok {
+		if dm.Message == nil {
+			var err error
+			dm.Message, err = Empty(any)
+			if err != nil {
+				return err
+			}
+		}
+		m = dm.Message
+	}
+
+	anyName, err := AnyMessageName(any)
+	if err != nil {
+		return err
+	}
+	msgName := proto.MessageName(m)
+	if anyName != msgName {
+		return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
+	}
+	return proto.Unmarshal(any.Value, m)
+}
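+
+// A pack/unpack sketch using these deprecated helpers (pb.Event is a
+// hypothetical generated message type):
+//
+//   src := &pb.Event{Id: proto.Int64(1)}
+//   a, err := ptypes.MarshalAny(src)
+//   if err != nil { ... }
+//   dst := &pb.Event{}
+//   if ptypes.Is(a, dst) {
+//     err = ptypes.UnmarshalAny(a, dst)
+//   }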
+
+// Is reports whether the Any message contains a message of the specified type.
+//
+// Deprecated: Call the any.MessageIs method instead.
+func Is(any *anypb.Any, m proto.Message) bool {
+	if any == nil || m == nil {
+		return false
+	}
+	name := proto.MessageName(m)
+	if !strings.HasSuffix(any.TypeUrl, name) {
+		return false
+	}
+	return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
+}
+
+// DynamicAny is a value that can be passed to UnmarshalAny to automatically
+// allocate a proto.Message for the type specified in an anypb.Any message.
+// The allocated message is stored in the embedded proto.Message.
+//
+// Example:
+//   var x ptypes.DynamicAny
+//   if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
+//   fmt.Printf("unmarshaled message: %v", x.Message)
+//
+// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
+// the any message contents into a new instance of the underlying message.
+type DynamicAny struct{ proto.Message }
+
+func (m DynamicAny) String() string {
+	if m.Message == nil {
+		return "<nil>"
+	}
+	return m.Message.String()
+}
+func (m DynamicAny) Reset() {
+	if m.Message == nil {
+		return
+	}
+	m.Message.Reset()
+}
+func (m DynamicAny) ProtoMessage() {
+	return
+}
+func (m DynamicAny) ProtoReflect() protoreflect.Message {
+	if m.Message == nil {
+		return nil
+	}
+	return dynamicAny{proto.MessageReflect(m.Message)}
+}
+
+type dynamicAny struct{ protoreflect.Message }
+
+func (m dynamicAny) Type() protoreflect.MessageType {
+	return dynamicAnyType{m.Message.Type()}
+}
+func (m dynamicAny) New() protoreflect.Message {
+	return dynamicAnyType{m.Message.Type()}.New()
+}
+func (m dynamicAny) Interface() protoreflect.ProtoMessage {
+	return DynamicAny{proto.MessageV1(m.Message.Interface())}
+}
+
+type dynamicAnyType struct{ protoreflect.MessageType }
+
+func (t dynamicAnyType) New() protoreflect.Message {
+	return dynamicAny{t.MessageType.New()}
+}
+func (t dynamicAnyType) Zero() protoreflect.Message {
+	return dynamicAny{t.MessageType.Zero()}
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
new file mode 100644
index 0000000..0ef27d3
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go
@@ -0,0 +1,62 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/any/any.proto
+
+package any
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	anypb "google.golang.org/protobuf/types/known/anypb"
+	reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/any.proto.
+
+type Any = anypb.Any
+
+var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
+	0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
+	0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+	0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
+	0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
+	0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
+	0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
+func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
+	if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
+	file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/doc.go b/vendor/github.com/golang/protobuf/ptypes/doc.go
new file mode 100644
index 0000000..d3c3325
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/doc.go
@@ -0,0 +1,10 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package ptypes provides functionality for interacting with well-known types.
+//
+// Deprecated: Well-known types have specialized functionality directly
+// injected into the generated packages for each message type.
+// See the deprecation notice for each function for the suggested alternative.
+package ptypes
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration.go b/vendor/github.com/golang/protobuf/ptypes/duration.go
new file mode 100644
index 0000000..b2b55dd
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration.go
@@ -0,0 +1,76 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ptypes
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	durationpb "github.com/golang/protobuf/ptypes/duration"
+)
+
+// Range of google.protobuf.Duration as specified in duration.proto.
+// This is about 10,000 years in seconds.
+const (
+	maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
+	minSeconds = -maxSeconds
+)
+
+// Duration converts a durationpb.Duration to a time.Duration.
+// Duration returns an error if dur is invalid or overflows a time.Duration.
+//
+// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
+func Duration(dur *durationpb.Duration) (time.Duration, error) {
+	if err := validateDuration(dur); err != nil {
+		return 0, err
+	}
+	d := time.Duration(dur.Seconds) * time.Second
+	if int64(d/time.Second) != dur.Seconds {
+		return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
+	}
+	if dur.Nanos != 0 {
+		d += time.Duration(dur.Nanos) * time.Nanosecond
+		if (d < 0) != (dur.Nanos < 0) {
+			return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
+		}
+	}
+	return d, nil
+}
+
+// DurationProto converts a time.Duration to a durationpb.Duration.
+//
+// Deprecated: Call the durationpb.New function instead.
+func DurationProto(d time.Duration) *durationpb.Duration {
+	nanos := d.Nanoseconds()
+	secs := nanos / 1e9
+	nanos -= secs * 1e9
+	return &durationpb.Duration{
+		Seconds: int64(secs),
+		Nanos:   int32(nanos),
+	}
+}
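+
+// A round-trip sketch (values shown are what the functions above produce for
+// this input):
+//
+//   dur := ptypes.DurationProto(90 * time.Second) // Seconds: 90, Nanos: 0
+//   d, err := ptypes.Duration(dur)                // 1m30s, nil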
+
+// validateDuration determines whether the durationpb.Duration is valid
+// according to the definition in google/protobuf/duration.proto.
+// A valid durationpb.Duration may still be too large to fit into a time.Duration.
+// Note that the range of durationpb.Duration is about 10,000 years,
+// while the range of time.Duration is about 290 years.
+func validateDuration(dur *durationpb.Duration) error {
+	if dur == nil {
+		return errors.New("duration: nil Duration")
+	}
+	if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
+		return fmt.Errorf("duration: %v: seconds out of range", dur)
+	}
+	if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
+		return fmt.Errorf("duration: %v: nanos out of range", dur)
+	}
+	// Seconds and Nanos must have the same sign, unless dur.Nanos is zero.
+	if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
+		return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
+	}
+	return nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
new file mode 100644
index 0000000..d0079ee
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go
@@ -0,0 +1,63 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/duration/duration.proto
+
+package duration
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	durationpb "google.golang.org/protobuf/types/known/durationpb"
+	reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/duration.proto.
+
+type Duration = durationpb.Duration
+
+var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
+	0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
+	0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
+	0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
+	0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
+	0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
+	0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
+	0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
+func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
+	if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
+	file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
new file mode 100644
index 0000000..16686a6
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go
@@ -0,0 +1,62 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/empty/empty.proto
+
+package empty
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	emptypb "google.golang.org/protobuf/types/known/emptypb"
+	reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/empty.proto.
+
+type Empty = emptypb.Empty
+
+var File_github_com_golang_protobuf_ptypes_empty_empty_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = []byte{
+	0x0a, 0x33, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e,
+	0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1b, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72,
+	0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x2e, 0x70, 0x72, 0x6f,
+	0x74, 0x6f, 0x42, 0x2f, 0x5a, 0x2d, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d,
+	0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66,
+	0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x65, 0x6d, 0x70, 0x74, 0x79, 0x3b, 0x65, 0x6d,
+	0x70, 0x74, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() }
+func file_github_com_golang_protobuf_ptypes_empty_empty_proto_init() {
+	if File_github_com_golang_protobuf_ptypes_empty_empty_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_empty_empty_proto = out.File
+	file_github_com_golang_protobuf_ptypes_empty_empty_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_empty_empty_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_empty_empty_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
new file mode 100644
index 0000000..8368a3f
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp.go
@@ -0,0 +1,112 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ptypes
+
+import (
+	"errors"
+	"fmt"
+	"time"
+
+	timestamppb "github.com/golang/protobuf/ptypes/timestamp"
+)
+
+// Range of google.protobuf.Timestamp as specified in timestamp.proto.
+const (
+	// Seconds field of the earliest valid Timestamp.
+	// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	minValidSeconds = -62135596800
+	// Seconds field just after the latest valid Timestamp.
+	// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
+	maxValidSeconds = 253402300800
+)
+
+// Timestamp converts a timestamppb.Timestamp to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if Timestamp returns an error, the first return
+// value is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+//
+// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
+func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// TimestampNow returns a google.protobuf.Timestamp for the current time.
+//
+// Deprecated: Call the timestamppb.Now function instead.
+func TimestampNow() *timestamppb.Timestamp {
+	ts, err := TimestampProto(time.Now())
+	if err != nil {
+		panic("ptypes: time.Now() out of Timestamp range")
+	}
+	return ts
+}
+
+// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+//
+// Deprecated: Call the timestamppb.New function instead.
+func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
+	ts := &timestamppb.Timestamp{
+		Seconds: t.Unix(),
+		Nanos:   int32(t.Nanosecond()),
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
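+
+// A round-trip sketch (illustrative only):
+//
+//   ts, err := ptypes.TimestampProto(time.Now())
+//   if err != nil { ... }
+//   t, err := ptypes.Timestamp(ts) // back to a time.Time in the UTC locale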
+
+// TimestampString returns the RFC 3339 string for valid Timestamps.
+// For invalid Timestamps, it returns an error message in parentheses.
+//
+// Deprecated: Call the ts.AsTime method instead,
+// followed by a call to the Format method on the time.Time value.
+func TimestampString(ts *timestamppb.Timestamp) string {
+	t, err := Timestamp(ts)
+	if err != nil {
+		return fmt.Sprintf("(%v)", err)
+	}
+	return t.Format(time.RFC3339Nano)
+}
+
+// validateTimestamp determines whether a Timestamp is valid.
+// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
+// and has a Nanos field in the range [0, 1e9).
+//
+// If the Timestamp is valid, validateTimestamp returns nil.
+// Otherwise, it returns an error that describes the problem.
+//
+// Every valid Timestamp can be represented by a time.Time,
+// but the converse is not true.
+func validateTimestamp(ts *timestamppb.Timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
diff --git a/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
new file mode 100644
index 0000000..a76f807
--- /dev/null
+++ b/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
@@ -0,0 +1,64 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
+
+package timestamp
+
+import (
+	protoreflect "google.golang.org/protobuf/reflect/protoreflect"
+	protoimpl "google.golang.org/protobuf/runtime/protoimpl"
+	timestamppb "google.golang.org/protobuf/types/known/timestamppb"
+	reflect "reflect"
+)
+
+// Symbols defined in public import of google/protobuf/timestamp.proto.
+
+type Timestamp = timestamppb.Timestamp
+
+var File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto protoreflect.FileDescriptor
+
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = []byte{
+	0x0a, 0x3b, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2f, 0x74, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67,
+	0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74,
+	0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x37,
+	0x5a, 0x35, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
+	0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
+	0x70, 0x65, 0x73, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x3b, 0x74, 0x69,
+	0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
+	0x33,
+}
+
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = []interface{}{}
+var file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = []int32{
+	0, // [0:0] is the sub-list for method output_type
+	0, // [0:0] is the sub-list for method input_type
+	0, // [0:0] is the sub-list for extension type_name
+	0, // [0:0] is the sub-list for extension extendee
+	0, // [0:0] is the sub-list for field type_name
+}
+
+func init() { file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() }
+func file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_init() {
+	if File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto != nil {
+		return
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc,
+			NumEnums:      0,
+			NumMessages:   0,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes,
+		DependencyIndexes: file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs,
+	}.Build()
+	File_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto = out.File
+	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_rawDesc = nil
+	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_goTypes = nil
+	file_github_com_golang_protobuf_ptypes_timestamp_timestamp_proto_depIdxs = nil
+}
diff --git a/vendor/github.com/google/gopacket/.gitignore b/vendor/github.com/google/gopacket/.gitignore
new file mode 100644
index 0000000..149266f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.gitignore
@@ -0,0 +1,38 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+#*
+*~
+
+# examples binaries
+examples/synscan/synscan
+examples/pfdump/pfdump
+examples/pcapdump/pcapdump
+examples/httpassembly/httpassembly
+examples/statsassembly/statsassembly
+examples/arpscan/arpscan
+examples/bidirectional/bidirectional
+examples/bytediff/bytediff
+examples/reassemblydump/reassemblydump
+layers/gen
+macs/gen
+pcap/pcap_tester
diff --git a/vendor/github.com/google/gopacket/.travis.gofmt.sh b/vendor/github.com/google/gopacket/.travis.gofmt.sh
new file mode 100644
index 0000000..e341a1c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.gofmt.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+if [ -n "$(go fmt ./...)" ]; then
+  echo "Go code is not formatted, run 'go fmt github.com/google/stenographer/...'" >&2
+  exit 1
+fi
diff --git a/vendor/github.com/google/gopacket/.travis.golint.sh b/vendor/github.com/google/gopacket/.travis.golint.sh
new file mode 100644
index 0000000..0e267f5
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.golint.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+
+go get golang.org/x/lint/golint
+DIRS=". tcpassembly tcpassembly/tcpreader ip4defrag reassembly macs pcapgo pcap afpacket pfring routing defrag/lcmdefrag"
+# Add subdirectories here as we clean up golint on each.
+for subdir in $DIRS; do
+  pushd $subdir
+  if golint |
+      grep -v CannotSetRFMon |  # pcap exported error name
+      grep -v DataLost |        # tcpassembly/tcpreader exported error name
+      grep .; then
+    exit 1
+  fi
+  popd
+done
+
+pushd layers
+for file in *.go; do
+  if cat .lint_blacklist | grep -q $file; then
+    echo "Skipping lint of $file due to .lint_blacklist"
+  elif golint $file | grep .; then
+    echo "Lint error in file $file"
+    exit 1
+  fi
+done
+popd
diff --git a/vendor/github.com/google/gopacket/.travis.govet.sh b/vendor/github.com/google/gopacket/.travis.govet.sh
new file mode 100644
index 0000000..a5c1354
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.govet.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+
+cd "$(dirname $0)"
+DIRS=". layers pcap pcapgo tcpassembly tcpassembly/tcpreader routing ip4defrag bytediff macs defrag/lcmdefrag"
+set -e
+for subdir in $DIRS; do
+  pushd $subdir
+  go vet
+  popd
+done
diff --git a/vendor/github.com/google/gopacket/.travis.install.sh b/vendor/github.com/google/gopacket/.travis.install.sh
new file mode 100644
index 0000000..648c901
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.install.sh
@@ -0,0 +1,9 @@
+#!/bin/bash
+
+set -ev
+
+go get github.com/google/gopacket
+go get github.com/google/gopacket/layers
+go get github.com/google/gopacket/tcpassembly
+go get github.com/google/gopacket/reassembly
+go get github.com/google/gopacket/pcapgo
diff --git a/vendor/github.com/google/gopacket/.travis.script.sh b/vendor/github.com/google/gopacket/.travis.script.sh
new file mode 100644
index 0000000..e0f0cda
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.script.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+set -ev
+
+go test github.com/google/gopacket
+go test github.com/google/gopacket/layers
+go test github.com/google/gopacket/tcpassembly
+go test github.com/google/gopacket/reassembly
+go test github.com/google/gopacket/pcapgo
+go test github.com/google/gopacket/pcap
+sudo $(which go) test github.com/google/gopacket/routing
diff --git a/vendor/github.com/google/gopacket/.travis.yml b/vendor/github.com/google/gopacket/.travis.yml
new file mode 100644
index 0000000..84f1f49
--- /dev/null
+++ b/vendor/github.com/google/gopacket/.travis.yml
@@ -0,0 +1,57 @@
+language: go
+go:
+ - 1.11.x
+ - 1.12.x
+ - 1.13.x
+ - master
+
+addons:
+  apt:
+    packages:
+      libpcap-dev
+
+# use modules except for older versions (see below)
+install: true
+
+env:
+  - GO111MODULE=on
+
+script: ./.travis.script.sh
+
+matrix:
+  fast_finish: true
+  allow_failures:
+    - go: master
+
+jobs:
+  include:
+    - go: 1.5.x
+      install: ./.travis.install.sh
+    - go: 1.6.x
+      install: ./.travis.install.sh
+    - go: 1.7.x
+      install: ./.travis.install.sh
+    - go: 1.8.x
+      install: ./.travis.install.sh
+    - go: 1.9.x
+      install: ./.travis.install.sh
+    - go: 1.10.x
+      install: ./.travis.install.sh
+    - os: osx
+      go: 1.x
+# windows doesn't work on travis (package installation just hangs and then errors out)
+#    - os: windows
+#      go: 1.x
+#      # We don't need nmap - but that's the only way to get npcap:
+#      before_install: choco install npcap --version 0.86 -y
+    - stage: style
+      name: "fmt/vet/lint"
+      go: 1.x
+      script:
+        - ./.travis.gofmt.sh
+        - ./.travis.govet.sh
+        - ./.travis.golint.sh
+
+stages:
+  - style
+  - test
diff --git a/vendor/github.com/google/gopacket/AUTHORS b/vendor/github.com/google/gopacket/AUTHORS
new file mode 100644
index 0000000..24e834e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/AUTHORS
@@ -0,0 +1,54 @@
+AUTHORS AND MAINTAINERS:
+
+MAIN DEVELOPERS:
+Graeme Connell   <gconnell@google.com, gsconnell@gmail.com>
+
+AUTHORS:
+Nigel Tao <nigeltao@google.com>
+Cole Mickens <cole.mickens@gmail.com>
+Ben Daglish <bdaglish@restorepoint.com>
+Luis Martinez <martinezlc99@gmail.com>
+Remco Verhoef <remco@dutchcoders.io>
+Hiroaki Kawai <Hiroaki.Kawai@gmail.com>
+Lukas Lueg <lukas.lueg@gmail.com>
+Laurent Hausermann <laurent.hausermann@gmail.com>
+Bill Green <bgreen@newrelic.com>
+Christian Mäder <christian.maeder@nine.ch>
+Gernot Vormayr <gvormayr@gmail.com>
+Vitor Garcia Graveto <victor.graveto@gmail.com>
+Elias Chavarria Reyes <elchavar@cisco.com>
+Daniel Rittweiler <ripx80@protonmail.com>
+
+CONTRIBUTORS:
+Attila Oláh <attila@attilaolah.eu>
+Vittus Mikiassen <matt.miki.vimik@gmail.com>
+Matthias Radestock <matthias.radestock@gmail.com>
+Matthew Sackman <matthew@wellquite.org>
+Loic Prylli <loicp@google.com>
+Alexandre Fiori <fiorix@gmail.com>
+Adrian Tam <adrian.c.m.tam@gmail.com>
+Satoshi Matsumoto <kaorimatz@gmail.com>
+David Stainton <dstainton415@gmail.com>
+Jesse Ward <jesse@jesseward.com>
+Kane Mathers <kane@kanemathers.name>
+Jose Selvi <jselvi@pentester.es>
+Yerden Zhumabekov <yerden.zhumabekov@gmail.com>
+Jensen Hwa <jensenhwa@gmail.com>
+
+-----------------------------------------------
+FORKED FROM github.com/akrennmair/gopcap
+ALL THE FOLLOWING ARE FOR THAT PROJECT
+
+MAIN DEVELOPERS:
+Andreas Krennmair <ak@synflood.at>
+
+CONTRIBUTORS:
+Andrea Nall <anall@andreanall.com>
+Daniel Arndt <danielarndt@gmail.com>
+Dustin Sallings <dustin@spy.net>
+Graeme Connell <gconnell@google.com, gsconnell@gmail.com>
+Guillaume Savary <guillaume@savary.name>
+Mark Smith <mark@qq.is>
+Miek Gieben <miek@miek.nl>
+Mike Bell <mike@mikebell.org>
+Trevor Strohman <strohman@google.com>
diff --git a/vendor/github.com/google/gopacket/CONTRIBUTING.md b/vendor/github.com/google/gopacket/CONTRIBUTING.md
new file mode 100644
index 0000000..99ab7a2
--- /dev/null
+++ b/vendor/github.com/google/gopacket/CONTRIBUTING.md
@@ -0,0 +1,215 @@
+Contributing To gopacket
+========================
+
+So you've got some code and you'd like it to be part of gopacket... wonderful!
+We're happy to accept contributions, whether they're fixes to old protocols, new
+protocols entirely, or anything else you think would improve the gopacket
+library.  This document is designed to help you to do just that.
+
+The first section deals with the plumbing:  how to actually get a change
+submitted.
+
+The second section deals with coding style... Go is great in that it
+has a uniform style implemented by 'go fmt', but there's still some decisions
+we've made that go above and beyond, and if you follow them, they won't come up
+in your code review.
+
+The third section deals with some of the implementation decisions we've made,
+which may help you to understand the current code and which we may ask you to
+conform to (or provide compelling reasons for ignoring).
+
+Overall, we hope this document will help you to understand our system and write
+great code which fits in, and help us turn your code review around quickly so
+the code can make it into the master branch as soon as possible.
+
+
+How To Submit Code
+------------------
+
+We use github.com's Pull Request feature to receive code contributions from
+external contributors.  See
+https://help.github.com/articles/creating-a-pull-request/ for details on
+how to create a request.
+
+Also, there's a local script `gc` in the base directory of GoPacket that
+runs a local set of checks, which should give you relatively high confidence
+that your pull won't fail github pull checks.
+
+```sh
+go get github.com/google/gopacket
+cd $GOPATH/src/github.com/google/gopacket
+git checkout -b <mynewfeature>  # create a new branch to work from
+... code code code ...
+./gc  # Run this to do local commits, it performs a number of checks
+```
+
+To sum up:
+
+* DO
+    + Pull down the latest version.
+    + Make a feature-specific branch.
+    + Code using the style and methods discussed in the rest of this document.
+    + Use the ./gc command to do local commits or check correctness.
+    + Push your new feature branch up to github.com, as a pull request.
+    + Handle comments and requests from reviewers, pushing new commits up to
+      your feature branch as problems are addressed.
+    + Put interesting comments and discussions into commit comments.
+* DON'T
+    + Push to someone else's branch without their permission.
+
+
+Coding Style
+------------
+
+* Go code must be run through `go fmt`, `go vet`, and `golint`
+* Follow http://golang.org/doc/effective_go.html as much as possible.
+    + In particular, http://golang.org/doc/effective_go.html#mixed-caps.  Enums
+      should be CamelCase, with acronyms capitalized (TCPSourcePort, vs.
+      TcpSourcePort or TCP_SOURCE_PORT).
+* Bonus points for giving enum types a String() method.
+* Any exported types or functions should have commentary
+  (http://golang.org/doc/effective_go.html#commentary)
+
+
+Coding Methods And Implementation Notes
+---------------------------------------
+
+### Error Handling
+
+Many times, you'll be decoding a protocol and run across something bad, a packet
+corruption or the like.  How do you handle this?  First off, ALWAYS report the
+error.  You can do this either by returning the error from the decode() function
+(most common), or if you're up for it you can implement and add an ErrorLayer
+through the packet builder (the first method is a simple shortcut that does
+exactly this, then stops any future decoding).
+
+Often, you'll already have decoded some part of your protocol by the time you hit
+your error.  Use your own discretion to determine whether the stuff you've
+already decoded should be returned to the caller or not:
+
+```go
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+  prot := &MyProtocol{}
+  if len(data) < 10 {
+    // This error occurred before we did ANYTHING, so there's nothing in my
+    // protocol that the caller could possibly want.  Just return the error.
+    return fmt.Errorf("Length %d less than 10", len(data))
+  }
+  prot.ImportantField1 = data[:5]
+  prot.ImportantField2 = data[5:10]
+  // At this point, we've already got enough information in 'prot' to
+  // warrant returning it to the caller, so we'll add it now.
+  p.AddLayer(prot)
+  if len(data) < 15 {
+    // We encountered an error later in the packet, but the caller already
+    // has the important info we've gleaned so far.
+    return fmt.Errorf("Length %d less than 15", len(data))
+  }
+  prot.ImportantField3 = data[10:15]
+  return nil  // We've already added the layer, we can just return success.
+}
+```
+
+In general, our code follows the approach of returning the first error it
+encounters.  In general, we don't trust any bytes after the first error we see.
+
+### What Is A Layer?
+
+The definition of a layer is up to the discretion of the coder.  It should be
+something important enough that it's actually useful to the caller (IE: every
+TLV value should probably NOT be a layer).  However, it can be more granular
+than a single protocol... IPv6 and SCTP both implement many layers to handle the
+various parts of the protocol.  Use your best judgement, and prepare to defend
+your decisions during code review. ;)
+
+### Performance
+
+We strive to make gopacket as fast as possible while still providing lots of
+features.  In general, this means:
+
+* Focus performance tuning on common protocols (IP4/6, TCP, etc), and optimize
+  others on an as-needed basis (tons of MPLS on your network?  Time to optimize
+  MPLS!)
+* Use fast operations.  See the toplevel benchmark_test for benchmarks of some
+  of Go's underlying features and types.
+* Test your performance changes!  You should use the ./gc script's --benchmark
+  flag to submit any performance-related changes.  Use pcap/gopacket_benchmark
+  to test your change against a PCAP file based on your traffic patterns.
+* Don't be TOO hacky.  Sometimes, removing an unused field from a struct causes
+  a huge performance hit, due to the way that Go currently handles its segmented
+  stack... don't be afraid to clean it up anyway.  We'll trust the Go compiler
+  to get good enough over time to handle this.  Also, this type of
+  compiler-specific optimization is very fragile; someone adding a field to an
+  entirely different struct elsewhere in the codebase could reverse any gains
+  you might achieve by aligning your allocations.
+* Try to minimize memory allocations.  If possible, use []byte to reference
+  pieces of the input, instead of using string, which requires copying the bytes
+  into a new memory allocation.
+* Think hard about what should be evaluated lazily vs. not.  In general, a
+  layer's struct should almost exactly mirror the layer's frame.  Anything
+  that's more interesting should be a function.  This may not always be
+  possible, but it's a good rule of thumb.
+* Don't fear micro-optimizations.  With the above in mind, we welcome
+  micro-optimizations that we think will have positive/neutral impacts on the
+  majority of workloads.  A prime example of this is pre-allocating certain
+  structs within a larger one:
+
+```go
+type MyProtocol struct {
+  // Most packets have 1-4 of VeryCommon, so we preallocate it here.
+  initialAllocation [4]uint32
+  VeryCommon []uint32
+}
+
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+  prot := &MyProtocol{}
+  prot.VeryCommon = prot.initialAllocation[:0]
+  for len(data) > 4 {
+    field := binary.BigEndian.Uint32(data[:4])
+    data = data[4:]
+    // Since we're using the underlying initialAllocation, we won't need to
+    // allocate new memory for the following append unless we have more than 16
+    // bytes of data, which should be the uncommon case.
+    prot.VeryCommon = append(prot.VeryCommon, field)
+  }
+  p.AddLayer(prot)
+  if len(data) > 0 {
+    return fmt.Errorf("MyProtocol packet has %d bytes left after decoding", len(data))
+  }
+  return nil
+}
+```
+
+### Slices And Data
+
+If you're pulling a slice from the data you're decoding, don't copy it.  Just
+use the slice itself.
+
+```go
+type MyProtocol struct {
+  A, B net.IP
+}
+func decodeMyProtocol(data []byte, p gopacket.PacketBuilder) error {
+  p.AddLayer(&MyProtocol{
+    A: data[:4],
+    B: data[4:8],
+  })
+  return nil
+}
+```
+
+The caller has already agreed, by using this library, that they won't modify the
+set of bytes they pass in to the decoder, or the library has already copied the
+set of bytes to a read-only location.  See DecodeOptions.NoCopy for more
+information.
+
+### Enums/Types
+
+If a protocol has an integer field (uint8, uint16, etc) with a couple of known
+values that mean something special, make it a type.  This allows us to do really
+nice things like adding a String() function to them, so we can more easily
+display those to users.  Check out layers/enums.go for one example, as well as
+layers/icmp.go for layer-specific enums.
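+
+As a rough sketch (the names here are illustrative, not taken from the existing
+layers), such a type might look like:
+
+```go
+// MyProtocolOpCode is a typed field with a couple of known values.
+type MyProtocolOpCode uint8
+
+const (
+  MyProtocolOpCodeQuery MyProtocolOpCode = 0
+  MyProtocolOpCodeReply MyProtocolOpCode = 1
+)
+
+// String makes the value readable when printed or logged.
+func (o MyProtocolOpCode) String() string {
+  switch o {
+  case MyProtocolOpCodeQuery:
+    return "Query"
+  case MyProtocolOpCodeReply:
+    return "Reply"
+  }
+  return "Unknown"
+}
+```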
+
+When naming things, try for descriptiveness over succinctness.  For example,
+choose DNSResponseRecord over DNSRR.
diff --git a/vendor/github.com/google/gopacket/LICENSE b/vendor/github.com/google/gopacket/LICENSE
new file mode 100644
index 0000000..2100d52
--- /dev/null
+++ b/vendor/github.com/google/gopacket/LICENSE
@@ -0,0 +1,28 @@
+Copyright (c) 2012 Google, Inc. All rights reserved.
+Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Andreas Krennmair, Google, nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/gopacket/README.md b/vendor/github.com/google/gopacket/README.md
new file mode 100644
index 0000000..efe462e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/README.md
@@ -0,0 +1,12 @@
+# GoPacket
+
+This library provides packet decoding capabilities for Go.
+See [godoc](https://godoc.org/github.com/google/gopacket) for more details.
+
+[![Build Status](https://travis-ci.org/google/gopacket.svg?branch=master)](https://travis-ci.org/google/gopacket)
+[![GoDoc](https://godoc.org/github.com/google/gopacket?status.svg)](https://godoc.org/github.com/google/gopacket)
+
+Minimum Go version required is 1.5 except for pcapgo/EthernetHandle, afpacket, and bsdbpf which need at least 1.9 due to x/sys/unix dependencies.
+
+Originally forked from the gopcap project written by Andreas
+Krennmair <ak@synflood.at> (http://github.com/akrennmair/gopcap).
diff --git a/vendor/github.com/google/gopacket/base.go b/vendor/github.com/google/gopacket/base.go
new file mode 100644
index 0000000..91e150c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/base.go
@@ -0,0 +1,178 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+)
+
+// Layer represents a single decoded packet layer (using either the
+// OSI or TCP/IP definition of a layer).  When decoding, a packet's data is
+// broken up into a number of layers.  The caller may call LayerType() to
+// figure out which type of layer they've received from the packet.  Optionally,
+// they may then use a type assertion to get the actual layer type for deep
+// inspection of the data.
+type Layer interface {
+	// LayerType is the gopacket type for this layer.
+	LayerType() LayerType
+	// LayerContents returns the set of bytes that make up this layer.
+	LayerContents() []byte
+	// LayerPayload returns the set of bytes contained within this layer, not
+	// including the layer itself.
+	LayerPayload() []byte
+}
+
+// Payload is a Layer containing the payload of a packet.  The definition of
+// what constitutes the payload of a packet depends on previous layers; for
+// TCP and UDP, we stop decoding above layer 4 and return the remaining
+// bytes as a Payload.  Payload is an ApplicationLayer.
+type Payload []byte
+
+// LayerType returns LayerTypePayload
+func (p Payload) LayerType() LayerType { return LayerTypePayload }
+
+// LayerContents returns the bytes making up this layer.
+func (p Payload) LayerContents() []byte { return []byte(p) }
+
+// LayerPayload returns the payload within this layer.
+func (p Payload) LayerPayload() []byte { return nil }
+
+// Payload returns this layer as bytes.
+func (p Payload) Payload() []byte { return []byte(p) }
+
+// String implements fmt.Stringer.
+func (p Payload) String() string { return fmt.Sprintf("%d byte(s)", len(p)) }
+
+// GoString implements fmt.GoStringer.
+func (p Payload) GoString() string { return LongBytesGoString([]byte(p)) }
+
+// CanDecode implements DecodingLayer.
+func (p Payload) CanDecode() LayerClass { return LayerTypePayload }
+
+// NextLayerType implements DecodingLayer.
+func (p Payload) NextLayerType() LayerType { return LayerTypeZero }
+
+// DecodeFromBytes implements DecodingLayer.
+func (p *Payload) DecodeFromBytes(data []byte, df DecodeFeedback) error {
+	*p = Payload(data)
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializeBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p Payload) SerializeTo(b SerializeBuffer, opts SerializeOptions) error {
+	bytes, err := b.PrependBytes(len(p))
+	if err != nil {
+		return err
+	}
+	copy(bytes, p)
+	return nil
+}
+
+// decodePayload decodes data by returning it all in a Payload layer.
+func decodePayload(data []byte, p PacketBuilder) error {
+	payload := &Payload{}
+	if err := payload.DecodeFromBytes(data, p); err != nil {
+		return err
+	}
+	p.AddLayer(payload)
+	p.SetApplicationLayer(payload)
+	return nil
+}
+
+// Fragment is a Layer containing a fragment of a larger frame, used by layers
+// like IPv4 and IPv6 that allow for fragmentation of their payloads.
+type Fragment []byte
+
+// LayerType returns LayerTypeFragment
+func (p *Fragment) LayerType() LayerType { return LayerTypeFragment }
+
+// LayerContents implements Layer.
+func (p *Fragment) LayerContents() []byte { return []byte(*p) }
+
+// LayerPayload implements Layer.
+func (p *Fragment) LayerPayload() []byte { return nil }
+
+// Payload returns this layer as a byte slice.
+func (p *Fragment) Payload() []byte { return []byte(*p) }
+
+// String implements fmt.Stringer.
+func (p *Fragment) String() string { return fmt.Sprintf("%d byte(s)", len(*p)) }
+
+// CanDecode implements DecodingLayer.
+func (p *Fragment) CanDecode() LayerClass { return LayerTypeFragment }
+
+// NextLayerType implements DecodingLayer.
+func (p *Fragment) NextLayerType() LayerType { return LayerTypeZero }
+
+// DecodeFromBytes implements DecodingLayer.
+func (p *Fragment) DecodeFromBytes(data []byte, df DecodeFeedback) error {
+	*p = Fragment(data)
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializeBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p *Fragment) SerializeTo(b SerializeBuffer, opts SerializeOptions) error {
+	bytes, err := b.PrependBytes(len(*p))
+	if err != nil {
+		return err
+	}
+	copy(bytes, *p)
+	return nil
+}
+
+// decodeFragment decodes data by returning it all in a Fragment layer.
+func decodeFragment(data []byte, p PacketBuilder) error {
+	payload := &Fragment{}
+	if err := payload.DecodeFromBytes(data, p); err != nil {
+		return err
+	}
+	p.AddLayer(payload)
+	p.SetApplicationLayer(payload)
+	return nil
+}
+
+// These layers correspond to Internet Protocol Suite (TCP/IP) layers, and their
+// corresponding OSI layers, as best as possible.
+
+// LinkLayer is the packet layer corresponding to TCP/IP layer 1 (OSI layer 2)
+type LinkLayer interface {
+	Layer
+	LinkFlow() Flow
+}
+
+// NetworkLayer is the packet layer corresponding to TCP/IP layer 2 (OSI
+// layer 3)
+type NetworkLayer interface {
+	Layer
+	NetworkFlow() Flow
+}
+
+// TransportLayer is the packet layer corresponding to the TCP/IP layer 3 (OSI
+// layer 4)
+type TransportLayer interface {
+	Layer
+	TransportFlow() Flow
+}
+
+// ApplicationLayer is the packet layer corresponding to the TCP/IP layer 4 (OSI
+// layer 7), also known as the packet payload.
+type ApplicationLayer interface {
+	Layer
+	Payload() []byte
+}
+
+// ErrorLayer is a packet layer created when decoding of the packet has failed.
+// Its payload is all the bytes that we were unable to decode, and the returned
+// error details why the decoding failed.
+type ErrorLayer interface {
+	Layer
+	Error() error
+}
diff --git a/vendor/github.com/google/gopacket/decode.go b/vendor/github.com/google/gopacket/decode.go
new file mode 100644
index 0000000..2633f84
--- /dev/null
+++ b/vendor/github.com/google/gopacket/decode.go
@@ -0,0 +1,157 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"errors"
+)
+
+// DecodeFeedback is used by DecodingLayer layers to provide decoding metadata.
+type DecodeFeedback interface {
+	// SetTruncated should be called if during decoding you notice that a packet
+	// is shorter than internal layer variables (HeaderLength, or the like) say it
+	// should be.  It sets packet.Metadata().Truncated.
+	SetTruncated()
+}
+
+type nilDecodeFeedback struct{}
+
+func (nilDecodeFeedback) SetTruncated() {}
+
+// NilDecodeFeedback implements DecodeFeedback by doing nothing.
+var NilDecodeFeedback DecodeFeedback = nilDecodeFeedback{}
+
+// PacketBuilder is used by layer decoders to store the layers they've decoded,
+// and to defer future decoding via NextDecoder.
+// Typically, the pattern for use is:
+//  func (m *myDecoder) Decode(data []byte, p PacketBuilder) error {
+//    if myLayer, err := myDecodingLogic(data); err != nil {
+//      return err
+//    } else {
+//      p.AddLayer(myLayer)
+//    }
+//    // maybe do this, if myLayer is a LinkLayer
+//    p.SetLinkLayer(myLayer)
+//    return p.NextDecoder(nextDecoder)
+//  }
+type PacketBuilder interface {
+	DecodeFeedback
+	// AddLayer should be called by a decoder immediately upon successful
+	// decoding of a layer.
+	AddLayer(l Layer)
+	// The following functions set the various specific layers in the final
+	// packet.  Note that if many layers call SetX, the first call is kept and all
+	// other calls are ignored.
+	SetLinkLayer(LinkLayer)
+	SetNetworkLayer(NetworkLayer)
+	SetTransportLayer(TransportLayer)
+	SetApplicationLayer(ApplicationLayer)
+	SetErrorLayer(ErrorLayer)
+	// NextDecoder should be called by a decoder when they're done decoding a
+	// packet layer but not done with decoding the entire packet.  The next
+	// decoder will be called to decode the last AddLayer's LayerPayload.
+	// Because of this, NextDecoder must only be called once all other
+	// PacketBuilder calls have been made.  Set*Layer and AddLayer calls after
+	// NextDecoder calls will behave incorrectly.
+	NextDecoder(next Decoder) error
+	// DumpPacketData is used solely for debugging.  If you come across an error
+	// you need to diagnose while processing a packet, call this and your packet's
+	// data will be dumped to stderr so you can create a test.  This should never
+	// be called from a production decoder.
+	DumpPacketData()
+	// DecodeOptions returns the decode options
+	DecodeOptions() *DecodeOptions
+}
+
+// Decoder is an interface for logic to decode a packet layer.  Users may
+// implement a Decoder to handle their own strange packet types, or may use one
+// of the many decoders available in the 'layers' subpackage to decode things
+// for them.
+type Decoder interface {
+	// Decode decodes the bytes of a packet, sending decoded values and other
+	// information to PacketBuilder, and returning an error if unsuccessful.  See
+	// the PacketBuilder documentation for more details.
+	Decode([]byte, PacketBuilder) error
+}
+
+// DecodeFunc wraps a function to make it a Decoder.
+type DecodeFunc func([]byte, PacketBuilder) error
+
+// Decode implements Decoder by calling itself.
+func (d DecodeFunc) Decode(data []byte, p PacketBuilder) error {
+	// function, call thyself.
+	return d(data, p)
+}
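+
+// A minimal sketch: any function with the right signature can be used as a
+// Decoder (decodeMyProtocol is a hypothetical decoder function):
+//
+//   var myDecoder gopacket.Decoder = gopacket.DecodeFunc(decodeMyProtocol)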
+
+// DecodePayload is a Decoder that returns a Payload layer containing all
+// remaining bytes.
+var DecodePayload Decoder = DecodeFunc(decodePayload)
+
+// DecodeUnknown is a Decoder that returns an Unknown layer containing all
+// remaining bytes, useful if you run up against a layer that you're unable to
+// decode yet.  This layer is considered an ErrorLayer.
+var DecodeUnknown Decoder = DecodeFunc(decodeUnknown)
+
+// DecodeFragment is a Decoder that returns a Fragment layer containing all
+// remaining bytes.
+var DecodeFragment Decoder = DecodeFunc(decodeFragment)
+
+// LayerTypeZero is an invalid layer type, but can be used to determine whether
+// layer type has actually been set correctly.
+var LayerTypeZero = RegisterLayerType(0, LayerTypeMetadata{Name: "Unknown", Decoder: DecodeUnknown})
+
+// LayerTypeDecodeFailure is the layer type for the default error layer.
+var LayerTypeDecodeFailure = RegisterLayerType(1, LayerTypeMetadata{Name: "DecodeFailure", Decoder: DecodeUnknown})
+
+// LayerTypePayload is the layer type for a payload that we don't try to decode
+// but treat as a success, IE: an application-level payload.
+var LayerTypePayload = RegisterLayerType(2, LayerTypeMetadata{Name: "Payload", Decoder: DecodePayload})
+
+// LayerTypeFragment is the layer type for a fragment of a layer transported
+// by an underlying layer that supports fragmentation.
+var LayerTypeFragment = RegisterLayerType(3, LayerTypeMetadata{Name: "Fragment", Decoder: DecodeFragment})
+
+// DecodeFailure is a packet layer created if decoding of the packet data failed
+// for some reason.  It implements ErrorLayer.  LayerContents will be the entire
+// set of bytes that failed to parse, and Error will return the reason parsing
+// failed.
+type DecodeFailure struct {
+	data  []byte
+	err   error
+	stack []byte
+}
+
+// Error returns the error encountered during decoding.
+func (d *DecodeFailure) Error() error { return d.err }
+
+// LayerContents implements Layer.
+func (d *DecodeFailure) LayerContents() []byte { return d.data }
+
+// LayerPayload implements Layer.
+func (d *DecodeFailure) LayerPayload() []byte { return nil }
+
+// String implements fmt.Stringer.
+func (d *DecodeFailure) String() string {
+	return "Packet decoding error: " + d.Error().Error()
+}
+
+// Dump implements Dumper.
+func (d *DecodeFailure) Dump() (s string) {
+	if d.stack != nil {
+		s = string(d.stack)
+	}
+	return
+}
+
+// LayerType returns LayerTypeDecodeFailure
+func (d *DecodeFailure) LayerType() LayerType { return LayerTypeDecodeFailure }
+
+// decodeUnknown "decodes" unsupported data types by returning an error.
+// This decoder will thus always return a DecodeFailure layer.
+func decodeUnknown(data []byte, p PacketBuilder) error {
+	return errors.New("Layer type not currently supported")
+}
diff --git a/vendor/github.com/google/gopacket/doc.go b/vendor/github.com/google/gopacket/doc.go
new file mode 100644
index 0000000..b46e43d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/doc.go
@@ -0,0 +1,432 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+Package gopacket provides packet decoding for the Go language.
+
+gopacket contains many sub-packages with additional functionality you may find
+useful, including:
+
+ * layers: You'll probably use this every time.  This contains the logic
+     built into gopacket for decoding packet protocols.  Note that all example
+     code below assumes that you have imported both gopacket and
+     gopacket/layers.
+ * pcap: C bindings to use libpcap to read packets off the wire.
+ * pfring: C bindings to use PF_RING to read packets off the wire.
+ * afpacket: C bindings for Linux's AF_PACKET to read packets off the wire.
+ * tcpassembly: TCP stream reassembly
+
+Also, if you're looking to dive right into code, see the examples subdirectory
+for numerous simple binaries built using gopacket libraries.
+
+Minimum go version required is 1.5 except for pcapgo/EthernetHandle, afpacket,
+and bsdbpf which need at least 1.7 due to x/sys/unix dependencies.
+
+Basic Usage
+
+gopacket takes in packet data as a []byte and decodes it into a packet with
+a non-zero number of "layers".  Each layer corresponds to a protocol
+within the bytes.  Once a packet has been decoded, the layers of the packet
+can be requested from the packet.
+
+ // Decode a packet
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default)
+ // Get the TCP layer from this packet
+ if tcpLayer := packet.Layer(layers.LayerTypeTCP); tcpLayer != nil {
+   fmt.Println("This is a TCP packet!")
+   // Get actual TCP data from this layer
+   tcp, _ := tcpLayer.(*layers.TCP)
+   fmt.Printf("From src port %d to dst port %d\n", tcp.SrcPort, tcp.DstPort)
+ }
+ // Iterate over all layers, printing out each layer type
+ for _, layer := range packet.Layers() {
+   fmt.Println("PACKET LAYER:", layer.LayerType())
+ }
+
+Packets can be decoded from a number of starting points.  Many of our base
+types implement Decoder, which allows us to decode packets for which
+we don't have full data.
+
+ // Decode an ethernet packet
+ ethP := gopacket.NewPacket(p1, layers.LayerTypeEthernet, gopacket.Default)
+ // Decode an IPv6 header and everything it contains
+ ipP := gopacket.NewPacket(p2, layers.LayerTypeIPv6, gopacket.Default)
+ // Decode a TCP header and its payload
+ tcpP := gopacket.NewPacket(p3, layers.LayerTypeTCP, gopacket.Default)
+
+
+Reading Packets From A Source
+
+Most of the time, you won't just have a []byte of packet data lying around.
+Instead, you'll want to read packets in from somewhere (file, interface, etc)
+and process them.  To do that, you'll want to build a PacketSource.
+
+First, you'll need to construct an object that implements the PacketDataSource
+interface.  There are implementations of this interface bundled with gopacket
+in the gopacket/pcap and gopacket/pfring subpackages... see their documentation
+for more information on their usage.  Once you have a PacketDataSource, you can
+pass it into NewPacketSource, along with a Decoder of your choice, to create
+a PacketSource.
+
+Once you have a PacketSource, you can read packets from it in multiple ways.
+See the docs for PacketSource for more details.  The easiest method is the
+Packets function, which returns a channel and asynchronously writes new
+packets into that channel, closing the channel when the packetSource hits
+end-of-file.
+
+  packetSource := ...  // construct using pcap or pfring
+  for packet := range packetSource.Packets() {
+    handlePacket(packet)  // do something with each packet
+  }
+
+You can change the decoding options of the packetSource by setting fields in
+packetSource.DecodeOptions... see the following sections for more details.
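+
+For example, a minimal sketch that reads packets from a capture file using the
+pcap subpackage (the file name and error handling here are placeholders) might
+look like:
+
+  handle, err := pcap.OpenOffline("/path/to/capture.pcap")
+  if err != nil {
+    panic(err)
+  }
+  defer handle.Close()
+  packetSource := gopacket.NewPacketSource(handle, handle.LinkType())
+  packetSource.DecodeOptions.Lazy = true  // optional; see Lazy Decoding below
+  for packet := range packetSource.Packets() {
+    handlePacket(packet)  // do something with each packet
+  }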
+
+
+Lazy Decoding
+
+gopacket optionally decodes packet data lazily, meaning it
+only decodes a packet layer when it needs to handle a function call.
+
+ // Create a packet, but don't actually decode anything yet
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ // Now, decode the packet up to the first IPv4 layer found but no further.
+ // If no IPv4 layer was found, the whole packet will be decoded looking for
+ // it.
+ ip4 := packet.Layer(layers.LayerTypeIPv4)
+ // Decode all layers and return them.  The layers up to the first IPv4 layer
+ // are already decoded, and will not require decoding a second time.
+ layers := packet.Layers()
+
+Lazily-decoded packets are not concurrency-safe.  Since layers have not all been
+decoded, each call to Layer() or Layers() has the potential to mutate the packet
+in order to decode the next layer.  If a packet is used in multiple goroutines
+concurrently, don't use gopacket.Lazy; gopacket will then decode the packet
+fully up front, and future function calls won't mutate the object.
+
+
+NoCopy Decoding
+
+By default, gopacket will copy the slice passed to NewPacket and store the
+copy within the packet, so future mutations to the bytes underlying the slice
+don't affect the packet and its layers.  If you can guarantee that the
+underlying slice bytes won't be changed, you can use NoCopy to tell
+gopacket.NewPacket, and it'll use the passed-in slice itself.
+
+ // This channel returns new byte slices, each of which points to a new
+ // memory location that's guaranteed immutable for the duration of the
+ // packet.
+ for data := range myByteSliceChannel {
+   p := gopacket.NewPacket(data, layers.LayerTypeEthernet, gopacket.NoCopy)
+   doSomethingWithPacket(p)
+ }
+
+The fastest method of decoding is to use both Lazy and NoCopy, but note from
+the many caveats above that for some implementations either or both may be
+dangerous.
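+
+As a sketch, both options can be combined in a single DecodeOptions value:
+
+ opts := gopacket.DecodeOptions{Lazy: true, NoCopy: true}
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, opts)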
+
+
+Pointers To Known Layers
+
+During decoding, certain layers are stored in the packet as well-known
+layer types.  For example, IPv4 and IPv6 are both considered NetworkLayer
+layers, while TCP and UDP are both TransportLayer layers.  We support 4
+layers, corresponding to the 4 layers of the TCP/IP layering scheme (roughly
+analogous to layers 2, 3, 4, and 7 of the OSI model).  To access these,
+you can use the packet.LinkLayer, packet.NetworkLayer,
+packet.TransportLayer, and packet.ApplicationLayer functions.  Each of
+these functions returns a corresponding interface
+(gopacket.{Link,Network,Transport,Application}Layer).  The first three
+provide methods for getting src/dst addresses for that particular layer,
+while the final layer provides a Payload function to get payload data.
+This is helpful, for example, to get payloads for all packets regardless
+of their underlying data type:
+
+ // Get packets from some source
+ for packet := range someSource {
+   if app := packet.ApplicationLayer(); app != nil {
+     if strings.Contains(string(app.Payload()), "magic string") {
+       fmt.Println("Found magic string in a packet!")
+     }
+   }
+ }
+
+A particularly useful layer is ErrorLayer, which is set whenever there's
+an error parsing part of the packet.
+
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Default)
+ if err := packet.ErrorLayer(); err != nil {
+   fmt.Println("Error decoding some part of the packet:", err)
+ }
+
+Note that we don't return an error from NewPacket because we may have decoded
+a number of layers successfully before running into our erroneous layer.  You
+may still be able to get your Ethernet and IPv4 layers correctly, even if
+your TCP layer is malformed.
+
+
+Flow And Endpoint
+
+gopacket has two useful objects, Flow and Endpoint, for communicating in a protocol
+independent manner the fact that a packet is coming from A and going to B.
+The general layer types LinkLayer, NetworkLayer, and TransportLayer all provide
+methods for extracting their flow information, without worrying about the type
+of the underlying Layer.
+
+A Flow is a simple object made up of a set of two Endpoints, one source and one
+destination.  It details the sender and receiver of the Layer of the Packet.
+
+An Endpoint is a hashable representation of a source or destination.  For
+example, for LayerTypeIPv4, an Endpoint contains the IP address bytes for a v4
+IP packet.  A Flow can be broken into Endpoints, and Endpoints can be combined
+into Flows:
+
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ netFlow := packet.NetworkLayer().NetworkFlow()
+ src, dst := netFlow.Endpoints()
+ reverseFlow := gopacket.NewFlow(dst, src)
+
+Both Endpoint and Flow objects can be used as map keys, and the equality
+operator can compare them, so you can easily group together all packets
+based on endpoint criteria:
+
+ flows := map[gopacket.Endpoint]chan gopacket.Packet{}
+ packet := gopacket.NewPacket(myPacketData, layers.LayerTypeEthernet, gopacket.Lazy)
+ // Send all TCP packets to channels based on their destination port.
+ if tcp := packet.Layer(layers.LayerTypeTCP); tcp != nil {
+   flows[tcp.TransportFlow().Dst()] <- packet
+ }
+ // Look for all packets with the same source and destination network address
+ if net := packet.NetworkLayer(); net != nil {
+   src, dst := net.NetworkFlow().Endpoints()
+   if src == dst {
+     fmt.Println("Fishy packet has same network source and dst: %s", src)
+   }
+ }
+ // Find all packets coming from UDP port 1000 to UDP port 500
+ interestingFlow, _ := gopacket.FlowFromEndpoints(layers.NewUDPPortEndpoint(1000), layers.NewUDPPortEndpoint(500))
+ if t := packet.TransportLayer(); t != nil && t.TransportFlow() == interestingFlow {
+   fmt.Println("Found that UDP flow I was looking for!")
+ }
+
+For load-balancing purposes, both Flow and Endpoint have FastHash() functions,
+which provide quick, non-cryptographic hashes of their contents.  Of particular
+importance is the fact that Flow FastHash() is symmetric: A->B will have the same
+hash as B->A.  An example usage could be:
+
+ channels := [8]chan gopacket.Packet{}
+ for i := 0; i < 8; i++ {
+   channels[i] = make(chan gopacket.Packet)
+   go packetHandler(channels[i])
+ }
+ for packet := range getPackets() {
+   if net := packet.NetworkLayer(); net != nil {
+     channels[int(net.NetworkFlow().FastHash()) & 0x7] <- packet
+   }
+ }
+
+This allows us to split up a packet stream while still making sure that each
+stream sees all packets for a flow (and its bidirectional opposite).
+
+
+Implementing Your Own Decoder
+
+If your network has some strange encapsulation, you can implement your own
+decoder.  In this example, we handle Ethernet packets which are encapsulated
+in a 4-byte header.
+
+ // Create a layer type, should be unique and high, so it doesn't conflict,
+ // giving it a name and a decoder to use.
+ var MyLayerType = gopacket.RegisterLayerType(12345, gopacket.LayerTypeMetadata{Name: "MyLayerType", Decoder: gopacket.DecodeFunc(decodeMyLayer)})
+
+ // Implement my layer
+ type MyLayer struct {
+   StrangeHeader []byte
+   payload []byte
+ }
+ func (m MyLayer) LayerType() gopacket.LayerType { return MyLayerType }
+ func (m MyLayer) LayerContents() []byte { return m.StrangeHeader }
+ func (m MyLayer) LayerPayload() []byte { return m.payload }
+
+ // Now implement a decoder... this one strips off the first 4 bytes of the
+ // packet.
+ func decodeMyLayer(data []byte, p gopacket.PacketBuilder) error {
+   // Create my layer
+   p.AddLayer(&MyLayer{data[:4], data[4:]})
+   // Determine how to handle the rest of the packet
+   return p.NextDecoder(layers.LayerTypeEthernet)
+ }
+
+ // Finally, decode your packets:
+ p := gopacket.NewPacket(data, MyLayerType, gopacket.Lazy)
+
+See the docs for Decoder and PacketBuilder for more details on how to write
+decoders, or look at RegisterLayerType and RegisterEndpointType to see how
+to add layer/endpoint types to gopacket.
+
+
+Fast Decoding With DecodingLayerParser
+
+TLDR:  DecodingLayerParser decodes packet data in roughly 10% of the time that
+NewPacket takes, but only for known packet stacks.
+
+Basic decoding using gopacket.NewPacket or PacketSource.Packets is somewhat slow
+due to its need to allocate a new packet and every respective layer.  It's very
+versatile and can handle all known layer types, but sometimes you really only
+care about a specific set of layers regardless, so that versatility is wasted.
+
+DecodingLayerParser avoids memory allocation altogether by decoding packet
+layers directly into preallocated objects, which you can then reference to get
+the packet's information.  A quick example:
+
+ func main() {
+   var eth layers.Ethernet
+   var ip4 layers.IPv4
+   var ip6 layers.IPv6
+   var tcp layers.TCP
+   parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &ip6, &tcp)
+   decoded := []gopacket.LayerType{}
+   for packetData := range somehowGetPacketData() {
+     if err := parser.DecodeLayers(packetData, &decoded); err != nil {
+       fmt.Fprintf(os.Stderr, "Could not decode layers: %v\n", err)
+       continue
+     }
+     for _, layerType := range decoded {
+       switch layerType {
+         case layers.LayerTypeIPv6:
+           fmt.Println("    IP6 ", ip6.SrcIP, ip6.DstIP)
+         case layers.LayerTypeIPv4:
+           fmt.Println("    IP4 ", ip4.SrcIP, ip4.DstIP)
+       }
+     }
+   }
+ }
+
+The important thing to note here is that the parser is modifying the passed in
+layers (eth, ip4, ip6, tcp) instead of allocating new ones, thus greatly
+speeding up the decoding process.  It's even branching based on layer type...
+it'll handle an (eth, ip4, tcp) or (eth, ip6, tcp) stack.  However, it won't
+handle any other type... since no other decoders were passed in, an (eth, ip4,
+udp) stack will stop decoding after ip4, and only pass back [LayerTypeEthernet,
+LayerTypeIPv4] through the 'decoded' slice (along with an error saying it can't
+decode a UDP packet).
+
+Unfortunately, not all layers can be used by DecodingLayerParser... only those
+implementing the DecodingLayer interface are usable.  Also, it's possible to
+create DecodingLayers that are not themselves Layers... see
+layers.IPv6ExtensionSkipper for an example of this.
+
+Faster And Customized Decoding with DecodingLayerContainer
+
+By default, DecodingLayerParser uses a native map to store and look up the layer
+to decode. Though versatile, this is not always optimal. For example, if you use
+only a few layers, sparse array indexing or a linear array scan may provide
+faster lookups.
+
+To accommodate these scenarios, the DecodingLayerContainer interface is provided
+along with its implementations: DecodingLayerSparse, DecodingLayerArray and
+DecodingLayerMap. You can specify a container implementation for
+DecodingLayerParser with the SetDecodingLayerContainer method. Example:
+
+ dlp := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet)
+ dlp.SetDecodingLayerContainer(gopacket.DecodingLayerSparse(nil))
+ var eth layers.Ethernet
+ dlp.AddDecodingLayer(&eth)
+ // ... add layers and use DecodingLayerParser as usual...
+
+To skip one level of indirection (at the cost of some capabilities) you may
+also use a DecodingLayerContainer directly as a decoding tool. In this case you
+have to handle unknown layer types and layer panics yourself. Example:
+
+ func main() {
+   var eth layers.Ethernet
+   var ip4 layers.IPv4
+   var ip6 layers.IPv6
+   var tcp layers.TCP
+   dlc := gopacket.DecodingLayerContainer(gopacket.DecodingLayerArray(nil))
+   dlc = dlc.Put(&eth)
+   dlc = dlc.Put(&ip4)
+   dlc = dlc.Put(&ip6)
+   dlc = dlc.Put(&tcp)
+   // you may specify some meaningful DecodeFeedback
+   decoder := dlc.LayersDecoder(layers.LayerTypeEthernet, gopacket.NilDecodeFeedback)
+   decoded := make([]gopacket.LayerType, 0, 20)
+   for packetData := range somehowGetPacketData() {
+     lt, err := decoder(packetData, &decoded)
+     if err != nil {
+       fmt.Fprintf(os.Stderr, "Could not decode layers: %v\n", err)
+       continue
+     }
+     if lt != gopacket.LayerTypeZero {
+       fmt.Fprintf(os.Stderr, "unknown layer type: %v\n", lt)
+       continue
+     }
+     for _, layerType := range decoded {
+       // examine decoded layertypes just as already shown above
+     }
+   }
+ }
+
+DecodingLayerSparse is the fastest, but it is only effective when the LayerType
+values your layers can decode are small, because large values inflate its memory
+footprint. DecodingLayerArray is very compact and primarily usable when the
+number of decoding layers is small (up to ~10-15, but please do your own
+benchmarks). DecodingLayerMap is the most versatile one and is used by
+DecodingLayerParser by default. Please refer to the tests and benchmarks in the
+layers subpackage for further usage examples and performance measurements.
+
+You may also choose to implement your own DecodingLayerContainer if you want to
+make use of your own internal packet decoding logic.
+
+Creating Packet Data
+
+As well as offering the ability to decode packet data, gopacket will allow you
+to create packets from scratch, as well.  A number of gopacket layers implement
+the SerializableLayer interface; these layers can be serialized to a []byte in
+the following manner:
+
+  ip := &layers.IPv4{
+    SrcIP: net.IP{1, 2, 3, 4},
+    DstIP: net.IP{5, 6, 7, 8},
+    // etc...
+  }
+  buf := gopacket.NewSerializeBuffer()
+  opts := gopacket.SerializeOptions{}  // See SerializeOptions for more details.
+  err := ip.SerializeTo(buf, opts)
+  if err != nil { panic(err) }
+  fmt.Println(buf.Bytes())  // prints out a byte slice containing the serialized IPv4 layer.
+
+SerializeTo PREPENDS the given layer onto the SerializeBuffer, treating the
+current buffer's Bytes() slice as the payload of the layer being serialized.
+Therefore, you can serialize an entire packet by serializing a set of layers in
+reverse order (Payload, then TCP, then IP, then Ethernet, for example).  The
+SerializeLayers helper function does exactly that.
+
+To generate an (empty and useless, because no fields are set)
+Ethernet(IPv4(TCP(Payload))) packet, for example, you can run:
+
+  buf := gopacket.NewSerializeBuffer()
+  opts := gopacket.SerializeOptions{}
+  gopacket.SerializeLayers(buf, opts,
+    &layers.Ethernet{},
+    &layers.IPv4{},
+    &layers.TCP{},
+    gopacket.Payload([]byte{1, 2, 3, 4}))
+  packetData := buf.Bytes()
+
+A Final Note
+
+If you use gopacket, you'll almost definitely want to make sure gopacket/layers
+is imported, since when imported it sets all the LayerType variables and fills
+in a lot of interesting variables/maps (DecodersByLayerName, etc).  Therefore,
+it's recommended that even if you don't use any layers functions directly, you still import with:
+
+  import (
+    _ "github.com/google/gopacket/layers"
+  )
+*/
+package gopacket
diff --git a/vendor/github.com/google/gopacket/flows.go b/vendor/github.com/google/gopacket/flows.go
new file mode 100644
index 0000000..a00c883
--- /dev/null
+++ b/vendor/github.com/google/gopacket/flows.go
@@ -0,0 +1,236 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"bytes"
+	"fmt"
+	"strconv"
+)
+
+// MaxEndpointSize determines the maximum size in bytes of an endpoint address.
+//
+// Endpoints/Flows have a problem:  They need to be hashable.  Therefore, they
+// can't use a byte slice.  The two obvious choices are to use a string or a
+// byte array.  Strings work great, but string creation requires memory
+// allocation, which can be slow.  Arrays work great, but have a fixed size.  We
+// originally used the former, now we've switched to the latter.  Use of a fixed
+// byte-array doubles the speed of constructing a flow (due to not needing to
+// allocate).  This is a huge increase... too much for us to pass up.
+//
+// The end result of this, though, is that an endpoint/flow can't be created
+// using more than MaxEndpointSize bytes per address.
+const MaxEndpointSize = 16
+
+// Endpoint is the set of bytes used to address packets at various layers.
+// See LinkLayer, NetworkLayer, and TransportLayer specifications.
+// Endpoints are usable as map keys.
+type Endpoint struct {
+	typ EndpointType
+	len int
+	raw [MaxEndpointSize]byte
+}
+
+// EndpointType returns the endpoint type associated with this endpoint.
+func (a Endpoint) EndpointType() EndpointType { return a.typ }
+
+// Raw returns the raw bytes of this endpoint.  These aren't human-readable
+// most of the time, but they are faster than calling String.
+func (a Endpoint) Raw() []byte { return a.raw[:a.len] }
+
+// LessThan provides a stable ordering for all endpoints.  It sorts first on
+// the EndpointType of an endpoint, then lexicographically on the raw bytes of
+// that endpoint.
+//
+// For some endpoints, the actual comparison may not make sense; however, this
+// ordering does provide useful information for most Endpoint types.
+func (a Endpoint) LessThan(b Endpoint) bool {
+	return a.typ < b.typ || (a.typ == b.typ && bytes.Compare(a.raw[:a.len], b.raw[:b.len]) < 0)
+}
+
+// fnvHash is used by our FastHash functions, and implements the FNV hash
+// created by Glenn Fowler, Landon Curt Noll, and Phong Vo.
+// See http://isthe.com/chongo/tech/comp/fnv/.
+func fnvHash(s []byte) (h uint64) {
+	h = fnvBasis
+	for i := 0; i < len(s); i++ {
+		h ^= uint64(s[i])
+		h *= fnvPrime
+	}
+	return
+}
+
+const fnvBasis = 14695981039346656037
+const fnvPrime = 1099511628211
+
+// FastHash provides a quick hashing function for an endpoint, useful if you'd
+// like to split up endpoints by modulos or other load-balancing techniques.
+// It uses a variant of Fowler-Noll-Vo hashing.
+//
+// The output of FastHash is not guaranteed to remain the same through future
+// code revisions, so should not be used to key values in persistent storage.
+func (a Endpoint) FastHash() (h uint64) {
+	h = fnvHash(a.raw[:a.len])
+	h ^= uint64(a.typ)
+	h *= fnvPrime
+	return
+}
+
+// NewEndpoint creates a new Endpoint object.
+//
+// The length of raw must not exceed MaxEndpointSize, otherwise this function
+// will panic.
+func NewEndpoint(typ EndpointType, raw []byte) (e Endpoint) {
+	e.len = len(raw)
+	if e.len > MaxEndpointSize {
+		panic("raw byte length greater than MaxEndpointSize")
+	}
+	e.typ = typ
+	copy(e.raw[:], raw)
+	return
+}
+
+// EndpointTypeMetadata is used to register a new endpoint type.
+type EndpointTypeMetadata struct {
+	// Name is the string returned by an EndpointType's String function.
+	Name string
+	// Formatter is called from an Endpoint's String function to format the raw
+	// bytes in an Endpoint into a human-readable string.
+	Formatter func([]byte) string
+}
+
+// EndpointType is the type of a gopacket Endpoint.  This type determines how
+// the bytes stored in the endpoint should be interpreted.
+type EndpointType int64
+
+var endpointTypes = map[EndpointType]EndpointTypeMetadata{}
+
+// RegisterEndpointType creates a new EndpointType and registers it globally.
+// It MUST be passed a unique number, or it will panic.  Numbers 0-999 are
+// reserved for gopacket's use.
+func RegisterEndpointType(num int, meta EndpointTypeMetadata) EndpointType {
+	t := EndpointType(num)
+	if _, ok := endpointTypes[t]; ok {
+		panic("Endpoint type number already in use")
+	}
+	endpointTypes[t] = meta
+	return t
+}
+
+func (e EndpointType) String() string {
+	if t, ok := endpointTypes[e]; ok {
+		return t.Name
+	}
+	return strconv.Itoa(int(e))
+}
+
+func (a Endpoint) String() string {
+	if t, ok := endpointTypes[a.typ]; ok && t.Formatter != nil {
+		return t.Formatter(a.raw[:a.len])
+	}
+	return fmt.Sprintf("%v:%v", a.typ, a.raw)
+}
+
+// Flow represents the direction of traffic for a packet layer, as a source and destination Endpoint.
+// Flows are usable as map keys.
+type Flow struct {
+	typ        EndpointType
+	slen, dlen int
+	src, dst   [MaxEndpointSize]byte
+}
+
+// FlowFromEndpoints creates a new flow by pasting together two endpoints.
+// The endpoints must have the same EndpointType, or this function will return
+// an error.
+func FlowFromEndpoints(src, dst Endpoint) (_ Flow, err error) {
+	if src.typ != dst.typ {
+		err = fmt.Errorf("Mismatched endpoint types: %v->%v", src.typ, dst.typ)
+		return
+	}
+	return Flow{src.typ, src.len, dst.len, src.raw, dst.raw}, nil
+}
+
+// FastHash provides a quick hashing function for a flow, useful if you'd
+// like to split up flows by modulos or other load-balancing techniques.
+// It uses a variant of Fowler-Noll-Vo hashing, and is guaranteed to collide
+// with its reverse flow.  IE: the flow A->B will have the same hash as the flow
+// B->A.
+//
+// The output of FastHash is not guaranteed to remain the same through future
+// code revisions, so should not be used to key values in persistent storage.
+func (f Flow) FastHash() (h uint64) {
+	// This combination must be commutative.  We don't use ^, since that would
+	// give the same hash for all A->A flows.
+	h = fnvHash(f.src[:f.slen]) + fnvHash(f.dst[:f.dlen])
+	h ^= uint64(f.typ)
+	h *= fnvPrime
+	return
+}
+
+// String returns a human-readable representation of this flow, in the form
+// "Src->Dst"
+func (f Flow) String() string {
+	s, d := f.Endpoints()
+	return fmt.Sprintf("%v->%v", s, d)
+}
+
+// EndpointType returns the EndpointType for this Flow.
+func (f Flow) EndpointType() EndpointType {
+	return f.typ
+}
+
+// Endpoints returns the two Endpoints for this flow.
+func (f Flow) Endpoints() (src, dst Endpoint) {
+	return Endpoint{f.typ, f.slen, f.src}, Endpoint{f.typ, f.dlen, f.dst}
+}
+
+// Src returns the source Endpoint for this flow.
+func (f Flow) Src() (src Endpoint) {
+	src, _ = f.Endpoints()
+	return
+}
+
+// Dst returns the destination Endpoint for this flow.
+func (f Flow) Dst() (dst Endpoint) {
+	_, dst = f.Endpoints()
+	return
+}
+
+// Reverse returns a new flow with endpoints reversed.
+func (f Flow) Reverse() Flow {
+	return Flow{f.typ, f.dlen, f.slen, f.dst, f.src}
+}
+
+// NewFlow creates a new flow.
+//
+// src and dst must have length <= MaxEndpointSize, otherwise NewFlow will
+// panic.
+func NewFlow(t EndpointType, src, dst []byte) (f Flow) {
+	f.slen = len(src)
+	f.dlen = len(dst)
+	if f.slen > MaxEndpointSize || f.dlen > MaxEndpointSize {
+		panic("flow raw byte length greater than MaxEndpointSize")
+	}
+	f.typ = t
+	copy(f.src[:], src)
+	copy(f.dst[:], dst)
+	return
+}
+
+// EndpointInvalid is an endpoint type used for invalid endpoints, IE endpoints
+// that are specified incorrectly during creation.
+var EndpointInvalid = RegisterEndpointType(0, EndpointTypeMetadata{Name: "invalid", Formatter: func(b []byte) string {
+	return fmt.Sprintf("%v", b)
+}})
+
+// InvalidEndpoint is a singleton Endpoint of type EndpointInvalid.
+var InvalidEndpoint = NewEndpoint(EndpointInvalid, nil)
+
+// InvalidFlow is a singleton Flow of type EndpointInvalid.
+var InvalidFlow = NewFlow(EndpointInvalid, nil, nil)
diff --git a/vendor/github.com/google/gopacket/gc b/vendor/github.com/google/gopacket/gc
new file mode 100644
index 0000000..b1d8d2e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/gc
@@ -0,0 +1,288 @@
+#!/bin/bash
+# Copyright 2012 Google, Inc. All rights reserved.
+
+# This script provides a simple way to run benchmarks against previous code and
+# keep a log of how benchmarks change over time.  When used with the --benchmark
+# flag, it runs benchmarks from the current code and from the last commit run
+# with --benchmark, then stores the results in the git commit description.  We
+# rerun the old benchmarks along with the new ones, since there's no guarantee
+# that git commits will happen on the same machine, so machine differences could
+# cause wildly inaccurate results.
+#
+# If you're making changes to 'gopacket' which could cause performance changes,
+# you may be requested to use this commit script to make sure your changes don't
+# have large detrimental effects (or to show off how awesome your performance
+# improvements are).
+#
+# If not run with the --benchmark flag, this script is still very useful... it
+# makes sure all the correct go formatting, building, and testing work as
+# expected.
+
+function Usage {
+  cat <<EOF
+USAGE:  $0 [--benchmark regexp] [--root] [--gen] <git commit flags...>
+
+--benchmark:  Run benchmark comparisons against last benchmark'd commit
+--root:  Run tests that require root privileges
+--gen:  Generate code for MACs/ports by pulling down external data
+
+Note, some 'git commit' flags are necessary, if all else fails, pass in -a
+EOF
+  exit 1
+}
+
+BENCH=""
+GEN=""
+ROOT=""
+while [ ! -z "$1" ]; do
+  case "$1" in
+    "--benchmark")
+      BENCH="$2"
+      shift
+      shift
+      ;;
+    "--gen")
+      GEN="yes"
+      shift
+      ;;
+    "--root")
+      ROOT="yes"
+      shift
+      ;;
+    "--help")
+      Usage
+      ;;
+    "-h")
+      Usage
+      ;;
+    "help")
+      Usage
+      ;;
+    *)
+      break
+      ;;
+  esac
+done
+
+function Root {
+  if [ ! -z "$ROOT" ]; then
+    local exec="$1"
+    # Some folks (like me) keep source code in places inaccessible by root (like
+    # NFS), so to make sure things run smoothly we copy them to a /tmp location.
+    local tmpfile="$(mktemp -t gopacket_XXXXXXXX)"
+    echo "Running root test executable $exec as $tmpfile"
+    cp "$exec" "$tmpfile"
+    chmod a+x "$tmpfile"
+    shift
+    sudo "$tmpfile" "$@"
+  fi
+}
+
+if [ "$#" -eq "0" ]; then
+  Usage
+fi
+
+cd $(dirname $0)
+
+# Check for copyright notices.
+for filename in $(find ./ -type f -name '*.go'); do
+  if ! head -n 1 "$filename" | grep -q Copyright; then
+    echo "File '$filename' may not have copyright notice"
+    exit 1
+  fi
+done
+
+set -e
+set -x
+
+if [ ! -z "$ROOT" ]; then
+  echo "Running SUDO to get root priviledges for root tests"
+  sudo echo "have root"
+fi
+
+if [ ! -z "$GEN" ]; then
+  pushd macs
+  go run gen.go | gofmt > valid_mac_prefixes.go
+  popd
+  pushd layers
+  go run gen.go | gofmt > iana_ports.go
+  go run gen2.go | gofmt > enums_generated.go
+  popd
+fi
+
+# Make sure everything is formatted, compiles, and tests pass.
+go fmt ./...
+go test -i ./... 2>/dev/null >/dev/null || true
+go test
+go build
+pushd examples/bytediff
+go build
+popd
+if [ -f /usr/include/pcap.h ]; then
+  pushd pcap
+  go test ./...
+  go build ./...
+  go build pcap_tester.go
+  Root pcap_tester --mode=basic
+  Root pcap_tester --mode=filtered
+  Root pcap_tester --mode=timestamp || echo "You might not support timestamp sources"
+  popd
+  pushd examples/afpacket
+  go build
+  popd
+  pushd examples/pcapdump
+  go build
+  popd
+  pushd examples/arpscan
+  go build
+  popd
+  pushd examples/bidirectional
+  go build
+  popd
+  pushd examples/synscan
+  go build
+  popd
+  pushd examples/httpassembly
+  go build
+  popd
+  pushd examples/statsassembly
+  go build
+  popd
+fi
+pushd macs
+go test ./...
+gofmt -w gen.go
+go build gen.go
+popd
+pushd tcpassembly
+go test ./...
+popd
+pushd reassembly
+go test ./...
+popd
+pushd layers
+gofmt -w gen.go
+go build gen.go
+go test ./...
+popd
+pushd pcapgo
+go test ./...
+go build ./...
+popd
+if [ -f /usr/include/linux/if_packet.h ]; then
+  if grep -q TPACKET_V3 /usr/include/linux/if_packet.h; then
+    pushd afpacket
+    go build ./...
+    go test ./...
+    popd
+  fi
+fi
+if [ -f /usr/include/pfring.h ]; then
+  pushd pfring
+  go test ./...
+  go build ./...
+  popd
+  pushd examples/pfdump
+  go build
+  popd
+fi
+pushd ip4defrag
+go test ./...
+popd
+pushd defrag
+go test ./...
+popd
+
+for travis_script in `ls .travis.*.sh`; do
+  ./$travis_script
+done
+
+# Run our initial commit
+git commit "$@"
+
+if [ -z "$BENCH" ]; then
+  set +x
+  echo "We're not benchmarking and we've committed... we're done!"
+  exit
+fi
+
+### If we get here, we want to run benchmarks from current commit, and compare
+### then to benchmarks from the last --benchmark commit.
+
+# Get our current branch.
+BRANCH="$(git branch | grep '^*' | awk '{print $2}')"
+
+# File we're going to build our commit description in.
+COMMIT_FILE="$(mktemp /tmp/tmp.XXXXXXXX)"
+
+# Add the word "BENCH" to the start of the git commit.
+echo -n "BENCH " > $COMMIT_FILE
+
+# Get the current description... there must be an easier way.
+git log -n 1 | grep '^ ' | sed 's/^    //' >> $COMMIT_FILE
+
+# Get the commit sha for the last benchmark commit
+PREV=$(git log -n 1 --grep='BENCHMARK_MARKER_DO_NOT_CHANGE' | head -n 1 | awk '{print $2}')
+
+## Run current benchmarks
+
+cat >> $COMMIT_FILE <<EOF
+
+
+----------------------------------------------------------
+BENCHMARK_MARKER_DO_NOT_CHANGE
+----------------------------------------------------------
+
+Go version $(go version)
+
+
+TEST BENCHMARKS "$BENCH"
+EOF
+# go seems to have trouble with 'go test --bench=. ./...'
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+pushd layers
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+popd
+cat >> $COMMIT_FILE <<EOF
+
+
+PCAP BENCHMARK
+EOF
+if [ "$BENCH" -eq ".*" ]; then
+  go run pcap/gopacket_benchmark/*.go 2>&1 | tee -a $COMMIT_FILE
+fi
+
+
+
+## Reset to last benchmark commit, run benchmarks
+
+git checkout $PREV
+
+cat >> $COMMIT_FILE <<EOF
+----------------------------------------------------------
+BENCHMARKING AGAINST COMMIT $PREV
+----------------------------------------------------------
+
+
+OLD TEST BENCHMARKS
+EOF
+# go seems to have trouble with 'go test --bench=. ./...'
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+pushd layers
+go test --test.bench="$BENCH" 2>&1 | tee -a $COMMIT_FILE
+popd
+cat >> $COMMIT_FILE <<EOF
+
+
+OLD PCAP BENCHMARK
+EOF
+if [ "$BENCH" -eq ".*" ]; then
+  go run pcap/gopacket_benchmark/*.go 2>&1 | tee -a $COMMIT_FILE
+fi
+
+
+
+## Reset back to the most recent commit, edit the commit message by appending
+## benchmark results.
+git checkout $BRANCH
+git commit --amend -F $COMMIT_FILE
diff --git a/vendor/github.com/google/gopacket/go.mod b/vendor/github.com/google/gopacket/go.mod
new file mode 100644
index 0000000..fcd84bc
--- /dev/null
+++ b/vendor/github.com/google/gopacket/go.mod
@@ -0,0 +1,11 @@
+module github.com/google/gopacket
+
+go 1.12
+
+require (
+	github.com/vishvananda/netlink v1.1.0
+	github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f
+	golang.org/x/lint v0.0.0-20200302205851-738671d3881b // indirect
+	golang.org/x/net v0.0.0-20190620200207-3b0461eec859
+	golang.org/x/sys v0.0.0-20200217220822-9197077df867
+)
diff --git a/vendor/github.com/google/gopacket/go.sum b/vendor/github.com/google/gopacket/go.sum
new file mode 100644
index 0000000..20806aa
--- /dev/null
+++ b/vendor/github.com/google/gopacket/go.sum
@@ -0,0 +1,27 @@
+github.com/vishvananda/netlink v1.1.0 h1:1iyaYNBLmP6L0220aDnYQpo1QEV4t4hJ+xEEhhJH8j0=
+github.com/vishvananda/netlink v1.1.0/go.mod h1:cTgwzPIzzgDAYoQrMm0EdrjRUBkTqKYppBueQtXaqoE=
+github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df/go.mod h1:JP3t17pCcGlemwknint6hfoeCVQrEMVwxRLRjXpq+BU=
+github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f h1:p4VB7kIXpOQvVn1ZaTIVp+3vuYAXFe3OJEvjbUYJLaA=
+github.com/vishvananda/netns v0.0.0-20210104183010-2eb08e3e575f/go.mod h1:DD4vA1DwXk04H54A1oHXtwZmA0grkVMdPxx/VGLCah0=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
+golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
+golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67 h1:1Fzlr8kkDLQwqMP8GxrhptBLqZG/EDpiATneiZHY998=
+golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d h1:+R4KGOnez64A81RvjARKc4UT5/tI9ujCIVX+P5KiHuI=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867 h1:JoRuNIf+rpHl+VhScRQQvzbHed86tKkqwPMV34T8myw=
+golang.org/x/sys v0.0.0-20200217220822-9197077df867/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7 h1:EBZoQjiKKPaLbPrbpssUfuHtwM6KV/vb4U85g/cigFY=
+golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/vendor/github.com/google/gopacket/layerclass.go b/vendor/github.com/google/gopacket/layerclass.go
new file mode 100644
index 0000000..775cd09
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layerclass.go
@@ -0,0 +1,107 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+// LayerClass is a set of LayerTypes, used for grabbing one of a number of
+// different types from a packet.
+type LayerClass interface {
+	// Contains returns true if the given layer type should be considered part
+	// of this layer class.
+	Contains(LayerType) bool
+	// LayerTypes returns the set of all layer types in this layer class.
+	// Note that this may not be a fast operation on all LayerClass
+	// implementations.
+	LayerTypes() []LayerType
+}
+
+// Contains implements LayerClass.
+func (l LayerType) Contains(a LayerType) bool {
+	return l == a
+}
+
+// LayerTypes implements LayerClass.
+func (l LayerType) LayerTypes() []LayerType {
+	return []LayerType{l}
+}
+
+// LayerClassSlice implements a LayerClass with a slice.
+type LayerClassSlice []bool
+
+// Contains returns true if the given layer type should be considered part
+// of this layer class.
+func (s LayerClassSlice) Contains(t LayerType) bool {
+	return int(t) < len(s) && s[t]
+}
+
+// LayerTypes returns all layer types in this LayerClassSlice.
+// Because of LayerClassSlice's implementation, this could be quite slow.
+func (s LayerClassSlice) LayerTypes() (all []LayerType) {
+	for i := 0; i < len(s); i++ {
+		if s[i] {
+			all = append(all, LayerType(i))
+		}
+	}
+	return
+}
+
+// NewLayerClassSlice creates a new LayerClassSlice by creating a slice of
+// size max(types) and setting slice[t] to true for each type t.  Note, if
+// you implement your own LayerType and give it a high value, this WILL create
+// a very large slice.
+func NewLayerClassSlice(types []LayerType) LayerClassSlice {
+	var max LayerType
+	for _, typ := range types {
+		if typ > max {
+			max = typ
+		}
+	}
+	t := make([]bool, int(max+1))
+	for _, typ := range types {
+		t[typ] = true
+	}
+	return t
+}
+
+// LayerClassMap implements a LayerClass with a map.
+type LayerClassMap map[LayerType]bool
+
+// Contains returns true if the given layer type should be considered part
+// of this layer class.
+func (m LayerClassMap) Contains(t LayerType) bool {
+	return m[t]
+}
+
+// LayerTypes returns all layer types in this LayerClassMap.
+func (m LayerClassMap) LayerTypes() (all []LayerType) {
+	for t := range m {
+		all = append(all, t)
+	}
+	return
+}
+
+// NewLayerClassMap creates a LayerClassMap and sets map[t] to true for each
+// type in types.
+func NewLayerClassMap(types []LayerType) LayerClassMap {
+	m := LayerClassMap{}
+	for _, typ := range types {
+		m[typ] = true
+	}
+	return m
+}
+
+// NewLayerClass creates a LayerClass, attempting to be smart about which type
+// it creates based on which types are passed in.
+func NewLayerClass(types []LayerType) LayerClass {
+	for _, typ := range types {
+		if typ > maxLayerType {
+			// NewLayerClassSlice could create a very large object, so instead create
+			// a map.
+			return NewLayerClassMap(types)
+		}
+	}
+	return NewLayerClassSlice(types)
+}
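+
+// For illustration, a sketch of building a class that covers both IP versions
+// (assuming the layers subpackage is imported):
+//
+//	ipClass := gopacket.NewLayerClass([]gopacket.LayerType{
+//		layers.LayerTypeIPv4,
+//		layers.LayerTypeIPv6,
+//	})
+//	if ipClass.Contains(someLayer.LayerType()) {
+//		// someLayer is either an IPv4 or an IPv6 layer
+//	}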
diff --git a/vendor/github.com/google/gopacket/layers/.lint_blacklist b/vendor/github.com/google/gopacket/layers/.lint_blacklist
new file mode 100644
index 0000000..fded4f6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/.lint_blacklist
@@ -0,0 +1,39 @@
+dot11.go
+eap.go
+endpoints.go
+enums_generated.go
+enums.go
+ethernet.go
+geneve.go
+icmp4.go
+icmp6.go
+igmp.go
+ip4.go
+ip6.go
+layertypes.go
+linux_sll.go
+llc.go
+lldp.go
+mpls.go
+ndp.go
+ntp.go
+ospf.go
+pflog.go
+pppoe.go
+prism.go
+radiotap.go
+rudp.go
+sctp.go
+sflow.go
+tcp.go
+tcpip.go
+tls.go
+tls_alert.go
+tls_appdata.go
+tls_cipherspec.go
+tls_hanshake.go
+tls_test.go
+udp.go
+udplite.go
+usb.go
+vrrp.go
diff --git a/vendor/github.com/google/gopacket/layers/arp.go b/vendor/github.com/google/gopacket/layers/arp.go
new file mode 100644
index 0000000..0775ac0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/arp.go
@@ -0,0 +1,118 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// Potential values for ARP.Operation.
+const (
+	ARPRequest = 1
+	ARPReply   = 2
+)
+
+// ARP is an ARP packet header.
+type ARP struct {
+	BaseLayer
+	AddrType          LinkType
+	Protocol          EthernetType
+	HwAddressSize     uint8
+	ProtAddressSize   uint8
+	Operation         uint16
+	SourceHwAddress   []byte
+	SourceProtAddress []byte
+	DstHwAddress      []byte
+	DstProtAddress    []byte
+}
+
+// LayerType returns LayerTypeARP
+func (arp *ARP) LayerType() gopacket.LayerType { return LayerTypeARP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (arp *ARP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		df.SetTruncated()
+		return fmt.Errorf("ARP length %d too short", len(data))
+	}
+	arp.AddrType = LinkType(binary.BigEndian.Uint16(data[0:2]))
+	arp.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+	arp.HwAddressSize = data[4]
+	arp.ProtAddressSize = data[5]
+	arp.Operation = binary.BigEndian.Uint16(data[6:8])
+	arpLength := 8 + 2*arp.HwAddressSize + 2*arp.ProtAddressSize
+	if len(data) < int(arpLength) {
+		df.SetTruncated()
+		return fmt.Errorf("ARP length %d too short, %d expected", len(data), arpLength)
+	}
+	arp.SourceHwAddress = data[8 : 8+arp.HwAddressSize]
+	arp.SourceProtAddress = data[8+arp.HwAddressSize : 8+arp.HwAddressSize+arp.ProtAddressSize]
+	arp.DstHwAddress = data[8+arp.HwAddressSize+arp.ProtAddressSize : 8+2*arp.HwAddressSize+arp.ProtAddressSize]
+	arp.DstProtAddress = data[8+2*arp.HwAddressSize+arp.ProtAddressSize : 8+2*arp.HwAddressSize+2*arp.ProtAddressSize]
+
+	arp.Contents = data[:arpLength]
+	arp.Payload = data[arpLength:]
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (arp *ARP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	size := 8 + len(arp.SourceHwAddress) + len(arp.SourceProtAddress) + len(arp.DstHwAddress) + len(arp.DstProtAddress)
+	bytes, err := b.PrependBytes(size)
+	if err != nil {
+		return err
+	}
+	if opts.FixLengths {
+		if len(arp.SourceHwAddress) != len(arp.DstHwAddress) {
+			return errors.New("mismatched hardware address sizes")
+		}
+		arp.HwAddressSize = uint8(len(arp.SourceHwAddress))
+		if len(arp.SourceProtAddress) != len(arp.DstProtAddress) {
+			return errors.New("mismatched prot address sizes")
+		}
+		arp.ProtAddressSize = uint8(len(arp.SourceProtAddress))
+	}
+	binary.BigEndian.PutUint16(bytes, uint16(arp.AddrType))
+	binary.BigEndian.PutUint16(bytes[2:], uint16(arp.Protocol))
+	bytes[4] = arp.HwAddressSize
+	bytes[5] = arp.ProtAddressSize
+	binary.BigEndian.PutUint16(bytes[6:], arp.Operation)
+	start := 8
+	for _, addr := range [][]byte{
+		arp.SourceHwAddress,
+		arp.SourceProtAddress,
+		arp.DstHwAddress,
+		arp.DstProtAddress,
+	} {
+		copy(bytes[start:], addr)
+		start += len(addr)
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (arp *ARP) CanDecode() gopacket.LayerClass {
+	return LayerTypeARP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (arp *ARP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func decodeARP(data []byte, p gopacket.PacketBuilder) error {
+	arp := &ARP{}
+	return decodingLayerDecoder(arp, data, p)
+}
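+
+// For illustration, a sketch of building an ARP request for serialization
+// (srcMAC, srcIP and dstIP are placeholders of type net.HardwareAddr and
+// net.IP):
+//
+//	arp := &ARP{
+//		AddrType:          LinkTypeEthernet,
+//		Protocol:          EthernetTypeIPv4,
+//		HwAddressSize:     6,
+//		ProtAddressSize:   4,
+//		Operation:         ARPRequest,
+//		SourceHwAddress:   srcMAC,
+//		SourceProtAddress: srcIP.To4(),
+//		DstHwAddress:      []byte{0, 0, 0, 0, 0, 0},
+//		DstProtAddress:    dstIP.To4(),
+//	}
+//	buf := gopacket.NewSerializeBuffer()
+//	err := arp.SerializeTo(buf, gopacket.SerializeOptions{})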
diff --git a/vendor/github.com/google/gopacket/layers/asf.go b/vendor/github.com/google/gopacket/layers/asf.go
new file mode 100644
index 0000000..d698bd0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/asf.go
@@ -0,0 +1,166 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file in the root of the source tree.
+
+package layers
+
+// This file implements the ASF RMCP payload specified in section 3.2.2.3 of
+// https://www.dmtf.org/sites/default/files/standards/documents/DSP0136.pdf
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	// ASFRMCPEnterprise is the IANA-assigned Enterprise Number of the ASF-RMCP.
+	ASFRMCPEnterprise uint32 = 4542
+)
+
+// ASFDataIdentifier encapsulates fields used to uniquely identify the format of
+// the data block.
+//
+// While the enterprise number is almost always 4542 (ASF-RMCP), we support
+// registering layers using structs of this type as a key in case any users are
+// using OEM-extensions.
+type ASFDataIdentifier struct {
+
+	// Enterprise is the IANA Enterprise Number associated with the entity that
+	// defines the message type. A list can be found at
+	// https://www.iana.org/assignments/enterprise-numbers/enterprise-numbers.
+	// This can be thought of as the namespace for the message type.
+	Enterprise uint32
+
+	// Type is the message type, defined by the entity associated with the
+	// enterprise above. No pressure, but in the context of EN 4542, 1 byte is
+	// the difference between sending a ping and telling a machine to do an
+	// unconditional power down (0x80 and 0x12 respectively).
+	Type uint8
+}
+
+// LayerType returns the payload layer type corresponding to an ASF message
+// type.
+func (a ASFDataIdentifier) LayerType() gopacket.LayerType {
+	if lt := asfDataLayerTypes[a]; lt != 0 {
+		return lt
+	}
+
+	// some layer types don't have a payload, e.g. ASF-RMCP Presence Ping.
+	return gopacket.LayerTypePayload
+}
+
+// RegisterASFLayerType allows specifying that the data block of ASF packets
+// with a given enterprise number and type should be processed by a given layer
+// type. This overrides any existing registrations, including defaults.
+func RegisterASFLayerType(a ASFDataIdentifier, l gopacket.LayerType) {
+	asfDataLayerTypes[a] = l
+}
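+
+// For illustration, a sketch of routing a hypothetical OEM message type to a
+// custom layer type (the enterprise number and layer type here are made up):
+//
+//	RegisterASFLayerType(ASFDataIdentifier{
+//		Enterprise: 99999, // hypothetical OEM enterprise number
+//		Type:       0x01,
+//	}, MyOEMLayerType)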
+
+var (
+	// ASFDataIdentifierPresencePong is the message type of the response to a
+	// Presence Ping message. It indicates the sender is ASF-RMCP-aware.
+	ASFDataIdentifierPresencePong = ASFDataIdentifier{
+		Enterprise: ASFRMCPEnterprise,
+		Type:       0x40,
+	}
+
+	// ASFDataIdentifierPresencePing is a message type sent to a managed client
+	// to solicit a Presence Pong response. Clients may ignore this if the RMCP
+	// version is unsupported. Sending this message with a sequence number <255
+	// is the recommended way of finding out whether an implementation sends
+	// RMCP ACKs (e.g. iDRAC does, Super Micro does not).
+	//
+	// Systems implementing IPMI must respond to this ping to conform to the
+	// spec, so it is a good substitute for an ICMP ping.
+	ASFDataIdentifierPresencePing = ASFDataIdentifier{
+		Enterprise: ASFRMCPEnterprise,
+		Type:       0x80,
+	}
+
+	// asfDataLayerTypes is used to find the next layer for a given ASF header.
+	asfDataLayerTypes = map[ASFDataIdentifier]gopacket.LayerType{
+		ASFDataIdentifierPresencePong: LayerTypeASFPresencePong,
+	}
+)
+
+// ASF defines ASF's generic RMCP message Data block format. See section
+// 3.2.2.3.
+type ASF struct {
+	BaseLayer
+	ASFDataIdentifier
+
+	// Tag is used to match request/response pairs. The tag of a response is set
+	// to that of the message it is responding to. If a message is
+	// unidirectional, i.e. not part of a request/response pair, this is set to
+	// 255.
+	Tag uint8
+
+	// 1 byte reserved, set to 0x00.
+
+	// Length is the length of this layer's payload in bytes.
+	Length uint8
+}
+
+// LayerType returns LayerTypeASF. It partially satisfies Layer and
+// SerializableLayer.
+func (*ASF) LayerType() gopacket.LayerType {
+	return LayerTypeASF
+}
+
+// CanDecode returns LayerTypeASF. It partially satisfies DecodingLayer.
+func (a *ASF) CanDecode() gopacket.LayerClass {
+	return a.LayerType()
+}
+
+// DecodeFromBytes makes the layer represent the provided bytes. It partially
+// satisfies DecodingLayer.
+func (a *ASF) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		df.SetTruncated()
+		return fmt.Errorf("invalid ASF data header, length %v less than 8",
+			len(data))
+	}
+
+	a.BaseLayer.Contents = data[:8]
+	a.BaseLayer.Payload = data[8:]
+
+	a.Enterprise = binary.BigEndian.Uint32(data[:4])
+	a.Type = uint8(data[4])
+	a.Tag = uint8(data[5])
+	// 1 byte reserved
+	a.Length = uint8(data[7])
+	return nil
+}
+
+// NextLayerType returns the layer type corresponding to the message type of
+// this ASF data layer. This partially satisfies DecodingLayer.
+func (a *ASF) NextLayerType() gopacket.LayerType {
+	return a.ASFDataIdentifier.LayerType()
+}
+
+// SerializeTo writes the serialized form of this layer into the SerializeBuffer,
+// partially satisfying SerializableLayer.
+func (a *ASF) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	payload := b.Bytes()
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint32(bytes[:4], a.Enterprise)
+	bytes[4] = uint8(a.Type)
+	bytes[5] = a.Tag
+	bytes[6] = 0x00
+	if opts.FixLengths {
+		a.Length = uint8(len(payload))
+	}
+	bytes[7] = a.Length
+	return nil
+}
+
+// decodeASF decodes the byte slice into an RMCP-ASF data struct.
+func decodeASF(data []byte, p gopacket.PacketBuilder) error {
+	return decodingLayerDecoder(&ASF{}, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/asf_presencepong.go b/vendor/github.com/google/gopacket/layers/asf_presencepong.go
new file mode 100644
index 0000000..e9a8baf
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/asf_presencepong.go
@@ -0,0 +1,194 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file in the root of the source tree.
+
+package layers
+
+// This file implements the RMCP ASF Presence Pong message, specified in section
+// 3.2.4.3 of
+// https://www.dmtf.org/sites/default/files/standards/documents/DSP0136.pdf. It
+// also contains non-competing elements from IPMI v2.0, specified in section
+// 13.2.4 of
+// https://www.intel.com/content/dam/www/public/us/en/documents/specification-updates/ipmi-intelligent-platform-mgt-interface-spec-2nd-gen-v2-0-spec-update.pdf.
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+type (
+	// ASFEntity is the type of individual entities that a Presence Pong
+	// response can indicate support of. The entities currently implemented by
+	// the spec are IPMI and ASFv1.
+	ASFEntity uint8
+
+	// ASFInteraction is the type of individual interactions that a Presence
+	// Pong response can indicate support for. The interactions currently
+	// implemented by the spec are RMCP security extensions. Although not
+	// specified, IPMI uses this field to indicate support for DASH, which is
+	// supported as well.
+	ASFInteraction uint8
+)
+
+const (
+	// ASFDCMIEnterprise is the IANA-assigned Enterprise Number of the Data
+	// Center Manageability Interface Forum. The Presence Pong response's
+	// Enterprise field being set to this value indicates support for DCMI. The
+	// DCMI spec regards the OEM field as reserved, so these should be null.
+	ASFDCMIEnterprise uint32 = 36465
+
+	// ASFPresencePongEntityIPMI ANDs with Presence Pong's supported entities
+	// field if the managed system supports IPMI.
+	ASFPresencePongEntityIPMI ASFEntity = 1 << 7
+
+	// ASFPresencePongEntityASFv1 ANDs with Presence Pong's supported entities
+	// field if the managed system supports ASF v1.0.
+	ASFPresencePongEntityASFv1 ASFEntity = 1
+
+	// ASFPresencePongInteractionSecurityExtensions ANDs with Presence Pong's
+	// supported interactions field if the managed system supports RMCP v2.0
+	// security extensions. See section 3.2.3.
+	ASFPresencePongInteractionSecurityExtensions ASFInteraction = 1 << 7
+
+	// ASFPresencePongInteractionDASH ANDs with Presence Pong's supported
+	// interactions field if the managed system supports DMTF DASH. See
+	// https://www.dmtf.org/standards/dash.
+	ASFPresencePongInteractionDASH ASFInteraction = 1 << 5
+)
+
+// ASFPresencePong defines the structure of a Presence Pong message's payload.
+// See section 3.2.4.3.
+type ASFPresencePong struct {
+	BaseLayer
+
+	// Enterprise is the IANA Enterprise Number of an entity that has defined
+	// OEM-specific capabilities for the managed client. If no such capabilities
+	// exist, this is set to ASF's IANA Enterprise Number.
+	Enterprise uint32
+
+	// OEM identifies OEM-specific capabilities. Its structure is defined by the
+	// OEM. This is set to 0s if no OEM-specific capabilities exist. This
+	// implementation does not change byte order from the wire for this field.
+	OEM [4]byte
+
+	// We break out entities and interactions into separate booleans as
+	// discovery is the entire point of this type of message, so we assume they
+	// are accessed. It also makes gopacket's default layer printing more
+	// useful.
+
+	// IPMI is true if IPMI is supported by the managed system. There is no
+	// explicit version in the specification, however given the dates, this is
+	// assumed to be IPMI v1.0.  Support for IPMI is contained in the "supported
+	// entities" field of the presence pong payload.
+	IPMI bool
+
+	// ASFv1 indicates support for ASF v1.0. This seems somewhat redundant as
+	// ASF must be supported in order to receive a response. This is contained
+	// in the "supported entities" field of the presence pong payload.
+	ASFv1 bool
+
+	// SecurityExtensions indicates support for RMCP Security Extensions,
+	// specified in ASF v2.0. This will always be false for v1.x
+	// implementations. This is contained in the "supported interactions" field
+	// of the presence pong payload. This field is defined in ASF v1.0, but has
+	// no useful value.
+	SecurityExtensions bool
+
+	// DASH is true if DMTF DASH is supported. This is specified in IPMI v2.0
+	// rather than ASF v2.0; however, the latter does not preclude it, so we
+	// support it.
+	DASH bool
+
+	// 6 bytes reserved after the entities and interactions fields, set to 0s.
+}
+
+// SupportsDCMI returns whether the Presence Pong message indicates support for
+// the Data Center Manageability Interface, which is an extension of IPMI v2.0.
+func (a *ASFPresencePong) SupportsDCMI() bool {
+	return a.Enterprise == ASFDCMIEnterprise && a.IPMI && a.ASFv1
+}
+
+// LayerType returns LayerTypeASFPresencePong. It partially satisfies Layer and
+// SerializableLayer.
+func (*ASFPresencePong) LayerType() gopacket.LayerType {
+	return LayerTypeASFPresencePong
+}
+
+// CanDecode returns LayerTypeASFPresencePong. It partially satisfies
+// DecodingLayer.
+func (a *ASFPresencePong) CanDecode() gopacket.LayerClass {
+	return a.LayerType()
+}
+
+// DecodeFromBytes makes the layer represent the provided bytes. It partially
+// satisfies DecodingLayer.
+func (a *ASFPresencePong) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 16 {
+		df.SetTruncated()
+		return fmt.Errorf("invalid ASF presence pong payload, length %v less than 16",
+			len(data))
+	}
+
+	a.BaseLayer.Contents = data[:16]
+	a.BaseLayer.Payload = data[16:]
+
+	a.Enterprise = binary.BigEndian.Uint32(data[:4])
+	copy(a.OEM[:], data[4:8]) // N.B. no byte order change
+	a.IPMI = data[8]&uint8(ASFPresencePongEntityIPMI) != 0
+	a.ASFv1 = data[8]&uint8(ASFPresencePongEntityASFv1) != 0
+	a.SecurityExtensions = data[9]&uint8(ASFPresencePongInteractionSecurityExtensions) != 0
+	a.DASH = data[9]&uint8(ASFPresencePongInteractionDASH) != 0
+	// ignore remaining 6 bytes; should be set to 0s
+	return nil
+}
+
+// NextLayerType returns LayerTypePayload, as there are no further layers to
+// decode. This partially satisfies DecodingLayer.
+func (a *ASFPresencePong) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// SerializeTo writes the serialized form of this layer into the SerializeBuffer,
+// partially satisfying SerializableLayer.
+func (a *ASFPresencePong) SerializeTo(b gopacket.SerializeBuffer, _ gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(16)
+	if err != nil {
+		return err
+	}
+
+	binary.BigEndian.PutUint32(bytes[:4], a.Enterprise)
+
+	copy(bytes[4:8], a.OEM[:])
+
+	bytes[8] = 0
+	if a.IPMI {
+		bytes[8] |= uint8(ASFPresencePongEntityIPMI)
+	}
+	if a.ASFv1 {
+		bytes[8] |= uint8(ASFPresencePongEntityASFv1)
+	}
+
+	bytes[9] = 0
+	if a.SecurityExtensions {
+		bytes[9] |= uint8(ASFPresencePongInteractionSecurityExtensions)
+	}
+	if a.DASH {
+		bytes[9] |= uint8(ASFPresencePongInteractionDASH)
+	}
+
+	// zero-out remaining 6 bytes
+	for i := 10; i < len(bytes); i++ {
+		bytes[i] = 0x00
+	}
+
+	return nil
+}
+
+// decodeASFPresencePong decodes the byte slice into an RMCP-ASF Presence Pong
+// struct.
+func decodeASFPresencePong(data []byte, p gopacket.PacketBuilder) error {
+	return decodingLayerDecoder(&ASFPresencePong{}, data, p)
+}
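
For reference, a minimal standalone sketch (not part of this diff) of how the DecodingLayer above can be exercised directly on a raw 16-byte Presence Pong payload. The byte values are fabricated for illustration and reuse the DCMI enterprise number defined above:

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// Fabricated 16-byte Presence Pong payload:
	//   Enterprise 36465 (DCMI), zeroed OEM field,
	//   entities = IPMI|ASFv1, interactions = security extensions.
	payload := []byte{
		0x00, 0x00, 0x8e, 0x71, // Enterprise = 36465
		0x00, 0x00, 0x00, 0x00, // OEM (reserved for DCMI)
		0x81,                   // supported entities: IPMI (0x80) | ASFv1 (0x01)
		0x80,                   // supported interactions: RMCP security extensions
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // reserved
	}

	var pong layers.ASFPresencePong
	if err := pong.DecodeFromBytes(payload, gopacket.NilDecodeFeedback); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	// Prints: IPMI=true ASFv1=true DCMI=true
	fmt.Printf("IPMI=%v ASFv1=%v DCMI=%v\n", pong.IPMI, pong.ASFv1, pong.SupportsDCMI())
}
```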
diff --git a/vendor/github.com/google/gopacket/layers/base.go b/vendor/github.com/google/gopacket/layers/base.go
new file mode 100644
index 0000000..cd59b46
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/base.go
@@ -0,0 +1,52 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"github.com/google/gopacket"
+)
+
+// BaseLayer is a convenience struct which implements the LayerContents and
+// LayerPayload functions of the Layer interface.
+type BaseLayer struct {
+	// Contents is the set of bytes that make up this layer.  IE: for an
+	// Ethernet packet, this would be the set of bytes making up the
+	// Ethernet frame.
+	Contents []byte
+	// Payload is the set of bytes contained by (but not part of) this
+	// Layer.  Again, to take Ethernet as an example, this would be the
+	// set of bytes encapsulated by the Ethernet protocol.
+	Payload []byte
+}
+
+// LayerContents returns the bytes of the packet layer.
+func (b *BaseLayer) LayerContents() []byte { return b.Contents }
+
+// LayerPayload returns the bytes contained within the packet layer.
+func (b *BaseLayer) LayerPayload() []byte { return b.Payload }
+
+type layerDecodingLayer interface {
+	gopacket.Layer
+	DecodeFromBytes([]byte, gopacket.DecodeFeedback) error
+	NextLayerType() gopacket.LayerType
+}
+
+func decodingLayerDecoder(d layerDecodingLayer, data []byte, p gopacket.PacketBuilder) error {
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(d)
+	next := d.NextLayerType()
+	if next == gopacket.LayerTypeZero {
+		return nil
+	}
+	return p.NextDecoder(next)
+}
+
+// hacky way to zero out memory... there must be a better way?
+var lotsOfZeros [1024]byte
diff --git a/vendor/github.com/google/gopacket/layers/bfd.go b/vendor/github.com/google/gopacket/layers/bfd.go
new file mode 100644
index 0000000..43030fb
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/bfd.go
@@ -0,0 +1,481 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// BFD Control Packet Format
+// -------------------------
+// The current version of BFD's RFC (RFC 5880) contains the following
+// diagram for the BFD Control packet format:
+//
+//      0                   1                   2                   3
+//      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |Vers |  Diag   |Sta|P|F|C|A|D|M|  Detect Mult  |    Length     |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                       My Discriminator                        |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                      Your Discriminator                       |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                    Desired Min TX Interval                    |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                   Required Min RX Interval                    |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                 Required Min Echo RX Interval                 |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//     An optional Authentication Section MAY be present:
+//
+//      0                   1                   2                   3
+//      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |   Auth Type   |   Auth Len    |    Authentication Data...     |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+//     Simple Password Authentication Section Format
+//     ---------------------------------------------
+//      0                   1                   2                   3
+//      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |   Auth Type   |   Auth Len    |  Auth Key ID  |  Password...  |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                              ...                              |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+//     Keyed MD5 and Meticulous Keyed MD5 Authentication Section Format
+//     ----------------------------------------------------------------
+//      0                   1                   2                   3
+//      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |   Auth Type   |   Auth Len    |  Auth Key ID  |   Reserved    |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                        Sequence Number                        |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                      Auth Key/Digest...                       |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                              ...                              |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//
+//     Keyed SHA1 and Meticulous Keyed SHA1 Authentication Section Format
+//     ------------------------------------------------------------------
+//      0                   1                   2                   3
+//      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |   Auth Type   |   Auth Len    |  Auth Key ID  |   Reserved    |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                        Sequence Number                        |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                       Auth Key/Hash...                        |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                              ...                              |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+//     From https://tools.ietf.org/rfc/rfc5880.txt
+const bfdMinimumRecordSizeInBytes int = 24
+
+// BFDVersion represents the version as decoded from the BFD control message
+type BFDVersion uint8
+
+// BFDDiagnostic represents diagnostic information about a BFD session
+type BFDDiagnostic uint8
+
+// constants that define BFDDiagnostic flags
+const (
+	BFDDiagnosticNone               BFDDiagnostic = 0 // No Diagnostic
+	BFDDiagnosticTimeExpired        BFDDiagnostic = 1 // Control Detection Time Expired
+	BFDDiagnosticEchoFailed         BFDDiagnostic = 2 // Echo Function Failed
+	BFDDiagnosticNeighborSignalDown BFDDiagnostic = 3 // Neighbor Signaled Session Down
+	BFDDiagnosticForwardPlaneReset  BFDDiagnostic = 4 // Forwarding Plane Reset
+	BFDDiagnosticPathDown           BFDDiagnostic = 5 // Path Down
+	BFDDiagnosticConcatPathDown     BFDDiagnostic = 6 // Concatenated Path Down
+	BFDDiagnosticAdminDown          BFDDiagnostic = 7 // Administratively Down
+	BFDDiagnosticRevConcatPathDown  BFDDiagnostic = 8 // Reverse Concatenated Path Down
+)
+
+// String returns a string version of BFDDiagnostic
+func (bd BFDDiagnostic) String() string {
+	switch bd {
+	default:
+		return "Unknown"
+	case BFDDiagnosticNone:
+		return "None"
+	case BFDDiagnosticTimeExpired:
+		return "Control Detection Time Expired"
+	case BFDDiagnosticEchoFailed:
+		return "Echo Function Failed"
+	case BFDDiagnosticNeighborSignalDown:
+		return "Neighbor Signaled Session Down"
+	case BFDDiagnosticForwardPlaneReset:
+		return "Forwarding Plane Reset"
+	case BFDDiagnosticPathDown:
+		return "Path Down"
+	case BFDDiagnosticConcatPathDown:
+		return "Concatenated Path Down"
+	case BFDDiagnosticAdminDown:
+		return "Administratively Down"
+	case BFDDiagnosticRevConcatPathDown:
+		return "Reverse Concatenated Path Down"
+	}
+}
+
+// BFDState represents the state of a BFD session
+type BFDState uint8
+
+// constants that define BFDState
+const (
+	BFDStateAdminDown BFDState = 0
+	BFDStateDown      BFDState = 1
+	BFDStateInit      BFDState = 2
+	BFDStateUp        BFDState = 3
+)
+
+// String returns a string version of BFDState
+func (s BFDState) String() string {
+	switch s {
+	default:
+		return "Unknown"
+	case BFDStateAdminDown:
+		return "Admin Down"
+	case BFDStateDown:
+		return "Down"
+	case BFDStateInit:
+		return "Init"
+	case BFDStateUp:
+		return "Up"
+	}
+}
+
+// BFDDetectMultiplier is the value by which the negotiated transmit interval
+// is multiplied to yield the Detection Time for the receiving system in
+// Asynchronous mode.
+type BFDDetectMultiplier uint8
+
+// BFDDiscriminator is a unique, nonzero discriminator value used
+// to demultiplex multiple BFD sessions between the same pair of systems.
+type BFDDiscriminator uint32
+
+// BFDTimeInterval represents a time interval in microseconds
+type BFDTimeInterval uint32
+
+// BFDAuthType represents the authentication used in the BFD session
+type BFDAuthType uint8
+
+// constants that define the BFDAuthType
+const (
+	BFDAuthTypeNone                BFDAuthType = 0 // No Auth
+	BFDAuthTypePassword            BFDAuthType = 1 // Simple Password
+	BFDAuthTypeKeyedMD5            BFDAuthType = 2 // Keyed MD5
+	BFDAuthTypeMeticulousKeyedMD5  BFDAuthType = 3 // Meticulous Keyed MD5
+	BFDAuthTypeKeyedSHA1           BFDAuthType = 4 // Keyed SHA1
+	BFDAuthTypeMeticulousKeyedSHA1 BFDAuthType = 5 // Meticulous Keyed SHA1
+)
+
+// String returns a string version of BFDAuthType
+func (at BFDAuthType) String() string {
+	switch at {
+	default:
+		return "Unknown"
+	case BFDAuthTypeNone:
+		return "No Authentication"
+	case BFDAuthTypePassword:
+		return "Simple Password"
+	case BFDAuthTypeKeyedMD5:
+		return "Keyed MD5"
+	case BFDAuthTypeMeticulousKeyedMD5:
+		return "Meticulous Keyed MD5"
+	case BFDAuthTypeKeyedSHA1:
+		return "Keyed SHA1"
+	case BFDAuthTypeMeticulousKeyedSHA1:
+		return "Meticulous Keyed SHA1"
+	}
+}
+
+// BFDAuthKeyID represents the authentication key ID in use for
+// this packet.  This allows multiple keys to be active simultaneously.
+type BFDAuthKeyID uint8
+
+// BFDAuthSequenceNumber represents the sequence number for this packet.
+// For Keyed Authentication, this value is incremented occasionally.  For
+// Meticulous Keyed Authentication, this value is incremented for each
+// successive packet transmitted for a session.  This provides protection
+// against replay attacks.
+type BFDAuthSequenceNumber uint32
+
+// BFDAuthData represents the authentication key or digest
+type BFDAuthData []byte
+
+// BFDAuthHeader represents authentication data used in the BFD session
+type BFDAuthHeader struct {
+	AuthType       BFDAuthType
+	KeyID          BFDAuthKeyID
+	SequenceNumber BFDAuthSequenceNumber
+	Data           BFDAuthData
+}
+
+// Length returns the data length of the BFDAuthHeader based on the
+// authentication type
+func (h *BFDAuthHeader) Length() int {
+	switch h.AuthType {
+	case BFDAuthTypePassword:
+		return 3 + len(h.Data)
+	case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+		return 8 + len(h.Data)
+	case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+		return 8 + len(h.Data)
+	default:
+		return 0
+	}
+}
+
+// BFD represents a BFD control message packet whose payload contains
+// the control information required for a BFD session.
+//
+// References
+// ----------
+//
+// Wikipedia's BFD entry:
+//     https://en.wikipedia.org/wiki/Bidirectional_Forwarding_Detection
+//     This is the best place to get an overview of BFD.
+//
+// RFC 5880 "Bidirectional Forwarding Detection (BFD)" (2010)
+//     https://tools.ietf.org/html/rfc5880
+//     This is the original BFD specification.
+//
+// RFC 5881 "Bidirectional Forwarding Detection (BFD) for IPv4 and IPv6 (Single Hop)" (2010)
+//     https://tools.ietf.org/html/rfc5881
+//     Describes the use of the Bidirectional Forwarding Detection (BFD)
+//     protocol over IPv4 and IPv6 for single IP hops.
+type BFD struct {
+	BaseLayer // Stores the packet bytes and payload bytes.
+
+	Version                   BFDVersion          // Version of the BFD protocol.
+	Diagnostic                BFDDiagnostic       // Diagnostic code for last state change
+	State                     BFDState            // Current state
+	Poll                      bool                // Requesting verification
+	Final                     bool                // Responding to a received BFD Control packet that had the Poll (P) bit set.
+	ControlPlaneIndependent   bool                // BFD implementation does not share fate with its control plane
+	AuthPresent               bool                // Authentication Section is present and the session is to be authenticated
+	Demand                    bool                // Demand mode is active
+	Multipoint                bool                // For future point-to-multipoint extensions. Must always be zero
+	DetectMultiplier          BFDDetectMultiplier // Detection time multiplier
+	MyDiscriminator           BFDDiscriminator    // A unique, nonzero discriminator value
+	YourDiscriminator         BFDDiscriminator    // discriminator received from the remote system.
+	DesiredMinTxInterval      BFDTimeInterval     // Minimum interval, in microseconds,  the local system would like to use when transmitting BFD Control packets
+	RequiredMinRxInterval     BFDTimeInterval     // Minimum interval, in microseconds, between received BFD Control packets that this system is capable of supporting
+	RequiredMinEchoRxInterval BFDTimeInterval     // Minimum interval, in microseconds, between received BFD Echo packets that this system is capable of supporting
+	AuthHeader                *BFDAuthHeader      // Authentication data, variable length.
+}
+
+// Length returns the data length of a BFD Control message which
+// changes based on the presence and type of authentication
+// contained in the message
+func (d *BFD) Length() int {
+	if d.AuthPresent && (d.AuthHeader != nil) {
+		return bfdMinimumRecordSizeInBytes + d.AuthHeader.Length()
+	}
+
+	return bfdMinimumRecordSizeInBytes
+}
+
+// LayerType returns the layer type of the BFD object, which is LayerTypeBFD.
+func (d *BFD) LayerType() gopacket.LayerType {
+	return LayerTypeBFD
+}
+
+// decodeBFD analyses a byte slice and attempts to decode it as a BFD
+// control packet
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns an error (non nil).
+//
+// This function is employed in layertypes.go to register the BFD layer.
+func decodeBFD(data []byte, p gopacket.PacketBuilder) error {
+
+	// Attempt to decode the byte slice.
+	d := &BFD{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+
+	// If the decoding worked, add the layer to the packet and set it
+	// as the application layer too, if there isn't already one.
+	p.AddLayer(d)
+	p.SetApplicationLayer(d)
+
+	return nil
+}
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as a BFD
+// control packet.
+//
+// Upon success, it loads the BFD object with information about the packet
+// and returns nil.
+// Upon failure, it returns an error (non nil).
+func (d *BFD) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	// If the data block is too short to be a BFD record, then return an error.
+	if len(data) < bfdMinimumRecordSizeInBytes {
+		df.SetTruncated()
+		return errors.New("BFD packet too short")
+	}
+
+	pLen := uint8(data[3])
+	if len(data) != int(pLen) {
+		return errors.New("BFD packet length does not match")
+	}
+
+	// BFD type embeds type BaseLayer which contains two fields:
+	//    Contents is supposed to contain the bytes of the data at this level.
+	//    Payload is supposed to contain the payload of this level.
+	// Here we set the baselayer to be the bytes of the BFD record.
+	d.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+
+	// Extract the fields from the block of bytes.
+	// To make sense of this, refer to the packet diagram
+	// above and the section on endian conventions.
+
+	// The first few fields are all packed into the first 32 bits. Unpack them.
+	d.Version = BFDVersion(((data[0] & 0xE0) >> 5))
+	d.Diagnostic = BFDDiagnostic(data[0] & 0x1F)
+	data = data[1:]
+
+	d.State = BFDState((data[0] & 0xC0) >> 6)
+	d.Poll = data[0]&0x20 != 0
+	d.Final = data[0]&0x10 != 0
+	d.ControlPlaneIndependent = data[0]&0x08 != 0
+	d.AuthPresent = data[0]&0x04 != 0
+	d.Demand = data[0]&0x02 != 0
+	d.Multipoint = data[0]&0x01 != 0
+	data = data[1:]
+
+	data, d.DetectMultiplier = data[1:], BFDDetectMultiplier(data[0])
+	data, _ = data[1:], uint8(data[0]) // Consume length
+
+	// The remaining fields can just be copied in big endian order.
+	data, d.MyDiscriminator = data[4:], BFDDiscriminator(binary.BigEndian.Uint32(data[:4]))
+	data, d.YourDiscriminator = data[4:], BFDDiscriminator(binary.BigEndian.Uint32(data[:4]))
+	data, d.DesiredMinTxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+	data, d.RequiredMinRxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+	data, d.RequiredMinEchoRxInterval = data[4:], BFDTimeInterval(binary.BigEndian.Uint32(data[:4]))
+
+	if d.AuthPresent && (len(data) > 2) {
+		d.AuthHeader = &BFDAuthHeader{}
+		data, d.AuthHeader.AuthType = data[1:], BFDAuthType(data[0])
+		data, _ = data[1:], uint8(data[0]) // Consume length
+		data, d.AuthHeader.KeyID = data[1:], BFDAuthKeyID(data[0])
+
+		switch d.AuthHeader.AuthType {
+		case BFDAuthTypePassword:
+			d.AuthHeader.Data = BFDAuthData(data)
+		case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+			// Skipped reserved byte
+			data, d.AuthHeader.SequenceNumber = data[5:], BFDAuthSequenceNumber(binary.BigEndian.Uint32(data[1:5]))
+			d.AuthHeader.Data = BFDAuthData(data)
+		case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+			// Skipped reserved byte
+			data, d.AuthHeader.SequenceNumber = data[5:], BFDAuthSequenceNumber(binary.BigEndian.Uint32(data[1:5]))
+			d.AuthHeader.Data = BFDAuthData(data)
+		}
+	}
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *BFD) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	data, err := b.PrependBytes(bfdMinimumRecordSizeInBytes)
+	if err != nil {
+		return err
+	}
+
+	// Pack the first few fields into the first 32 bits.
+	data[0] = byte(byte(d.Version<<5) | byte(d.Diagnostic))
+	h := uint8(0)
+	h |= (uint8(d.State) << 6)
+	h |= (uint8(bool2uint8(d.Poll)) << 5)
+	h |= (uint8(bool2uint8(d.Final)) << 4)
+	h |= (uint8(bool2uint8(d.ControlPlaneIndependent)) << 3)
+	h |= (uint8(bool2uint8(d.AuthPresent)) << 2)
+	h |= (uint8(bool2uint8(d.Demand)) << 1)
+	h |= uint8(bool2uint8(d.Multipoint))
+	data[1] = byte(h)
+	data[2] = byte(d.DetectMultiplier)
+	data[3] = byte(d.Length())
+
+	// The remaining fields can just be copied in big endian order.
+	binary.BigEndian.PutUint32(data[4:], uint32(d.MyDiscriminator))
+	binary.BigEndian.PutUint32(data[8:], uint32(d.YourDiscriminator))
+	binary.BigEndian.PutUint32(data[12:], uint32(d.DesiredMinTxInterval))
+	binary.BigEndian.PutUint32(data[16:], uint32(d.RequiredMinRxInterval))
+	binary.BigEndian.PutUint32(data[20:], uint32(d.RequiredMinEchoRxInterval))
+
+	if d.AuthPresent && (d.AuthHeader != nil) {
+		auth, err := b.AppendBytes(int(d.AuthHeader.Length()))
+		if err != nil {
+			return err
+		}
+
+		auth[0] = byte(d.AuthHeader.AuthType)
+		auth[1] = byte(d.AuthHeader.Length())
+		auth[2] = byte(d.AuthHeader.KeyID)
+
+		switch d.AuthHeader.AuthType {
+		case BFDAuthTypePassword:
+			copy(auth[3:], d.AuthHeader.Data)
+		case BFDAuthTypeKeyedMD5, BFDAuthTypeMeticulousKeyedMD5:
+			auth[3] = byte(0)
+			binary.BigEndian.PutUint32(auth[4:], uint32(d.AuthHeader.SequenceNumber))
+			copy(auth[8:], d.AuthHeader.Data)
+		case BFDAuthTypeKeyedSHA1, BFDAuthTypeMeticulousKeyedSHA1:
+			auth[3] = byte(0)
+			binary.BigEndian.PutUint32(auth[4:], uint32(d.AuthHeader.SequenceNumber))
+			copy(auth[8:], d.AuthHeader.Data)
+		}
+	}
+
+	return nil
+}
+
+// CanDecode returns a set of layers that BFD objects can decode.
+// As BFD objects can only decode the BFD layer, we can return just that layer.
+// Apparently a single layer type implements LayerClass.
+func (d *BFD) CanDecode() gopacket.LayerClass {
+	return LayerTypeBFD
+}
+
+// NextLayerType specifies the next layer that GoPacket should attempt to
+// analyse after this (BFD) layer. As BFD packets do not contain any payload
+// bytes, there are no further layers to analyse.
+func (d *BFD) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// Payload returns an empty byte slice as BFD packets do not carry a payload
+func (d *BFD) Payload() []byte {
+	return nil
+}
+
+// bool2uint8 converts a bool to uint8
+func bool2uint8(b bool) uint8 {
+	if b {
+		return 1
+	}
+	return 0
+}
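
A small, self-contained round trip of the layer above, using only the public gopacket and layers APIs: build a BFD control packet in the Up state, serialize it into its 24-byte wire form, then decode it back. The field values are arbitrary examples.

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	bfd := &layers.BFD{
		Version:               1,
		State:                 layers.BFDStateUp,
		DetectMultiplier:      3,
		MyDiscriminator:       0x11111111,
		YourDiscriminator:     0x22222222,
		DesiredMinTxInterval:  300000, // microseconds
		RequiredMinRxInterval: 300000, // microseconds
	}

	// Serialize: without an auth header this is exactly 24 bytes.
	buf := gopacket.NewSerializeBuffer()
	if err := bfd.SerializeTo(buf, gopacket.SerializeOptions{}); err != nil {
		panic(err)
	}

	// Decode the wire bytes back into a fresh BFD struct.
	var decoded layers.BFD
	if err := decoded.DecodeFromBytes(buf.Bytes(), gopacket.NilDecodeFeedback); err != nil {
		panic(err)
	}
	// Prints: Up 24 true
	fmt.Println(decoded.State, len(buf.Bytes()), decoded.MyDiscriminator == bfd.MyDiscriminator)
}
```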
diff --git a/vendor/github.com/google/gopacket/layers/bitfield.go b/vendor/github.com/google/gopacket/layers/bitfield.go
new file mode 100644
index 0000000..7d54acf
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/bitfield.go
@@ -0,0 +1,18 @@
+// Copyright 2021 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file in the root of the source tree.
+
+package layers
+
+type bitfield [1024]uint64
+
+// set sets bit i in bitfield b to 1.
+func (b *bitfield) set(i uint16) {
+	b[i>>6] |= (1 << (i & 0x3f))
+}
+
+// has reports whether bit i is set to 1 in bitfield b.
+func (b *bitfield) has(i uint16) bool {
+	return b[i>>6]&(1<<(i&0x3f)) != 0
+}
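
The indexing arithmetic is easiest to see with concrete values: bit i lives in word i>>6 at position i&0x3f. The standalone sketch below copies the same two methods, since the type above is unexported and cannot be reached from outside the package:

```go
package main

import "fmt"

// Same layout as the unexported bitfield above: 1024 words cover 65536 bits.
type bitfield [1024]uint64

func (b *bitfield) set(i uint16)      { b[i>>6] |= 1 << (i & 0x3f) }
func (b *bitfield) has(i uint16) bool { return b[i>>6]&(1<<(i&0x3f)) != 0 }

func main() {
	var seen bitfield
	seen.set(0x3f) // word 0, bit 63
	seen.set(0x40) // word 1, bit 0
	// Prints: true true false
	fmt.Println(seen.has(0x3f), seen.has(0x40), seen.has(1))
}
```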
diff --git a/vendor/github.com/google/gopacket/layers/cdp.go b/vendor/github.com/google/gopacket/layers/cdp.go
new file mode 100644
index 0000000..095f926
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/cdp.go
@@ -0,0 +1,659 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// Enum types courtesy of...
+//   http://search.cpan.org/~mchapman/Net-CDP-0.09/lib/Net/CDP.pm
+//   https://code.google.com/p/ladvd/
+//   http://anonsvn.wireshark.org/viewvc/releases/wireshark-1.8.6/epan/dissectors/packet-cdp.c
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+// CDPTLVType is the type of each TLV value in a CiscoDiscovery packet.
+type CDPTLVType uint16
+
+// CDPTLVType values.
+const (
+	CDPTLVDevID              CDPTLVType = 0x0001
+	CDPTLVAddress            CDPTLVType = 0x0002
+	CDPTLVPortID             CDPTLVType = 0x0003
+	CDPTLVCapabilities       CDPTLVType = 0x0004
+	CDPTLVVersion            CDPTLVType = 0x0005
+	CDPTLVPlatform           CDPTLVType = 0x0006
+	CDPTLVIPPrefix           CDPTLVType = 0x0007
+	CDPTLVHello              CDPTLVType = 0x0008
+	CDPTLVVTPDomain          CDPTLVType = 0x0009
+	CDPTLVNativeVLAN         CDPTLVType = 0x000a
+	CDPTLVFullDuplex         CDPTLVType = 0x000b
+	CDPTLVVLANReply          CDPTLVType = 0x000e
+	CDPTLVVLANQuery          CDPTLVType = 0x000f
+	CDPTLVPower              CDPTLVType = 0x0010
+	CDPTLVMTU                CDPTLVType = 0x0011
+	CDPTLVExtendedTrust      CDPTLVType = 0x0012
+	CDPTLVUntrustedCOS       CDPTLVType = 0x0013
+	CDPTLVSysName            CDPTLVType = 0x0014
+	CDPTLVSysOID             CDPTLVType = 0x0015
+	CDPTLVMgmtAddresses      CDPTLVType = 0x0016
+	CDPTLVLocation           CDPTLVType = 0x0017
+	CDPTLVExternalPortID     CDPTLVType = 0x0018
+	CDPTLVPowerRequested     CDPTLVType = 0x0019
+	CDPTLVPowerAvailable     CDPTLVType = 0x001a
+	CDPTLVPortUnidirectional CDPTLVType = 0x001b
+	CDPTLVEnergyWise         CDPTLVType = 0x001d
+	CDPTLVSparePairPOE       CDPTLVType = 0x001f
+)
+
+// CiscoDiscoveryValue is a TLV value inside a CiscoDiscovery packet layer.
+type CiscoDiscoveryValue struct {
+	Type   CDPTLVType
+	Length uint16
+	Value  []byte
+}
+
+// CiscoDiscovery is a packet layer containing the Cisco Discovery Protocol.
+// See http://www.cisco.com/univercd/cc/td/doc/product/lan/trsrb/frames.htm#31885
+type CiscoDiscovery struct {
+	BaseLayer
+	Version  byte
+	TTL      byte
+	Checksum uint16
+	Values   []CiscoDiscoveryValue
+}
+
+// CDPCapability is the set of capabilities advertised by a CDP device.
+type CDPCapability uint32
+
+// CDPCapability values.
+const (
+	CDPCapMaskRouter     CDPCapability = 0x0001
+	CDPCapMaskTBBridge   CDPCapability = 0x0002
+	CDPCapMaskSPBridge   CDPCapability = 0x0004
+	CDPCapMaskSwitch     CDPCapability = 0x0008
+	CDPCapMaskHost       CDPCapability = 0x0010
+	CDPCapMaskIGMPFilter CDPCapability = 0x0020
+	CDPCapMaskRepeater   CDPCapability = 0x0040
+	CDPCapMaskPhone      CDPCapability = 0x0080
+	CDPCapMaskRemote     CDPCapability = 0x0100
+)
+
+// CDPCapabilities represents the capabilities of a device
+type CDPCapabilities struct {
+	L3Router        bool
+	TBBridge        bool
+	SPBridge        bool
+	L2Switch        bool
+	IsHost          bool
+	IGMPFilter      bool
+	L1Repeater      bool
+	IsPhone         bool
+	RemotelyManaged bool
+}
+
+// CDP Power-over-Ethernet values.
+const (
+	CDPPoEFourWire  byte = 0x01
+	CDPPoEPDArch    byte = 0x02
+	CDPPoEPDRequest byte = 0x04
+	CDPPoEPSE       byte = 0x08
+)
+
+// CDPSparePairPoE provides information on PoE.
+type CDPSparePairPoE struct {
+	PSEFourWire  bool // Supported / Not supported
+	PDArchShared bool // Shared / Independent
+	PDRequestOn  bool // On / Off
+	PSEOn        bool // On / Off
+}
+
+// CDPVLANDialogue encapsulates a VLAN Query/Reply
+type CDPVLANDialogue struct {
+	ID   uint8
+	VLAN uint16
+}
+
+// CDPPowerDialogue encapsulates a Power Query/Reply
+type CDPPowerDialogue struct {
+	ID     uint16
+	MgmtID uint16
+	Values []uint32
+}
+
+// CDPLocation provides location information for a CDP device.
+type CDPLocation struct {
+	Type     uint8 // Undocumented
+	Location string
+}
+
+// CDPHello is a Cisco Hello message (undocumented, hence the "Unknown" fields)
+type CDPHello struct {
+	OUI              []byte
+	ProtocolID       uint16
+	ClusterMaster    net.IP
+	Unknown1         net.IP
+	Version          byte
+	SubVersion       byte
+	Status           byte
+	Unknown2         byte
+	ClusterCommander net.HardwareAddr
+	SwitchMAC        net.HardwareAddr
+	Unknown3         byte
+	ManagementVLAN   uint16
+}
+
+// CDPEnergyWiseSubtype is used within CDP to define TLV values.
+type CDPEnergyWiseSubtype uint32
+
+// CDPEnergyWiseSubtype values.
+const (
+	CDPEnergyWiseRole    CDPEnergyWiseSubtype = 0x00000007
+	CDPEnergyWiseDomain  CDPEnergyWiseSubtype = 0x00000008
+	CDPEnergyWiseName    CDPEnergyWiseSubtype = 0x00000009
+	CDPEnergyWiseReplyTo CDPEnergyWiseSubtype = 0x00000017
+)
+
+// CDPEnergyWise is used by CDP to monitor and control power usage.
+type CDPEnergyWise struct {
+	EncryptedData  []byte
+	Unknown1       uint32
+	SequenceNumber uint32
+	ModelNumber    string
+	Unknown2       uint16
+	HardwareID     string
+	SerialNum      string
+	Unknown3       []byte
+	Role           string
+	Domain         string
+	Name           string
+	ReplyUnknown1  []byte
+	ReplyPort      []byte
+	ReplyAddress   []byte
+	ReplyUnknown2  []byte
+	ReplyUnknown3  []byte
+}
+
+// CiscoDiscoveryInfo represents the decoded details for a set of CiscoDiscoveryValues
+type CiscoDiscoveryInfo struct {
+	BaseLayer
+	CDPHello
+	DeviceID         string
+	Addresses        []net.IP
+	PortID           string
+	Capabilities     CDPCapabilities
+	Version          string
+	Platform         string
+	IPPrefixes       []net.IPNet
+	VTPDomain        string
+	NativeVLAN       uint16
+	FullDuplex       bool
+	VLANReply        CDPVLANDialogue
+	VLANQuery        CDPVLANDialogue
+	PowerConsumption uint16
+	MTU              uint32
+	ExtendedTrust    uint8
+	UntrustedCOS     uint8
+	SysName          string
+	SysOID           string
+	MgmtAddresses    []net.IP
+	Location         CDPLocation
+	PowerRequest     CDPPowerDialogue
+	PowerAvailable   CDPPowerDialogue
+	SparePairPoe     CDPSparePairPoE
+	EnergyWise       CDPEnergyWise
+	Unknown          []CiscoDiscoveryValue
+}
+
+// LayerType returns gopacket.LayerTypeCiscoDiscovery.
+func (c *CiscoDiscovery) LayerType() gopacket.LayerType {
+	return LayerTypeCiscoDiscovery
+}
+
+func decodeCiscoDiscovery(data []byte, p gopacket.PacketBuilder) error {
+	c := &CiscoDiscovery{
+		Version:  data[0],
+		TTL:      data[1],
+		Checksum: binary.BigEndian.Uint16(data[2:4]),
+	}
+	if c.Version != 1 && c.Version != 2 {
+		return fmt.Errorf("Invalid CiscoDiscovery version number %d", c.Version)
+	}
+	var err error
+	c.Values, err = decodeCiscoDiscoveryTLVs(data[4:], p)
+	if err != nil {
+		return err
+	}
+	c.Contents = data[0:4]
+	c.Payload = data[4:]
+	p.AddLayer(c)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeCiscoDiscoveryInfo))
+}
+
+// LayerType returns gopacket.LayerTypeCiscoDiscoveryInfo.
+func (c *CiscoDiscoveryInfo) LayerType() gopacket.LayerType {
+	return LayerTypeCiscoDiscoveryInfo
+}
+
+func decodeCiscoDiscoveryTLVs(data []byte, p gopacket.PacketBuilder) (values []CiscoDiscoveryValue, err error) {
+	for len(data) > 0 {
+		if len(data) < 4 {
+			p.SetTruncated()
+			return nil, errors.New("CDP TLV < 4 bytes")
+		}
+		val := CiscoDiscoveryValue{
+			Type:   CDPTLVType(binary.BigEndian.Uint16(data[:2])),
+			Length: binary.BigEndian.Uint16(data[2:4]),
+		}
+		if val.Length < 4 {
+			err = fmt.Errorf("Invalid CiscoDiscovery value length %d", val.Length)
+			break
+		} else if len(data) < int(val.Length) {
+			p.SetTruncated()
+			return nil, fmt.Errorf("CDP TLV < length %d", val.Length)
+		}
+		val.Value = data[4:val.Length]
+		values = append(values, val)
+		data = data[val.Length:]
+	}
+	return
+}
+
+func decodeCiscoDiscoveryInfo(data []byte, p gopacket.PacketBuilder) error {
+	var err error
+	info := &CiscoDiscoveryInfo{BaseLayer: BaseLayer{Contents: data}}
+	p.AddLayer(info)
+	values, err := decodeCiscoDiscoveryTLVs(data, p)
+	if err != nil { // Unlikely, as parent decode will fail, but better safe...
+		return err
+	}
+	for _, val := range values {
+		switch val.Type {
+		case CDPTLVDevID:
+			info.DeviceID = string(val.Value)
+		case CDPTLVAddress:
+			if err = checkCDPTLVLen(val, 4); err != nil {
+				return err
+			}
+			info.Addresses, err = decodeAddresses(val.Value)
+			if err != nil {
+				return err
+			}
+		case CDPTLVPortID:
+			info.PortID = string(val.Value)
+		case CDPTLVCapabilities:
+			if err = checkCDPTLVLen(val, 4); err != nil {
+				return err
+			}
+			val := CDPCapability(binary.BigEndian.Uint32(val.Value[0:4]))
+			info.Capabilities.L3Router = (val&CDPCapMaskRouter > 0)
+			info.Capabilities.TBBridge = (val&CDPCapMaskTBBridge > 0)
+			info.Capabilities.SPBridge = (val&CDPCapMaskSPBridge > 0)
+			info.Capabilities.L2Switch = (val&CDPCapMaskSwitch > 0)
+			info.Capabilities.IsHost = (val&CDPCapMaskHost > 0)
+			info.Capabilities.IGMPFilter = (val&CDPCapMaskIGMPFilter > 0)
+			info.Capabilities.L1Repeater = (val&CDPCapMaskRepeater > 0)
+			info.Capabilities.IsPhone = (val&CDPCapMaskPhone > 0)
+			info.Capabilities.RemotelyManaged = (val&CDPCapMaskRemote > 0)
+		case CDPTLVVersion:
+			info.Version = string(val.Value)
+		case CDPTLVPlatform:
+			info.Platform = string(val.Value)
+		case CDPTLVIPPrefix:
+			v := val.Value
+			l := len(v)
+			if l%5 == 0 && l >= 5 {
+				for len(v) > 0 {
+					_, ipnet, _ := net.ParseCIDR(fmt.Sprintf("%d.%d.%d.%d/%d", v[0], v[1], v[2], v[3], v[4]))
+					info.IPPrefixes = append(info.IPPrefixes, *ipnet)
+					v = v[5:]
+				}
+			} else {
+				return fmt.Errorf("Invalid TLV %v length %d", val.Type, len(val.Value))
+			}
+		case CDPTLVHello:
+			if err = checkCDPTLVLen(val, 32); err != nil {
+				return err
+			}
+			v := val.Value
+			info.CDPHello.OUI = v[0:3]
+			info.CDPHello.ProtocolID = binary.BigEndian.Uint16(v[3:5])
+			info.CDPHello.ClusterMaster = v[5:9]
+			info.CDPHello.Unknown1 = v[9:13]
+			info.CDPHello.Version = v[13]
+			info.CDPHello.SubVersion = v[14]
+			info.CDPHello.Status = v[15]
+			info.CDPHello.Unknown2 = v[16]
+			info.CDPHello.ClusterCommander = v[17:23]
+			info.CDPHello.SwitchMAC = v[23:29]
+			info.CDPHello.Unknown3 = v[29]
+			info.CDPHello.ManagementVLAN = binary.BigEndian.Uint16(v[30:32])
+		case CDPTLVVTPDomain:
+			info.VTPDomain = string(val.Value)
+		case CDPTLVNativeVLAN:
+			if err = checkCDPTLVLen(val, 2); err != nil {
+				return err
+			}
+			info.NativeVLAN = binary.BigEndian.Uint16(val.Value[0:2])
+		case CDPTLVFullDuplex:
+			if err = checkCDPTLVLen(val, 1); err != nil {
+				return err
+			}
+			info.FullDuplex = (val.Value[0] == 1)
+		case CDPTLVVLANReply:
+			if err = checkCDPTLVLen(val, 3); err != nil {
+				return err
+			}
+			info.VLANReply.ID = uint8(val.Value[0])
+			info.VLANReply.VLAN = binary.BigEndian.Uint16(val.Value[1:3])
+		case CDPTLVVLANQuery:
+			if err = checkCDPTLVLen(val, 3); err != nil {
+				return err
+			}
+			info.VLANQuery.ID = uint8(val.Value[0])
+			info.VLANQuery.VLAN = binary.BigEndian.Uint16(val.Value[1:3])
+		case CDPTLVPower:
+			if err = checkCDPTLVLen(val, 2); err != nil {
+				return err
+			}
+			info.PowerConsumption = binary.BigEndian.Uint16(val.Value[0:2])
+		case CDPTLVMTU:
+			if err = checkCDPTLVLen(val, 4); err != nil {
+				return err
+			}
+			info.MTU = binary.BigEndian.Uint32(val.Value[0:4])
+		case CDPTLVExtendedTrust:
+			if err = checkCDPTLVLen(val, 1); err != nil {
+				return err
+			}
+			info.ExtendedTrust = uint8(val.Value[0])
+		case CDPTLVUntrustedCOS:
+			if err = checkCDPTLVLen(val, 1); err != nil {
+				return err
+			}
+			info.UntrustedCOS = uint8(val.Value[0])
+		case CDPTLVSysName:
+			info.SysName = string(val.Value)
+		case CDPTLVSysOID:
+			info.SysOID = string(val.Value)
+		case CDPTLVMgmtAddresses:
+			if err = checkCDPTLVLen(val, 4); err != nil {
+				return err
+			}
+			info.MgmtAddresses, err = decodeAddresses(val.Value)
+			if err != nil {
+				return err
+			}
+		case CDPTLVLocation:
+			if err = checkCDPTLVLen(val, 2); err != nil {
+				return err
+			}
+			info.Location.Type = uint8(val.Value[0])
+			info.Location.Location = string(val.Value[1:])
+
+			//		case CDPTLVLExternalPortID:
+			//			Undocumented
+		case CDPTLVPowerRequested:
+			if err = checkCDPTLVLen(val, 4); err != nil {
+				return err
+			}
+			info.PowerRequest.ID = binary.BigEndian.Uint16(val.Value[0:2])
+			info.PowerRequest.MgmtID = binary.BigEndian.Uint16(val.Value[2:4])
+			for n := 4; n < len(val.Value); n += 4 {
+				info.PowerRequest.Values = append(info.PowerRequest.Values, binary.BigEndian.Uint32(val.Value[n:n+4]))
+			}
+		case CDPTLVPowerAvailable:
+			if err = checkCDPTLVLen(val, 4); err != nil {
+				return err
+			}
+			info.PowerAvailable.ID = binary.BigEndian.Uint16(val.Value[0:2])
+			info.PowerAvailable.MgmtID = binary.BigEndian.Uint16(val.Value[2:4])
+			for n := 4; n < len(val.Value); n += 4 {
+				info.PowerAvailable.Values = append(info.PowerAvailable.Values, binary.BigEndian.Uint32(val.Value[n:n+4]))
+			}
+			//		case CDPTLVPortUnidirectional
+			//			Undocumented
+		case CDPTLVEnergyWise:
+			if err = checkCDPTLVLen(val, 72); err != nil {
+				return err
+			}
+			info.EnergyWise.EncryptedData = val.Value[0:20]
+			info.EnergyWise.Unknown1 = binary.BigEndian.Uint32(val.Value[20:24])
+			info.EnergyWise.SequenceNumber = binary.BigEndian.Uint32(val.Value[24:28])
+			info.EnergyWise.ModelNumber = string(val.Value[28:44])
+			info.EnergyWise.Unknown2 = binary.BigEndian.Uint16(val.Value[44:46])
+			info.EnergyWise.HardwareID = string(val.Value[46:49])
+			info.EnergyWise.SerialNum = string(val.Value[49:60])
+			info.EnergyWise.Unknown3 = val.Value[60:68]
+			tlvLen := binary.BigEndian.Uint16(val.Value[68:70])
+			tlvNum := binary.BigEndian.Uint16(val.Value[70:72])
+			data := val.Value[72:]
+			if len(data) < int(tlvLen) {
+				return fmt.Errorf("Invalid TLV length %d vs %d", tlvLen, len(data))
+			}
+			numSeen := 0
+			for len(data) > 8 {
+				numSeen++
+				if numSeen > int(tlvNum) { // Too many TLV's ?
+					return fmt.Errorf("Too many TLV's - wanted %d, saw %d", tlvNum, numSeen)
+				}
+				tType := CDPEnergyWiseSubtype(binary.BigEndian.Uint32(data[0:4]))
+				tLen := int(binary.BigEndian.Uint32(data[4:8]))
+				if tLen > len(data)-8 {
+					return fmt.Errorf("Invalid TLV length %d vs %d", tLen, len(data)-8)
+				}
+				data = data[8:]
+				switch tType {
+				case CDPEnergyWiseRole:
+					info.EnergyWise.Role = string(data[:])
+				case CDPEnergyWiseDomain:
+					info.EnergyWise.Domain = string(data[:])
+				case CDPEnergyWiseName:
+					info.EnergyWise.Name = string(data[:])
+				case CDPEnergyWiseReplyTo:
+					if len(data) >= 18 {
+						info.EnergyWise.ReplyUnknown1 = data[0:2]
+						info.EnergyWise.ReplyPort = data[2:4]
+						info.EnergyWise.ReplyAddress = data[4:8]
+						info.EnergyWise.ReplyUnknown2 = data[8:10]
+						info.EnergyWise.ReplyUnknown3 = data[10:14]
+					}
+				}
+				data = data[tLen:]
+			}
+		case CDPTLVSparePairPOE:
+			if err = checkCDPTLVLen(val, 1); err != nil {
+				return err
+			}
+			v := val.Value[0]
+			info.SparePairPoe.PSEFourWire = (v&CDPPoEFourWire > 0)
+			info.SparePairPoe.PDArchShared = (v&CDPPoEPDArch > 0)
+			info.SparePairPoe.PDRequestOn = (v&CDPPoEPDRequest > 0)
+			info.SparePairPoe.PSEOn = (v&CDPPoEPSE > 0)
+		default:
+			info.Unknown = append(info.Unknown, val)
+		}
+	}
+	return nil
+}
+
+// CDP Protocol Types
+const (
+	CDPProtocolTypeNLPID byte = 1
+	CDPProtocolType802_2 byte = 2
+)
+
+// CDPAddressType is used to define TLV values within CDP addresses.
+type CDPAddressType uint64
+
+// CDP Address types.
+const (
+	CDPAddressTypeCLNP      CDPAddressType = 0x81
+	CDPAddressTypeIPV4      CDPAddressType = 0xcc
+	CDPAddressTypeIPV6      CDPAddressType = 0xaaaa030000000800
+	CDPAddressTypeDECNET    CDPAddressType = 0xaaaa030000006003
+	CDPAddressTypeAPPLETALK CDPAddressType = 0xaaaa03000000809b
+	CDPAddressTypeIPX       CDPAddressType = 0xaaaa030000008137
+	CDPAddressTypeVINES     CDPAddressType = 0xaaaa0300000080c4
+	CDPAddressTypeXNS       CDPAddressType = 0xaaaa030000000600
+	CDPAddressTypeAPOLLO    CDPAddressType = 0xaaaa030000008019
+)
+
+func decodeAddresses(v []byte) (addresses []net.IP, err error) {
+	numaddr := int(binary.BigEndian.Uint32(v[0:4]))
+	if numaddr < 1 {
+		return nil, fmt.Errorf("Invalid Address TLV number %d", numaddr)
+	}
+	v = v[4:]
+	if len(v) < numaddr*8 {
+		return nil, fmt.Errorf("Invalid Address TLV length %d", len(v))
+	}
+	for i := 0; i < numaddr; i++ {
+		prottype := v[0]
+		if prottype != CDPProtocolTypeNLPID && prottype != CDPProtocolType802_2 { // invalid protocol type
+			return nil, fmt.Errorf("Invalid Address Protocol %d", prottype)
+		}
+		protlen := int(v[1])
+		if (prottype == CDPProtocolTypeNLPID && protlen != 1) ||
+			(prottype == CDPProtocolType802_2 && protlen != 3 && protlen != 8) { // invalid length
+			return nil, fmt.Errorf("Invalid Address Protocol length %d", protlen)
+		}
+		plen := make([]byte, 8)
+		copy(plen[8-protlen:], v[2:2+protlen])
+		protocol := CDPAddressType(binary.BigEndian.Uint64(plen))
+		v = v[2+protlen:]
+		addrlen := binary.BigEndian.Uint16(v[0:2])
+		ab := v[2 : 2+addrlen]
+		if protocol == CDPAddressTypeIPV4 && addrlen == 4 {
+			addresses = append(addresses, net.IPv4(ab[0], ab[1], ab[2], ab[3]))
+		} else if protocol == CDPAddressTypeIPV6 && addrlen == 16 {
+			addresses = append(addresses, net.IP(ab))
+		} else {
+			// only handle IPV4 & IPV6 for now
+		}
+		v = v[2+addrlen:]
+		if len(v) < 8 {
+			break
+		}
+	}
+	return
+}
+
+func (t CDPTLVType) String() (s string) {
+	switch t {
+	case CDPTLVDevID:
+		s = "Device ID"
+	case CDPTLVAddress:
+		s = "Addresses"
+	case CDPTLVPortID:
+		s = "Port ID"
+	case CDPTLVCapabilities:
+		s = "Capabilities"
+	case CDPTLVVersion:
+		s = "Software Version"
+	case CDPTLVPlatform:
+		s = "Platform"
+	case CDPTLVIPPrefix:
+		s = "IP Prefix"
+	case CDPTLVHello:
+		s = "Protocol Hello"
+	case CDPTLVVTPDomain:
+		s = "VTP Management Domain"
+	case CDPTLVNativeVLAN:
+		s = "Native VLAN"
+	case CDPTLVFullDuplex:
+		s = "Full Duplex"
+	case CDPTLVVLANReply:
+		s = "VoIP VLAN Reply"
+	case CDPTLVVLANQuery:
+		s = "VLANQuery"
+	case CDPTLVPower:
+		s = "Power consumption"
+	case CDPTLVMTU:
+		s = "MTU"
+	case CDPTLVExtendedTrust:
+		s = "Extended Trust Bitmap"
+	case CDPTLVUntrustedCOS:
+		s = "Untrusted Port CoS"
+	case CDPTLVSysName:
+		s = "System Name"
+	case CDPTLVSysOID:
+		s = "System OID"
+	case CDPTLVMgmtAddresses:
+		s = "Management Addresses"
+	case CDPTLVLocation:
+		s = "Location"
+	case CDPTLVExternalPortID:
+		s = "External Port ID"
+	case CDPTLVPowerRequested:
+		s = "Power Requested"
+	case CDPTLVPowerAvailable:
+		s = "Power Available"
+	case CDPTLVPortUnidirectional:
+		s = "Port Unidirectional"
+	case CDPTLVEnergyWise:
+		s = "Energy Wise"
+	case CDPTLVSparePairPOE:
+		s = "Spare Pair POE"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (a CDPAddressType) String() (s string) {
+	switch a {
+	case CDPAddressTypeCLNP:
+		s = "Connectionless Network Protocol"
+	case CDPAddressTypeIPV4:
+		s = "IPv4"
+	case CDPAddressTypeIPV6:
+		s = "IPv6"
+	case CDPAddressTypeDECNET:
+		s = "DECnet Phase IV"
+	case CDPAddressTypeAPPLETALK:
+		s = "Apple Talk"
+	case CDPAddressTypeIPX:
+		s = "Novell IPX"
+	case CDPAddressTypeVINES:
+		s = "Banyan VINES"
+	case CDPAddressTypeXNS:
+		s = "Xerox Network Systems"
+	case CDPAddressTypeAPOLLO:
+		s = "Apollo"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t CDPEnergyWiseSubtype) String() (s string) {
+	switch t {
+	case CDPEnergyWiseRole:
+		s = "Role"
+	case CDPEnergyWiseDomain:
+		s = "Domain"
+	case CDPEnergyWiseName:
+		s = "Name"
+	case CDPEnergyWiseReplyTo:
+		s = "ReplyTo"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func checkCDPTLVLen(v CiscoDiscoveryValue, l int) (err error) {
+	if len(v.Value) < l {
+		err = fmt.Errorf("Invalid TLV %v length %d", v.Type, len(v.Value))
+	}
+	return
+}
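
For illustration, a self-contained sketch that hand-builds a minimal CDP payload (version, TTL, unverified checksum, then Device ID and Port ID TLVs) and decodes it through the two layers above. buildTLV and the field values are inventions for this example, not part of gopacket:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// buildTLV frames a CDP TLV: 2-byte type, 2-byte total length (header included),
// then the value. It is a local helper for this sketch only.
func buildTLV(t uint16, value []byte) []byte {
	b := make([]byte, 4+len(value))
	binary.BigEndian.PutUint16(b[0:2], t)
	binary.BigEndian.PutUint16(b[2:4], uint16(4+len(value)))
	copy(b[4:], value)
	return b
}

func main() {
	// Version 2, TTL 180, zero checksum (the decoder above does not verify it).
	payload := []byte{0x02, 0xb4, 0x00, 0x00}
	payload = append(payload, buildTLV(0x0001, []byte("switch-1"))...) // Device ID
	payload = append(payload, buildTLV(0x0003, []byte("Gi0/1"))...)    // Port ID

	pkt := gopacket.NewPacket(payload, layers.LayerTypeCiscoDiscovery, gopacket.Default)
	if l := pkt.Layer(layers.LayerTypeCiscoDiscoveryInfo); l != nil {
		info := l.(*layers.CiscoDiscoveryInfo)
		// Prints: switch-1 Gi0/1
		fmt.Println(info.DeviceID, info.PortID)
	}
}
```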
diff --git a/vendor/github.com/google/gopacket/layers/ctp.go b/vendor/github.com/google/gopacket/layers/ctp.go
new file mode 100644
index 0000000..8287584
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ctp.go
@@ -0,0 +1,109 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/google/gopacket"
+)
+
+// EthernetCTPFunction is the function code used by the EthernetCTP protocol to identify each
+// EthernetCTP layer.
+type EthernetCTPFunction uint16
+
+// EthernetCTPFunction values.
+const (
+	EthernetCTPFunctionReply       EthernetCTPFunction = 1
+	EthernetCTPFunctionForwardData EthernetCTPFunction = 2
+)
+
+// EthernetCTP implements the EthernetCTP protocol, see http://www.mit.edu/people/jhawk/ctp.html.
+// We split EthernetCTP up into the top-level EthernetCTP layer, followed by zero or more
+// EthernetCTPForwardData layers, followed by a final EthernetCTPReply layer.
+type EthernetCTP struct {
+	BaseLayer
+	SkipCount uint16
+}
+
+// LayerType returns gopacket.LayerTypeEthernetCTP.
+func (c *EthernetCTP) LayerType() gopacket.LayerType {
+	return LayerTypeEthernetCTP
+}
+
+// EthernetCTPForwardData is the ForwardData layer inside EthernetCTP.  See EthernetCTP's docs for more
+// details.
+type EthernetCTPForwardData struct {
+	BaseLayer
+	Function       EthernetCTPFunction
+	ForwardAddress []byte
+}
+
+// LayerType returns gopacket.LayerTypeEthernetCTPForwardData.
+func (c *EthernetCTPForwardData) LayerType() gopacket.LayerType {
+	return LayerTypeEthernetCTPForwardData
+}
+
+// ForwardEndpoint returns the EthernetCTPForwardData ForwardAddress as an endpoint.
+func (c *EthernetCTPForwardData) ForwardEndpoint() gopacket.Endpoint {
+	return gopacket.NewEndpoint(EndpointMAC, c.ForwardAddress)
+}
+
+// EthernetCTPReply is the Reply layer inside EthernetCTP.  See EthernetCTP's docs for more details.
+type EthernetCTPReply struct {
+	BaseLayer
+	Function      EthernetCTPFunction
+	ReceiptNumber uint16
+	Data          []byte
+}
+
+// LayerType returns gopacket.LayerTypeEthernetCTPReply.
+func (c *EthernetCTPReply) LayerType() gopacket.LayerType {
+	return LayerTypeEthernetCTPReply
+}
+
+// Payload returns the EthernetCTP reply's Data bytes.
+func (c *EthernetCTPReply) Payload() []byte { return c.Data }
+
+func decodeEthernetCTP(data []byte, p gopacket.PacketBuilder) error {
+	c := &EthernetCTP{
+		SkipCount: binary.LittleEndian.Uint16(data[:2]),
+		BaseLayer: BaseLayer{data[:2], data[2:]},
+	}
+	if c.SkipCount%2 != 0 {
+		return fmt.Errorf("EthernetCTP skip count is odd: %d", c.SkipCount)
+	}
+	p.AddLayer(c)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeEthernetCTPFromFunctionType))
+}
+
+// decodeEthernetCTPFromFunctionType reads in the first 2 bytes to determine the EthernetCTP
+// layer type to decode next, then decodes based on that.
+func decodeEthernetCTPFromFunctionType(data []byte, p gopacket.PacketBuilder) error {
+	function := EthernetCTPFunction(binary.LittleEndian.Uint16(data[:2]))
+	switch function {
+	case EthernetCTPFunctionReply:
+		reply := &EthernetCTPReply{
+			Function:      function,
+			ReceiptNumber: binary.LittleEndian.Uint16(data[2:4]),
+			Data:          data[4:],
+			BaseLayer:     BaseLayer{data, nil},
+		}
+		p.AddLayer(reply)
+		p.SetApplicationLayer(reply)
+		return nil
+	case EthernetCTPFunctionForwardData:
+		forward := &EthernetCTPForwardData{
+			Function:       function,
+			ForwardAddress: data[2:8],
+			BaseLayer:      BaseLayer{data[:8], data[8:]},
+		}
+		p.AddLayer(forward)
+		return p.NextDecoder(gopacket.DecodeFunc(decodeEthernetCTPFromFunctionType))
+	}
+	return fmt.Errorf("Unknown EthernetCTP function type %v", function)
+}
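
A self-contained sketch of the layer chain above: a hand-built CTP payload with a zero skip count, one Forward Data block and a final Reply block, decoded via gopacket. All multi-byte fields are little-endian, per the CTP spec; the forward address and receipt number are made up:

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	payload := []byte{
		0x00, 0x00, // skip count = 0
		0x02, 0x00, // function = Forward Data
		0x02, 0x00, 0x00, 0x00, 0x00, 0x01, // forward address (fabricated MAC)
		0x01, 0x00, // function = Reply
		0x07, 0x00, // receipt number = 7
		0xde, 0xad, // trailing reply data
	}

	pkt := gopacket.NewPacket(payload, layers.LayerTypeEthernetCTP, gopacket.Default)
	if l := pkt.Layer(layers.LayerTypeEthernetCTPReply); l != nil {
		reply := l.(*layers.EthernetCTPReply)
		// Prints: receipt: 7 data: [222 173]
		fmt.Println("receipt:", reply.ReceiptNumber, "data:", reply.Data)
	}
}
```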
diff --git a/vendor/github.com/google/gopacket/layers/dhcpv4.go b/vendor/github.com/google/gopacket/layers/dhcpv4.go
new file mode 100644
index 0000000..67c80b7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dhcpv4.go
@@ -0,0 +1,595 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+// DHCPOp represents a BOOTP operation
+type DHCPOp byte
+
+// bootp operations
+const (
+	DHCPOpRequest DHCPOp = 1
+	DHCPOpReply   DHCPOp = 2
+)
+
+// String returns a string version of a DHCPOp.
+func (o DHCPOp) String() string {
+	switch o {
+	case DHCPOpRequest:
+		return "Request"
+	case DHCPOpReply:
+		return "Reply"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPMsgType represents a DHCP operation
+type DHCPMsgType byte
+
+// Constants that represent DHCP operations
+const (
+	DHCPMsgTypeUnspecified DHCPMsgType = iota
+	DHCPMsgTypeDiscover
+	DHCPMsgTypeOffer
+	DHCPMsgTypeRequest
+	DHCPMsgTypeDecline
+	DHCPMsgTypeAck
+	DHCPMsgTypeNak
+	DHCPMsgTypeRelease
+	DHCPMsgTypeInform
+)
+
+// String returns a string version of a DHCPMsgType.
+func (o DHCPMsgType) String() string {
+	switch o {
+	case DHCPMsgTypeUnspecified:
+		return "Unspecified"
+	case DHCPMsgTypeDiscover:
+		return "Discover"
+	case DHCPMsgTypeOffer:
+		return "Offer"
+	case DHCPMsgTypeRequest:
+		return "Request"
+	case DHCPMsgTypeDecline:
+		return "Decline"
+	case DHCPMsgTypeAck:
+		return "Ack"
+	case DHCPMsgTypeNak:
+		return "Nak"
+	case DHCPMsgTypeRelease:
+		return "Release"
+	case DHCPMsgTypeInform:
+		return "Inform"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPMagic is the RFC 2131 "magic cookie" for DHCP.
+var DHCPMagic uint32 = 0x63825363
+
+// DHCPv4 contains data for a single DHCP packet.
+type DHCPv4 struct {
+	BaseLayer
+	Operation    DHCPOp
+	HardwareType LinkType
+	HardwareLen  uint8
+	HardwareOpts uint8
+	Xid          uint32
+	Secs         uint16
+	Flags        uint16
+	ClientIP     net.IP
+	YourClientIP net.IP
+	NextServerIP net.IP
+	RelayAgentIP net.IP
+	ClientHWAddr net.HardwareAddr
+	ServerName   []byte
+	File         []byte
+	Options      DHCPOptions
+}
+
+// DHCPOptions is used to get nicely printed option lists which would normally
+// be cut off after 5 options.
+type DHCPOptions []DHCPOption
+
+// String returns a string version of the options list.
+func (o DHCPOptions) String() string {
+	buf := &bytes.Buffer{}
+	buf.WriteByte('[')
+	for i, opt := range o {
+		buf.WriteString(opt.String())
+		if i+1 != len(o) {
+			buf.WriteString(", ")
+		}
+	}
+	buf.WriteByte(']')
+	return buf.String()
+}
+
+// LayerType returns gopacket.LayerTypeDHCPv4
+func (d *DHCPv4) LayerType() gopacket.LayerType { return LayerTypeDHCPv4 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (d *DHCPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 240 {
+		df.SetTruncated()
+		return fmt.Errorf("DHCPv4 length %d too short", len(data))
+	}
+	d.Options = d.Options[:0]
+	d.Operation = DHCPOp(data[0])
+	d.HardwareType = LinkType(data[1])
+	d.HardwareLen = data[2]
+	d.HardwareOpts = data[3]
+	d.Xid = binary.BigEndian.Uint32(data[4:8])
+	d.Secs = binary.BigEndian.Uint16(data[8:10])
+	d.Flags = binary.BigEndian.Uint16(data[10:12])
+	d.ClientIP = net.IP(data[12:16])
+	d.YourClientIP = net.IP(data[16:20])
+	d.NextServerIP = net.IP(data[20:24])
+	d.RelayAgentIP = net.IP(data[24:28])
+	d.ClientHWAddr = net.HardwareAddr(data[28 : 28+d.HardwareLen])
+	d.ServerName = data[44:108]
+	d.File = data[108:236]
+	if binary.BigEndian.Uint32(data[236:240]) != DHCPMagic {
+		return InvalidMagicCookie
+	}
+
+	if len(data) <= 240 {
+		// A DHCP packet may carry no options at all; nothing more to decode.
+		return nil
+	}
+
+	options := data[240:]
+
+	stop := len(options)
+	start := 0
+	for start < stop {
+		o := DHCPOption{}
+		if err := o.decode(options[start:]); err != nil {
+			return err
+		}
+		if o.Type == DHCPOptEnd {
+			break
+		}
+		d.Options = append(d.Options, o)
+		// Check if the option is a single byte pad
+		if o.Type == DHCPOptPad {
+			start++
+		} else {
+			start += int(o.Length) + 2
+		}
+	}
+
+	d.Contents = data
+
+	return nil
+}
+
+// Len returns the length of a DHCPv4 packet.
+func (d *DHCPv4) Len() uint16 {
+	n := uint16(240)
+	for _, o := range d.Options {
+		if o.Type == DHCPOptPad {
+			n++
+		} else {
+			n += uint16(o.Length) + 2
+		}
+	}
+	n++ // for opt end
+	return n
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *DHCPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	plen := int(d.Len())
+
+	data, err := b.PrependBytes(plen)
+	if err != nil {
+		return err
+	}
+
+	data[0] = byte(d.Operation)
+	data[1] = byte(d.HardwareType)
+	if opts.FixLengths {
+		d.HardwareLen = uint8(len(d.ClientHWAddr))
+	}
+	data[2] = d.HardwareLen
+	data[3] = d.HardwareOpts
+	binary.BigEndian.PutUint32(data[4:8], d.Xid)
+	binary.BigEndian.PutUint16(data[8:10], d.Secs)
+	binary.BigEndian.PutUint16(data[10:12], d.Flags)
+	copy(data[12:16], d.ClientIP.To4())
+	copy(data[16:20], d.YourClientIP.To4())
+	copy(data[20:24], d.NextServerIP.To4())
+	copy(data[24:28], d.RelayAgentIP.To4())
+	copy(data[28:44], d.ClientHWAddr)
+	copy(data[44:108], d.ServerName)
+	copy(data[108:236], d.File)
+	binary.BigEndian.PutUint32(data[236:240], DHCPMagic)
+
+	if len(d.Options) > 0 {
+		offset := 240
+		for _, o := range d.Options {
+			if err := o.encode(data[offset:]); err != nil {
+				return err
+			}
+			// A pad option is only a single byte
+			if o.Type == DHCPOptPad {
+				offset++
+			} else {
+				offset += 2 + len(o.Data)
+			}
+		}
+		optend := NewDHCPOption(DHCPOptEnd, nil)
+		if err := optend.encode(data[offset:]); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (d *DHCPv4) CanDecode() gopacket.LayerClass {
+	return LayerTypeDHCPv4
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (d *DHCPv4) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func decodeDHCPv4(data []byte, p gopacket.PacketBuilder) error {
+	dhcp := &DHCPv4{}
+	err := dhcp.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(dhcp)
+	return p.NextDecoder(gopacket.LayerTypePayload)
+}
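
As a usage sketch of the serialization path above (shown here for readability; it is not part of the vendored file), the standalone program below builds a minimal DHCP Discover and serializes it with FixLengths so HardwareLen is derived from the client MAC. The XID and MAC are arbitrary:

```go
package main

import (
	"fmt"
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	mac, _ := net.ParseMAC("02:00:00:00:00:01")
	discover := &layers.DHCPv4{
		Operation:    layers.DHCPOpRequest,
		HardwareType: layers.LinkTypeEthernet,
		Xid:          0x12345678,
		ClientIP:     net.IPv4zero,
		YourClientIP: net.IPv4zero,
		NextServerIP: net.IPv4zero,
		RelayAgentIP: net.IPv4zero,
		ClientHWAddr: mac,
		Options: layers.DHCPOptions{
			layers.NewDHCPOption(layers.DHCPOptMessageType, []byte{byte(layers.DHCPMsgTypeDiscover)}),
		},
	}

	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{FixLengths: true}
	if err := gopacket.SerializeLayers(buf, opts, discover); err != nil {
		panic(err)
	}
	// 240 fixed bytes + message-type option (3) + end option (1) = 244 bytes.
	fmt.Println("encoded", len(buf.Bytes()), "bytes")
}
```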
+
+// DHCPOpt represents a DHCP option or parameter from RFC-2132
+type DHCPOpt byte
+
+// Constants for the DHCPOpt options.
+const (
+	DHCPOptPad                   DHCPOpt = 0
+	DHCPOptSubnetMask            DHCPOpt = 1   // 4, net.IP
+	DHCPOptTimeOffset            DHCPOpt = 2   // 4, int32 (signed seconds from UTC)
+	DHCPOptRouter                DHCPOpt = 3   // n*4, [n]net.IP
+	DHCPOptTimeServer            DHCPOpt = 4   // n*4, [n]net.IP
+	DHCPOptNameServer            DHCPOpt = 5   // n*4, [n]net.IP
+	DHCPOptDNS                   DHCPOpt = 6   // n*4, [n]net.IP
+	DHCPOptLogServer             DHCPOpt = 7   // n*4, [n]net.IP
+	DHCPOptCookieServer          DHCPOpt = 8   // n*4, [n]net.IP
+	DHCPOptLPRServer             DHCPOpt = 9   // n*4, [n]net.IP
+	DHCPOptImpressServer         DHCPOpt = 10  // n*4, [n]net.IP
+	DHCPOptResLocServer          DHCPOpt = 11  // n*4, [n]net.IP
+	DHCPOptHostname              DHCPOpt = 12  // n, string
+	DHCPOptBootfileSize          DHCPOpt = 13  // 2, uint16
+	DHCPOptMeritDumpFile         DHCPOpt = 14  // >1, string
+	DHCPOptDomainName            DHCPOpt = 15  // n, string
+	DHCPOptSwapServer            DHCPOpt = 16  // n*4, [n]net.IP
+	DHCPOptRootPath              DHCPOpt = 17  // n, string
+	DHCPOptExtensionsPath        DHCPOpt = 18  // n, string
+	DHCPOptIPForwarding          DHCPOpt = 19  // 1, bool
+	DHCPOptSourceRouting         DHCPOpt = 20  // 1, bool
+	DHCPOptPolicyFilter          DHCPOpt = 21  // 8*n, [n]{net.IP/net.IP}
+	DHCPOptDatagramMTU           DHCPOpt = 22  // 2, uint16
+	DHCPOptDefaultTTL            DHCPOpt = 23  // 1, byte
+	DHCPOptPathMTUAgingTimeout   DHCPOpt = 24  // 4, uint32
+	DHCPOptPathPlateuTableOption DHCPOpt = 25  // 2*n, []uint16
+	DHCPOptInterfaceMTU          DHCPOpt = 26  // 2, uint16
+	DHCPOptAllSubsLocal          DHCPOpt = 27  // 1, bool
+	DHCPOptBroadcastAddr         DHCPOpt = 28  // 4, net.IP
+	DHCPOptMaskDiscovery         DHCPOpt = 29  // 1, bool
+	DHCPOptMaskSupplier          DHCPOpt = 30  // 1, bool
+	DHCPOptRouterDiscovery       DHCPOpt = 31  // 1, bool
+	DHCPOptSolicitAddr           DHCPOpt = 32  // 4, net.IP
+	DHCPOptStaticRoute           DHCPOpt = 33  // n*8, [n]{net.IP/net.IP} -- note the 2nd is router not mask
+	DHCPOptARPTrailers           DHCPOpt = 34  // 1, bool
+	DHCPOptARPTimeout            DHCPOpt = 35  // 4, uint32
+	DHCPOptEthernetEncap         DHCPOpt = 36  // 1, bool
+	DHCPOptTCPTTL                DHCPOpt = 37  // 1, byte
+	DHCPOptTCPKeepAliveInt       DHCPOpt = 38  // 4, uint32
+	DHCPOptTCPKeepAliveGarbage   DHCPOpt = 39  // 1, bool
+	DHCPOptNISDomain             DHCPOpt = 40  // n, string
+	DHCPOptNISServers            DHCPOpt = 41  // 4*n,  [n]net.IP
+	DHCPOptNTPServers            DHCPOpt = 42  // 4*n, [n]net.IP
+	DHCPOptVendorOption          DHCPOpt = 43  // n, [n]byte // may be encapsulated.
+	DHCPOptNetBIOSTCPNS          DHCPOpt = 44  // 4*n, [n]net.IP
+	DHCPOptNetBIOSTCPDDS         DHCPOpt = 45  // 4*n, [n]net.IP
+	DHCPOptNETBIOSTCPNodeType    DHCPOpt = 46  // 1, magic byte
+	DHCPOptNetBIOSTCPScope       DHCPOpt = 47  // n, string
+	DHCPOptXFontServer           DHCPOpt = 48  // n, string
+	DHCPOptXDisplayManager       DHCPOpt = 49  // n, string
+	DHCPOptRequestIP             DHCPOpt = 50  // 4, net.IP
+	DHCPOptLeaseTime             DHCPOpt = 51  // 4, uint32
+	DHCPOptExtOptions            DHCPOpt = 52  // 1, 1/2/3
+	DHCPOptMessageType           DHCPOpt = 53  // 1, 1-7
+	DHCPOptServerID              DHCPOpt = 54  // 4, net.IP
+	DHCPOptParamsRequest         DHCPOpt = 55  // n, []byte
+	DHCPOptMessage               DHCPOpt = 56  // n, 3
+	DHCPOptMaxMessageSize        DHCPOpt = 57  // 2, uint16
+	DHCPOptT1                    DHCPOpt = 58  // 4, uint32
+	DHCPOptT2                    DHCPOpt = 59  // 4, uint32
+	DHCPOptClassID               DHCPOpt = 60  // n, []byte
+	DHCPOptClientID              DHCPOpt = 61  // n >=  2, []byte
+	DHCPOptDomainSearch          DHCPOpt = 119 // n, string
+	DHCPOptSIPServers            DHCPOpt = 120 // n, url
+	DHCPOptClasslessStaticRoute  DHCPOpt = 121 //
+	DHCPOptMUDURLV4              DHCPOpt = 161 // n, string
+	DHCPOptEnd                   DHCPOpt = 255
+)
+
+// String returns a string version of a DHCPOpt.
+func (o DHCPOpt) String() string {
+	switch o {
+	case DHCPOptPad:
+		return "(padding)"
+	case DHCPOptSubnetMask:
+		return "SubnetMask"
+	case DHCPOptTimeOffset:
+		return "TimeOffset"
+	case DHCPOptRouter:
+		return "Router"
+	case DHCPOptTimeServer:
+		return "rfc868" // old time server protocol stringified to dissuade confusion w. NTP
+	case DHCPOptNameServer:
+		return "ien116" // obscure nameserver protocol stringified to dissuade confusion w. DNS
+	case DHCPOptDNS:
+		return "DNS"
+	case DHCPOptLogServer:
+		return "mitLCS" // MIT LCS server protocol yada yada w. Syslog
+	case DHCPOptCookieServer:
+		return "CookieServer"
+	case DHCPOptLPRServer:
+		return "LPRServer"
+	case DHCPOptImpressServer:
+		return "ImpressServer"
+	case DHCPOptResLocServer:
+		return "ResourceLocationServer"
+	case DHCPOptHostname:
+		return "Hostname"
+	case DHCPOptBootfileSize:
+		return "BootfileSize"
+	case DHCPOptMeritDumpFile:
+		return "MeritDumpFile"
+	case DHCPOptDomainName:
+		return "DomainName"
+	case DHCPOptSwapServer:
+		return "SwapServer"
+	case DHCPOptRootPath:
+		return "RootPath"
+	case DHCPOptExtensionsPath:
+		return "ExtensionsPath"
+	case DHCPOptIPForwarding:
+		return "IPForwarding"
+	case DHCPOptSourceRouting:
+		return "SourceRouting"
+	case DHCPOptPolicyFilter:
+		return "PolicyFilter"
+	case DHCPOptDatagramMTU:
+		return "DatagramMTU"
+	case DHCPOptDefaultTTL:
+		return "DefaultTTL"
+	case DHCPOptPathMTUAgingTimeout:
+		return "PathMTUAgingTimeout"
+	case DHCPOptPathPlateuTableOption:
+		return "PathPlateuTableOption"
+	case DHCPOptInterfaceMTU:
+		return "InterfaceMTU"
+	case DHCPOptAllSubsLocal:
+		return "AllSubsLocal"
+	case DHCPOptBroadcastAddr:
+		return "BroadcastAddress"
+	case DHCPOptMaskDiscovery:
+		return "MaskDiscovery"
+	case DHCPOptMaskSupplier:
+		return "MaskSupplier"
+	case DHCPOptRouterDiscovery:
+		return "RouterDiscovery"
+	case DHCPOptSolicitAddr:
+		return "SolicitAddr"
+	case DHCPOptStaticRoute:
+		return "StaticRoute"
+	case DHCPOptARPTrailers:
+		return "ARPTrailers"
+	case DHCPOptARPTimeout:
+		return "ARPTimeout"
+	case DHCPOptEthernetEncap:
+		return "EthernetEncap"
+	case DHCPOptTCPTTL:
+		return "TCPTTL"
+	case DHCPOptTCPKeepAliveInt:
+		return "TCPKeepAliveInt"
+	case DHCPOptTCPKeepAliveGarbage:
+		return "TCPKeepAliveGarbage"
+	case DHCPOptNISDomain:
+		return "NISDomain"
+	case DHCPOptNISServers:
+		return "NISServers"
+	case DHCPOptNTPServers:
+		return "NTPServers"
+	case DHCPOptVendorOption:
+		return "VendorOption"
+	case DHCPOptNetBIOSTCPNS:
+		return "NetBIOSOverTCPNS"
+	case DHCPOptNetBIOSTCPDDS:
+		return "NetBiosOverTCPDDS"
+	case DHCPOptNETBIOSTCPNodeType:
+		return "NetBIOSOverTCPNodeType"
+	case DHCPOptNetBIOSTCPScope:
+		return "NetBIOSOverTCPScope"
+	case DHCPOptXFontServer:
+		return "XFontServer"
+	case DHCPOptXDisplayManager:
+		return "XDisplayManager"
+	case DHCPOptEnd:
+		return "(end)"
+	case DHCPOptSIPServers:
+		return "SipServers"
+	case DHCPOptRequestIP:
+		return "RequestIP"
+	case DHCPOptLeaseTime:
+		return "LeaseTime"
+	case DHCPOptExtOptions:
+		return "ExtOpts"
+	case DHCPOptMessageType:
+		return "MessageType"
+	case DHCPOptServerID:
+		return "ServerID"
+	case DHCPOptParamsRequest:
+		return "ParamsRequest"
+	case DHCPOptMessage:
+		return "Message"
+	case DHCPOptMaxMessageSize:
+		return "MaxDHCPSize"
+	case DHCPOptT1:
+		return "Timer1"
+	case DHCPOptT2:
+		return "Timer2"
+	case DHCPOptClassID:
+		return "ClassID"
+	case DHCPOptClientID:
+		return "ClientID"
+	case DHCPOptDomainSearch:
+		return "DomainSearch"
+	case DHCPOptClasslessStaticRoute:
+		return "ClasslessStaticRoute"
+	case DHCPOptMUDURLV4:
+		return "ManufacturerUsageDescriptionURL"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPOption represents a DHCP option.
+type DHCPOption struct {
+	Type   DHCPOpt
+	Length uint8
+	Data   []byte
+}
+
+// String returns a string version of a DHCP Option.
+func (o DHCPOption) String() string {
+	switch o.Type {
+
+	case DHCPOptHostname, DHCPOptMeritDumpFile, DHCPOptDomainName, DHCPOptRootPath,
+		DHCPOptExtensionsPath, DHCPOptNISDomain, DHCPOptNetBIOSTCPScope, DHCPOptXFontServer,
+		DHCPOptXDisplayManager, DHCPOptMessage, DHCPOptDomainSearch: // string
+		return fmt.Sprintf("Option(%s:%s)", o.Type, string(o.Data))
+
+	case DHCPOptMessageType:
+		if len(o.Data) != 1 {
+			return fmt.Sprintf("Option(%s:INVALID)", o.Type)
+		}
+		return fmt.Sprintf("Option(%s:%s)", o.Type, DHCPMsgType(o.Data[0]))
+
+	case DHCPOptSubnetMask, DHCPOptServerID, DHCPOptBroadcastAddr,
+		DHCPOptSolicitAddr, DHCPOptRequestIP: // net.IP
+		if len(o.Data) < 4 {
+			return fmt.Sprintf("Option(%s:INVALID)", o.Type)
+		}
+		return fmt.Sprintf("Option(%s:%s)", o.Type, net.IP(o.Data))
+
+	case DHCPOptT1, DHCPOptT2, DHCPOptLeaseTime, DHCPOptPathMTUAgingTimeout,
+		DHCPOptARPTimeout, DHCPOptTCPKeepAliveInt: // uint32
+		if len(o.Data) != 4 {
+			return fmt.Sprintf("Option(%s:INVALID)", o.Type)
+		}
+		return fmt.Sprintf("Option(%s:%d)", o.Type,
+			uint32(o.Data[0])<<24|uint32(o.Data[1])<<16|uint32(o.Data[2])<<8|uint32(o.Data[3]))
+
+	case DHCPOptParamsRequest:
+		buf := &bytes.Buffer{}
+		buf.WriteString(fmt.Sprintf("Option(%s:", o.Type))
+		for i, v := range o.Data {
+			buf.WriteString(DHCPOpt(v).String())
+			if i+1 != len(o.Data) {
+				buf.WriteByte(',')
+			}
+		}
+		buf.WriteString(")")
+		return buf.String()
+
+	default:
+		return fmt.Sprintf("Option(%s:%v)", o.Type, o.Data)
+	}
+}
+
+// NewDHCPOption constructs a new DHCPOption with a given type and data.
+func NewDHCPOption(t DHCPOpt, data []byte) DHCPOption {
+	o := DHCPOption{Type: t}
+	if data != nil {
+		o.Data = data
+		o.Length = uint8(len(data))
+	}
+	return o
+}
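+
+// newDiscoverOptionsSketch is an illustrative sketch, not part of the upstream
+// gopacket API: it builds a minimal option list for a DHCPDISCOVER with
+// NewDHCPOption, ready to be appended to a DHCPv4's Options before
+// serialization. The helper name and the chosen parameter-request list are
+// assumptions made for this sketch; 1 is the DHCPDISCOVER message-type value.
+func newDiscoverOptionsSketch() []DHCPOption {
+	return []DHCPOption{
+		NewDHCPOption(DHCPOptMessageType, []byte{1}), // 1 = DHCPDISCOVER
+		NewDHCPOption(DHCPOptParamsRequest, []byte{
+			byte(DHCPOptSubnetMask), byte(DHCPOptRouter), byte(DHCPOptDNS),
+		}),
+	}
+}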
+
+func (o *DHCPOption) encode(b []byte) error {
+	switch o.Type {
+	case DHCPOptPad, DHCPOptEnd:
+		b[0] = byte(o.Type)
+	default:
+		b[0] = byte(o.Type)
+		b[1] = o.Length
+		copy(b[2:], o.Data)
+	}
+	return nil
+}
+
+func (o *DHCPOption) decode(data []byte) error {
+	if len(data) < 1 {
+		// Pad/End have a length of 1
+		return DecOptionNotEnoughData
+	}
+	o.Type = DHCPOpt(data[0])
+	switch o.Type {
+	case DHCPOptPad, DHCPOptEnd:
+		o.Data = nil
+	default:
+		if len(data) < 2 {
+			return DecOptionNotEnoughData
+		}
+		o.Length = data[1]
+		if int(o.Length) > len(data[2:]) {
+			return DecOptionMalformed
+		}
+		o.Data = data[2 : 2+int(o.Length)]
+	}
+	return nil
+}
+
+// DHCPv4Error is used for constant errors for DHCPv4. It is needed for test asserts.
+type DHCPv4Error string
+
+// Error implements the error interface.
+func (d DHCPv4Error) Error() string {
+	return string(d)
+}
+
+const (
+	// DecOptionNotEnoughData is returned when there is not enough data during an option's decode process
+	DecOptionNotEnoughData = DHCPv4Error("Not enough data to decode")
+	// DecOptionMalformed is returned when the option is malformed
+	DecOptionMalformed = DHCPv4Error("Option is malformed")
+	// InvalidMagicCookie is returned when the magic cookie is missing from the BOOTP header
+	InvalidMagicCookie = DHCPv4Error("Bad DHCP header")
+)
diff --git a/vendor/github.com/google/gopacket/layers/dhcpv6.go b/vendor/github.com/google/gopacket/layers/dhcpv6.go
new file mode 100644
index 0000000..22a1bde
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dhcpv6.go
@@ -0,0 +1,541 @@
+// Copyright 2018 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+// DHCPv6MsgType represents a DHCPv6 operation
+type DHCPv6MsgType byte
+
+// Constants that represent DHCPv6 message types
+const (
+	DHCPv6MsgTypeUnspecified DHCPv6MsgType = iota
+	DHCPv6MsgTypeSolicit
+	DHCPv6MsgTypeAdvertise
+	DHCPv6MsgTypeRequest
+	DHCPv6MsgTypeConfirm
+	DHCPv6MsgTypeRenew
+	DHCPv6MsgTypeRebind
+	DHCPv6MsgTypeReply
+	DHCPv6MsgTypeRelease
+	DHCPv6MsgTypeDecline
+	DHCPv6MsgTypeReconfigure
+	DHCPv6MsgTypeInformationRequest
+	DHCPv6MsgTypeRelayForward
+	DHCPv6MsgTypeRelayReply
+)
+
+// String returns a string version of a DHCPv6MsgType.
+func (o DHCPv6MsgType) String() string {
+	switch o {
+	case DHCPv6MsgTypeUnspecified:
+		return "Unspecified"
+	case DHCPv6MsgTypeSolicit:
+		return "Solicit"
+	case DHCPv6MsgTypeAdvertise:
+		return "Advertise"
+	case DHCPv6MsgTypeRequest:
+		return "Request"
+	case DHCPv6MsgTypeConfirm:
+		return "Confirm"
+	case DHCPv6MsgTypeRenew:
+		return "Renew"
+	case DHCPv6MsgTypeRebind:
+		return "Rebind"
+	case DHCPv6MsgTypeReply:
+		return "Reply"
+	case DHCPv6MsgTypeRelease:
+		return "Release"
+	case DHCPv6MsgTypeDecline:
+		return "Decline"
+	case DHCPv6MsgTypeReconfigure:
+		return "Reconfigure"
+	case DHCPv6MsgTypeInformationRequest:
+		return "InformationRequest"
+	case DHCPv6MsgTypeRelayForward:
+		return "RelayForward"
+	case DHCPv6MsgTypeRelayReply:
+		return "RelayReply"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPv6 contains data for a single DHCPv6 packet.
+type DHCPv6 struct {
+	BaseLayer
+	MsgType       DHCPv6MsgType
+	HopCount      uint8
+	LinkAddr      net.IP
+	PeerAddr      net.IP
+	TransactionID []byte
+	Options       DHCPv6Options
+}
+
+// LayerType returns gopacket.LayerTypeDHCPv6
+func (d *DHCPv6) LayerType() gopacket.LayerType { return LayerTypeDHCPv6 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (d *DHCPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return fmt.Errorf("DHCPv6 length %d too short", len(data))
+	}
+	d.BaseLayer = BaseLayer{Contents: data}
+	d.Options = d.Options[:0]
+	d.MsgType = DHCPv6MsgType(data[0])
+
+	offset := 0
+	if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply {
+		if len(data) < 34 {
+			df.SetTruncated()
+			return fmt.Errorf("DHCPv6 length %d too short for message type %d", len(data), d.MsgType)
+		}
+		d.HopCount = data[1]
+		d.LinkAddr = net.IP(data[2:18])
+		d.PeerAddr = net.IP(data[18:34])
+		offset = 34
+	} else {
+		d.TransactionID = data[1:4]
+		offset = 4
+	}
+
+	stop := len(data)
+	for offset < stop {
+		o := DHCPv6Option{}
+		if err := o.decode(data[offset:]); err != nil {
+			return err
+		}
+		d.Options = append(d.Options, o)
+		offset += int(o.Length) + 4 // 2 from option code, 2 from option length
+	}
+
+	return nil
+}
+
+// Len returns the length of a DHCPv6 packet.
+func (d *DHCPv6) Len() int {
+	n := 1
+	if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply {
+		n += 33
+	} else {
+		n += 3
+	}
+
+	for _, o := range d.Options {
+		n += int(o.Length) + 4 // 2 from option code, 2 from option length
+	}
+
+	return n
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *DHCPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	plen := int(d.Len())
+
+	data, err := b.PrependBytes(plen)
+	if err != nil {
+		return err
+	}
+
+	offset := 0
+	data[0] = byte(d.MsgType)
+	if d.MsgType == DHCPv6MsgTypeRelayForward || d.MsgType == DHCPv6MsgTypeRelayReply {
+		data[1] = byte(d.HopCount)
+		copy(data[2:18], d.LinkAddr.To16())
+		copy(data[18:34], d.PeerAddr.To16())
+		offset = 34
+	} else {
+		copy(data[1:4], d.TransactionID)
+		offset = 4
+	}
+
+	if len(d.Options) > 0 {
+		for _, o := range d.Options {
+			if err := o.encode(data[offset:], opts); err != nil {
+				return err
+			}
+			offset += int(o.Length) + 4 // 2 from option code, 2 from option length
+		}
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (d *DHCPv6) CanDecode() gopacket.LayerClass {
+	return LayerTypeDHCPv6
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (d *DHCPv6) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func decodeDHCPv6(data []byte, p gopacket.PacketBuilder) error {
+	dhcp := &DHCPv6{}
+	err := dhcp.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(dhcp)
+	return p.NextDecoder(gopacket.LayerTypePayload)
+}
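+
+// dhcpv6OptionSketch is an illustrative sketch, not part of the upstream
+// gopacket API: it decodes a raw DHCPv6 message with DecodeFromBytes and
+// returns the first option carrying the requested code, or nil if none is
+// present. The helper name is an assumption made for this sketch.
+func dhcpv6OptionSketch(payload []byte, code DHCPv6Opt) (*DHCPv6Option, error) {
+	var dhcp DHCPv6
+	if err := dhcp.DecodeFromBytes(payload, gopacket.NilDecodeFeedback); err != nil {
+		return nil, err
+	}
+	for i := range dhcp.Options {
+		if dhcp.Options[i].Code == code {
+			return &dhcp.Options[i], nil
+		}
+	}
+	return nil, nil
+}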
+
+// DHCPv6StatusCode represents a DHCPv6 status code - RFC-3315
+type DHCPv6StatusCode uint16
+
+// Constants for the DHCPv6StatusCode.
+const (
+	DHCPv6StatusCodeSuccess DHCPv6StatusCode = iota
+	DHCPv6StatusCodeUnspecFail
+	DHCPv6StatusCodeNoAddrsAvail
+	DHCPv6StatusCodeNoBinding
+	DHCPv6StatusCodeNotOnLink
+	DHCPv6StatusCodeUseMulticast
+)
+
+// String returns a string version of a DHCPv6StatusCode.
+func (o DHCPv6StatusCode) String() string {
+	switch o {
+	case DHCPv6StatusCodeSuccess:
+		return "Success"
+	case DHCPv6StatusCodeUnspecFail:
+		return "UnspecifiedFailure"
+	case DHCPv6StatusCodeNoAddrsAvail:
+		return "NoAddressAvailable"
+	case DHCPv6StatusCodeNoBinding:
+		return "NoBinding"
+	case DHCPv6StatusCodeNotOnLink:
+		return "NotOnLink"
+	case DHCPv6StatusCodeUseMulticast:
+		return "UseMulticast"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPv6DUIDType represents a DHCPv6 DUID type - RFC-3315
+type DHCPv6DUIDType uint16
+
+// Constants for the DHCPv6DUIDType.
+const (
+	DHCPv6DUIDTypeLLT DHCPv6DUIDType = iota + 1
+	DHCPv6DUIDTypeEN
+	DHCPv6DUIDTypeLL
+	DHCPv6DUIDTypeUUID
+)
+
+// String returns a string version of a DHCPv6DUIDType.
+func (o DHCPv6DUIDType) String() string {
+	switch o {
+	case DHCPv6DUIDTypeLLT:
+		return "LLT"
+	case DHCPv6DUIDTypeEN:
+		return "EN"
+	case DHCPv6DUIDTypeLL:
+		return "LL"
+	case DHCPv6DUIDTypeUUID:
+		return "UUID"
+	default:
+		return "Unknown"
+	}
+}
+
+// DHCPv6DUID is the DHCP Unique Identifier, as defined in RFC 3315, section 9 (https://tools.ietf.org/html/rfc3315#page-19)
+type DHCPv6DUID struct {
+	Type DHCPv6DUIDType
+	// LLT, LL
+	HardwareType []byte
+	// EN
+	EnterpriseNumber []byte
+	// LLT
+	Time []byte
+	// LLT, LL
+	LinkLayerAddress net.HardwareAddr
+	// EN
+	Identifier []byte
+	// UUID
+	UniqueIdentifier []byte
+}
+
+// DecodeFromBytes decodes the given bytes into a DHCPv6DUID
+func (d *DHCPv6DUID) DecodeFromBytes(data []byte) error {
+	if len(data) < 2 {
+		return fmt.Errorf("Not enough bytes to decode: %d", len(data))
+	}
+
+	d.Type = DHCPv6DUIDType(binary.BigEndian.Uint16(data[:2]))
+	if d.Type == DHCPv6DUIDTypeLLT || d.Type == DHCPv6DUIDTypeLL {
+		if len(data) < 4 {
+			return fmt.Errorf("Not enough bytes to decode: %d", len(data))
+		}
+		d.HardwareType = data[2:4]
+	}
+
+	if d.Type == DHCPv6DUIDTypeLLT {
+		if len(data) < 8 {
+			return fmt.Errorf("Not enough bytes to decode: %d", len(data))
+		}
+		d.Time = data[4:8]
+		d.LinkLayerAddress = net.HardwareAddr(data[8:])
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		if len(data) < 6 {
+			return fmt.Errorf("Not enough bytes to decode: %d", len(data))
+		}
+		d.EnterpriseNumber = data[2:6]
+		d.Identifier = data[6:]
+	} else if d.Type == DHCPv6DUIDTypeUUID {
+		d.UniqueIdentifier = data[2:]
+	} else { // DHCPv6DUIDTypeLL
+		if len(data) < 4 {
+			return fmt.Errorf("Not enough bytes to decode: %d", len(data))
+		}
+		d.LinkLayerAddress = net.HardwareAddr(data[4:])
+	}
+
+	return nil
+}
+
+// Encode encodes the DHCPv6DUID in a slice of bytes
+func (d *DHCPv6DUID) Encode() []byte {
+	length := d.Len()
+	data := make([]byte, length)
+	binary.BigEndian.PutUint16(data[0:2], uint16(d.Type))
+
+	if d.Type == DHCPv6DUIDTypeLLT || d.Type == DHCPv6DUIDTypeLL {
+		copy(data[2:4], d.HardwareType)
+	}
+
+	if d.Type == DHCPv6DUIDTypeLLT {
+		copy(data[4:8], d.Time)
+		copy(data[8:], d.LinkLayerAddress)
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		copy(data[2:6], d.EnterpriseNumber)
+		copy(data[6:], d.Identifier)
+	} else {
+		copy(data[4:], d.LinkLayerAddress)
+	}
+
+	return data
+}
+
+// Len returns the length of the DHCPv6DUID, respecting the type
+func (d *DHCPv6DUID) Len() int {
+	length := 2 // d.Type
+	if d.Type == DHCPv6DUIDTypeLLT {
+		length += 2 /*HardwareType*/ + 4 /*d.Time*/ + len(d.LinkLayerAddress)
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		length += 4 /*d.EnterpriseNumber*/ + len(d.Identifier)
+	} else { // LL
+		length += 2 /*d.HardwareType*/ + len(d.LinkLayerAddress)
+	}
+
+	return length
+}
+
+func (d *DHCPv6DUID) String() string {
+	duid := "Type: " + d.Type.String() + ", "
+	if d.Type == DHCPv6DUIDTypeLLT {
+		duid += fmt.Sprintf("HardwareType: %v, Time: %v, LinkLayerAddress: %v", d.HardwareType, d.Time, d.LinkLayerAddress)
+	} else if d.Type == DHCPv6DUIDTypeEN {
+		duid += fmt.Sprintf("EnterpriseNumber: %v, Identifier: %v", d.EnterpriseNumber, d.Identifier)
+	} else { // DHCPv6DUIDTypeLL
+		duid += fmt.Sprintf("HardwareType: %v, LinkLayerAddress: %v", d.HardwareType, d.LinkLayerAddress)
+	}
+	return duid
+}
+
+func decodeDHCPv6DUID(data []byte) (*DHCPv6DUID, error) {
+	duid := &DHCPv6DUID{}
+	err := duid.DecodeFromBytes(data)
+	if err != nil {
+		return nil, err
+	}
+	return duid, nil
+}
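+
+// newClientIDOptionSketch is an illustrative sketch, not part of the upstream
+// gopacket API: it builds a DUID-LL from a MAC address and wraps it into a
+// Client Identifier option via NewDHCPv6Option. The helper name and the
+// hardware type 1 (Ethernet) are assumptions made for this sketch.
+func newClientIDOptionSketch(mac net.HardwareAddr) DHCPv6Option {
+	duid := &DHCPv6DUID{
+		Type:             DHCPv6DUIDTypeLL,
+		HardwareType:     []byte{0x00, 0x01}, // IANA hardware type 1 = Ethernet
+		LinkLayerAddress: mac,
+	}
+	return NewDHCPv6Option(DHCPv6OptClientID, duid.Encode())
+}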
+
+// DHCPv6IAPD represents an Identity Association for Prefix Delegation (IA_PD),
+// as defined in RFC 3633.
+type DHCPv6IAPD struct {
+	IAID [4]byte
+	T1   uint32
+	T2   uint32
+	PD   DHCPv6PD
+}
+
+// DHCPv6PD represents the IA Prefix option carried inside an IA_PD (RFC 3633).
+type DHCPv6PD struct {
+	PrefLifeTime  uint32
+	ValidLifeTime uint32
+	PrefixLength  byte
+	Prefix        net.IP
+}
+
+// DecodeFromBytes decodes the given bytes into a DHCPv6IAPD
+func (d *DHCPv6IAPD) DecodeFromBytes(data []byte) error {
+	if len(data) != 41 {
+		// string(int) yields a rune, not a decimal string, so format with fmt.
+		return fmt.Errorf("Invalid IA_PD length: %d", len(data))
+	}
+
+	copy(d.IAID[:], data[:4])
+	d.T1 = binary.BigEndian.Uint32(data[4:8])
+	d.T2 = binary.BigEndian.Uint32(data[8:12])
+
+	PDType := binary.BigEndian.Uint16(data[12:14])
+	if PDType != uint16(DHCPv6OptIAPrefix) {
+		return fmt.Errorf("Invalid IE Included: %d", PDType)
+	}
+	PDLen := binary.BigEndian.Uint16(data[14:16])
+	if PDLen != 25 {
+		return fmt.Errorf("Invalid Length for PD: %d", PDLen)
+	}
+	if err := (&d.PD).DecodeFromBytes(data[16:]); err != nil {
+		return err
+	}
+	return nil
+}
+
+// DecodeFromBytes decodes the given bytes into a DHCPv6PD (IA Prefix).
+func (pd *DHCPv6PD) DecodeFromBytes(data []byte) error {
+	pd.PrefLifeTime = binary.BigEndian.Uint32(data[0:4])
+	pd.ValidLifeTime = binary.BigEndian.Uint32(data[4:8])
+	pd.PrefixLength = data[8]
+	pd.Prefix = data[9:]
+	return nil
+}
+
+// Encode serializes the DHCPv6IAPD into a 41-byte IA_PD option payload.
+func (d *DHCPv6IAPD) Encode() []byte {
+	length := 41
+	data := make([]byte, length)
+	copy(data[0:4], d.IAID[:])
+	binary.BigEndian.PutUint32(data[4:8], d.T1)
+	binary.BigEndian.PutUint32(data[8:12], d.T2)
+	binary.BigEndian.PutUint16(data[12:14], uint16(DHCPv6OptIAPrefix))
+	binary.BigEndian.PutUint16(data[14:16], 25)
+	x := (&d.PD).Encode()
+	copy(data[16:41], x)
+	return data
+}
+
+// Encode serializes the DHCPv6PD (IA Prefix) into 25 bytes.
+func (pd *DHCPv6PD) Encode() []byte {
+	data := make([]byte, 25)
+	binary.BigEndian.PutUint32(data[0:4], pd.PrefLifeTime)
+	binary.BigEndian.PutUint32(data[4:8], pd.ValidLifeTime)
+	data[8] = pd.PrefixLength
+	copy(data[9:25], pd.Prefix)
+	return data
+}
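+
+// newIAPDOptionSketch is an illustrative sketch, not part of the upstream
+// gopacket API: it wraps an encoded DHCPv6IAPD into a DHCPv6Option with code
+// DHCPv6OptIAPD, ready to be appended to a DHCPv6 message's Options before
+// SerializeTo. The helper name is an assumption made for this sketch.
+func newIAPDOptionSketch(iapd *DHCPv6IAPD) DHCPv6Option {
+	return NewDHCPv6Option(DHCPv6OptIAPD, iapd.Encode())
+}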
+
+// DHCPv6RemoteId represents the Relay Agent Remote-ID option carried by a
+// Lightweight DHCPv6 Relay Agent (LDRA), as described in RFC 6221.
+type DHCPv6RemoteId struct {
+	EnterpriseID [4]byte
+	RemoteId     []byte
+}
+
+// Encode serializes the DHCPv6RemoteId into option payload bytes.
+func (d *DHCPv6RemoteId) Encode() []byte {
+	length := 4 + len(d.RemoteId)
+	data := make([]byte, length)
+	copy(data[0:4], d.EnterpriseID[:])
+	copy(data[4:], d.RemoteId)
+	return data
+}
+
+// DecodeFromBytes decodes the given bytes into a DHCPv6RemoteId.
+func (d *DHCPv6RemoteId) DecodeFromBytes(data []byte) error {
+	if len(data) < 4 {
+		return errors.New("Not enough bytes to decode DHCPv6RemoteId")
+	}
+	copy(d.EnterpriseID[:], data[0:4])
+	// copy into a nil slice is a no-op; (re)allocate the destination first.
+	d.RemoteId = append(d.RemoteId[:0], data[4:]...)
+	return nil
+}
+
+// DHCPv6IntfId represents the Interface-ID option carried by a Lightweight
+// DHCPv6 Relay Agent (LDRA), as described in RFC 6221.
+type DHCPv6IntfId struct {
+	Data []byte
+}
+
+// Encode serializes the DHCPv6IntfId into option payload bytes.
+func (d *DHCPv6IntfId) Encode() []byte {
+	length := len(d.Data)
+	data := make([]byte, length)
+	copy(data[0:length], d.Data)
+	return data
+}
+
+// DecodeFromBytes decodes the given bytes into a DHCPv6IntfId.
+func (d *DHCPv6IntfId) DecodeFromBytes(data []byte) error {
+	// copy into a nil slice is a no-op; (re)allocate the destination first.
+	d.Data = append(d.Data[:0], data...)
+	return nil
+}
+
+// DHCPv6IANA represents an Identity Association for Non-temporary Addresses
+// (IA_NA), as defined in RFC 3315.
+type DHCPv6IANA struct {
+	IAID [4]byte
+	T1   uint32
+	T2   uint32
+	IA   DHCPv6IA
+}
+
+// DHCPv6IA represents the IA Address option carried inside an IA_NA (RFC 3315).
+type DHCPv6IA struct {
+	PrefLifeTime  uint32
+	ValidLifeTime uint32
+	IPv6Addr      net.IP
+}
+
+// DecodeFromBytes decodes the given bytes into a DHCPv6IANA
+func (d *DHCPv6IANA) DecodeFromBytes(data []byte) error {
+	if len(data) != 40 {
+		// string(int) yields a rune, not a decimal string, so format with fmt.
+		return fmt.Errorf("Invalid IA_NA length: %d", len(data))
+	}
+
+	copy(d.IAID[:], data[:4])
+	d.T1 = binary.BigEndian.Uint32(data[4:8])
+	d.T2 = binary.BigEndian.Uint32(data[8:12])
+
+	IAType := binary.BigEndian.Uint16(data[12:14])
+	if IAType != uint16(DHCPv6OptIAAddr) {
+		return fmt.Errorf("Invalid IE Included: %d", IAType)
+	}
+	IALen := binary.BigEndian.Uint16(data[14:16])
+	if IALen != 24 {
+		return fmt.Errorf("Invalid Length for IA Address: %d", IALen)
+	}
+	if err := (&d.IA).DecodeFromBytes(data[16:]); err != nil {
+		return err
+	}
+	return nil
+}
+
+// DecodeFromBytes decodes the given bytes into a DHCPv6IA (IA Address).
+func (ia *DHCPv6IA) DecodeFromBytes(data []byte) error {
+	ia.IPv6Addr = data[0:16]
+	ia.PrefLifeTime = binary.BigEndian.Uint32(data[16:20])
+	ia.ValidLifeTime = binary.BigEndian.Uint32(data[20:])
+
+	return nil
+}
+
+// Encode serializes the DHCPv6IANA into a 40-byte IA_NA option payload.
+func (d *DHCPv6IANA) Encode() []byte {
+	length := 40
+	data := make([]byte, length)
+	copy(data[0:4], d.IAID[:])
+	binary.BigEndian.PutUint32(data[4:8], d.T1)
+	binary.BigEndian.PutUint32(data[8:12], d.T2)
+	// The embedded sub-option is an IA Address, matching DecodeFromBytes above.
+	binary.BigEndian.PutUint16(data[12:14], uint16(DHCPv6OptIAAddr))
+	binary.BigEndian.PutUint16(data[14:16], 24)
+	x := (&d.IA).Encode()
+	copy(data[16:40], x)
+	return data
+}
+
+// Encode serializes the DHCPv6IA (IA Address) into 24 bytes.
+func (ia *DHCPv6IA) Encode() []byte {
+	data := make([]byte, 24)
+	copy(data[0:16], ia.IPv6Addr)
+	binary.BigEndian.PutUint32(data[16:20], ia.PrefLifeTime)
+	binary.BigEndian.PutUint32(data[20:24], ia.ValidLifeTime)
+	return data
+}
diff --git a/vendor/github.com/google/gopacket/layers/dhcpv6_options.go b/vendor/github.com/google/gopacket/layers/dhcpv6_options.go
new file mode 100644
index 0000000..5a1f991
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dhcpv6_options.go
@@ -0,0 +1,621 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// DHCPv6Opt represents a DHCPv6 option or parameter from RFC-3315
+type DHCPv6Opt uint16
+
+// Constants for the DHCPv6Opt options.
+const (
+	DHCPv6OptClientID           DHCPv6Opt = 1
+	DHCPv6OptServerID           DHCPv6Opt = 2
+	DHCPv6OptIANA               DHCPv6Opt = 3
+	DHCPv6OptIATA               DHCPv6Opt = 4
+	DHCPv6OptIAAddr             DHCPv6Opt = 5
+	DHCPv6OptOro                DHCPv6Opt = 6
+	DHCPv6OptPreference         DHCPv6Opt = 7
+	DHCPv6OptElapsedTime        DHCPv6Opt = 8
+	DHCPv6OptRelayMessage       DHCPv6Opt = 9
+	DHCPv6OptAuth               DHCPv6Opt = 11
+	DHCPv6OptUnicast            DHCPv6Opt = 12
+	DHCPv6OptStatusCode         DHCPv6Opt = 13
+	DHCPv6OptRapidCommit        DHCPv6Opt = 14
+	DHCPv6OptUserClass          DHCPv6Opt = 15
+	DHCPv6OptVendorClass        DHCPv6Opt = 16
+	DHCPv6OptVendorOpts         DHCPv6Opt = 17
+	DHCPv6OptInterfaceID        DHCPv6Opt = 18
+	DHCPv6OptReconfigureMessage DHCPv6Opt = 19
+	DHCPv6OptReconfigureAccept  DHCPv6Opt = 20
+
+	// RFC 3319 Session Initiation Protocol (SIP)
+	DHCPv6OptSIPServersDomainList  DHCPv6Opt = 21
+	DHCPv6OptSIPServersAddressList DHCPv6Opt = 22
+
+	// RFC 3646 DNS Configuration
+	DHCPv6OptDNSServers DHCPv6Opt = 23
+	DHCPv6OptDomainList DHCPv6Opt = 24
+
+	// RFC 3633 Prefix Delegation
+	DHCPv6OptIAPD     DHCPv6Opt = 25
+	DHCPv6OptIAPrefix DHCPv6Opt = 26
+
+	// RFC 3898 Network Information Service (NIS)
+	DHCPv6OptNISServers     DHCPv6Opt = 27
+	DHCPv6OptNISPServers    DHCPv6Opt = 28
+	DHCPv6OptNISDomainName  DHCPv6Opt = 29
+	DHCPv6OptNISPDomainName DHCPv6Opt = 30
+
+	// RFC 4075 Simple Network Time Protocol (SNTP)
+	DHCPv6OptSNTPServers DHCPv6Opt = 31
+
+	// RFC 4242 Information Refresh Time Option
+	DHCPv6OptInformationRefreshTime DHCPv6Opt = 32
+
+	// RFC 4280 Broadcast and Multicast Control Servers
+	DHCPv6OptBCMCSServerDomainNameList DHCPv6Opt = 33
+	DHCPv6OptBCMCSServerAddressList    DHCPv6Opt = 34
+
+	// RFC 4776 Civic Address ConfigurationOption
+	DHCPv6OptGeoconfCivic DHCPv6Opt = 36
+
+	// RFC 4649 Relay Agent Remote-ID
+	DHCPv6OptRemoteID DHCPv6Opt = 37
+
+	// RFC 4580 Relay Agent Subscriber-ID
+	DHCPv6OptSubscriberID DHCPv6Opt = 38
+
+	// RFC 4704 Client Full Qualified Domain Name (FQDN)
+	DHCPv6OptClientFQDN DHCPv6Opt = 39
+
+	// RFC 5192 Protocol for Carrying Authentication for Network Access (PANA)
+	DHCPv6OptPanaAgent DHCPv6Opt = 40
+
+	// RFC 4833 Timezone Options
+	DHCPv6OptNewPOSIXTimezone DHCPv6Opt = 41
+	DHCPv6OptNewTZDBTimezone  DHCPv6Opt = 42
+
+	// RFC 4994 Relay Agent Echo Request
+	DHCPv6OptEchoRequestOption DHCPv6Opt = 43
+
+	// RFC 5007 Leasequery
+	DHCPv6OptLQQuery      DHCPv6Opt = 44
+	DHCPv6OptCLTTime      DHCPv6Opt = 45
+	DHCPv6OptClientData   DHCPv6Opt = 46
+	DHCPv6OptLQRelayData  DHCPv6Opt = 47
+	DHCPv6OptLQClientLink DHCPv6Opt = 48
+
+	// RFC 6610 Home Information Discovery in Mobile IPv6 (MIPv6)
+	DHCPv6OptMIP6HNIDF DHCPv6Opt = 49
+	DHCPv6OptMIP6VDINF DHCPv6Opt = 50
+	DHCPv6OptMIP6IDINF DHCPv6Opt = 69
+	DHCPv6OptMIP6UDINF DHCPv6Opt = 70
+	DHCPv6OptMIP6HNP   DHCPv6Opt = 71
+	DHCPv6OptMIP6HAA   DHCPv6Opt = 72
+	DHCPv6OptMIP6HAF   DHCPv6Opt = 73
+
+	// RFC 5223 Discovering Location-to-Service Translation (LoST) Servers
+	DHCPv6OptV6LOST DHCPv6Opt = 51
+
+	// RFC 5417 Control And Provisioning of Wireless Access Points (CAPWAP)
+	DHCPv6OptCAPWAPACV6 DHCPv6Opt = 52
+
+	// RFC 5460 Bulk Leasequery
+	DHCPv6OptRelayID DHCPv6Opt = 53
+
+	// RFC 5678 IEEE 802.21 Mobility Services (MoS) Discovery
+	DHCPv6OptIPv6AddressMoS DHCPv6Opt = 54
+	DHCPv6OptIPv6FQDNMoS    DHCPv6Opt = 55
+
+	// RFC 5908 NTP Server Option
+	DHCPv6OptNTPServer DHCPv6Opt = 56
+
+	// RFC 5986 Discovering the Local Location Information Server (LIS)
+	DHCPv6OptV6AccessDomain DHCPv6Opt = 57
+
+	// RFC 5986 SIP User Agent
+	DHCPv6OptSIPUACSList DHCPv6Opt = 58
+
+	// RFC 5970 Options for Network Boot
+	DHCPv6OptBootFileURL    DHCPv6Opt = 59
+	DHCPv6OptBootFileParam  DHCPv6Opt = 60
+	DHCPv6OptClientArchType DHCPv6Opt = 61
+	DHCPv6OptNII            DHCPv6Opt = 62
+
+	// RFC 6225 Coordinate-Based Location Configuration Information
+	DHCPv6OptGeolocation DHCPv6Opt = 63
+
+	// RFC 6334 Dual-Stack Lite
+	DHCPv6OptAFTRName DHCPv6Opt = 64
+
+	// RFC 6440 EAP Re-authentication Protocol (ERP)
+	DHCPv6OptERPLocalDomainName DHCPv6Opt = 65
+
+	// RFC 6422 Relay-Supplied DHCP Options
+	DHCPv6OptRSOO DHCPv6Opt = 66
+
+	// RFC 6603 Prefix Exclude Option for DHCPv6-based Prefix Delegation
+	DHCPv6OptPDExclude DHCPv6Opt = 67
+
+	// RFC 6607 Virtual Subnet Selection
+	DHCPv6OptVSS DHCPv6Opt = 68
+
+	// RFC 6731 Improved Recursive DNS Server Selection for Multi-Interfaced Nodes
+	DHCPv6OptRDNSSSelection DHCPv6Opt = 74
+
+	// RFC 6784 Kerberos Options for DHCPv6
+	DHCPv6OptKRBPrincipalName DHCPv6Opt = 75
+	DHCPv6OptKRBRealmName     DHCPv6Opt = 76
+	DHCPv6OptKRBKDC           DHCPv6Opt = 77
+
+	// RFC 6939 Client Link-Layer Address Option
+	DHCPv6OptClientLinkLayerAddress DHCPv6Opt = 79
+
+	// RFC 6977 Triggering DHCPv6 Reconfiguration from Relay Agents
+	DHCPv6OptLinkAddress DHCPv6Opt = 80
+
+	// RFC 7037 RADIUS Option for the DHCPv6 Relay Agent
+	DHCPv6OptRADIUS DHCPv6Opt = 81
+
+	// RFC 7083 Modification to Default Values of SOL_MAX_RT and INF_MAX_RT
+	DHCPv6OptSolMaxRt DHCPv6Opt = 82
+	DHCPv6OptInfMaxRt DHCPv6Opt = 83
+
+	// RFC 7078 Distributing Address Selection Policy
+	DHCPv6OptAddrSel      DHCPv6Opt = 84
+	DHCPv6OptAddrSelTable DHCPv6Opt = 85
+
+	// RFC 7291 DHCP Options for the Port Control Protocol (PCP)
+	DHCPv6OptV6PCPServer DHCPv6Opt = 86
+
+	// RFC 7341 DHCPv4-over-DHCPv6 (DHCP 4o6) Transport
+	DHCPv6OptDHCPv4Message          DHCPv6Opt = 87
+	DHCPv6OptDHCPv4OverDHCPv6Server DHCPv6Opt = 88
+
+	// RFC 7598 Configuration of Softwire Address and Port-Mapped Clients
+	DHCPv6OptS46Rule           DHCPv6Opt = 89
+	DHCPv6OptS46BR             DHCPv6Opt = 90
+	DHCPv6OptS46DMR            DHCPv6Opt = 91
+	DHCPv6OptS46V4V4Bind       DHCPv6Opt = 92
+	DHCPv6OptS46PortParameters DHCPv6Opt = 93
+	DHCPv6OptS46ContMAPE       DHCPv6Opt = 94
+	DHCPv6OptS46ContMAPT       DHCPv6Opt = 95
+	DHCPv6OptS46ContLW         DHCPv6Opt = 96
+
+	// RFC 7600 IPv4 Residual Deployment via IPv6
+	DHCPv6Opt4RD           DHCPv6Opt = 97
+	DHCPv6Opt4RDMapRule    DHCPv6Opt = 98
+	DHCPv6Opt4RDNonMapRule DHCPv6Opt = 99
+
+	// RFC 7653 Active Leasequery
+	DHCPv6OptLQBaseTime  DHCPv6Opt = 100
+	DHCPv6OptLQStartTime DHCPv6Opt = 101
+	DHCPv6OptLQEndTime   DHCPv6Opt = 102
+
+	// RFC 7710 Captive-Portal Identification
+	DHCPv6OptCaptivePortal DHCPv6Opt = 103
+
+	// RFC 7774 Multicast Protocol for Low-Power and Lossy Networks (MPL) Parameter Configuration
+	DHCPv6OptMPLParameters DHCPv6Opt = 104
+
+	// RFC 7839 Access-Network-Identifier (ANI)
+	DHCPv6OptANIATT           DHCPv6Opt = 105
+	DHCPv6OptANINetworkName   DHCPv6Opt = 106
+	DHCPv6OptANIAPName        DHCPv6Opt = 107
+	DHCPv6OptANIAPBSSID       DHCPv6Opt = 108
+	DHCPv6OptANIOperatorID    DHCPv6Opt = 109
+	DHCPv6OptANIOperatorRealm DHCPv6Opt = 110
+
+	// RFC 8026 Unified IPv4-in-IPv6 Softwire Customer Premises Equipment (CPE)
+	DHCPv6OptS46Priority DHCPv6Opt = 111
+
+	// draft-ietf-opsawg-mud-25 Manufacturer Usage Description (MUD)
+	DHCPv6OptMUDURLV6 DHCPv6Opt = 112
+
+	// RFC 8115 IPv4-Embedded Multicast and Unicast IPv6 Prefixes
+	DHCPv6OptV6Prefix64 DHCPv6Opt = 113
+
+	// RFC 8156 DHCPv6 Failover Protocol
+	DHCPv6OptFBindingStatus           DHCPv6Opt = 114
+	DHCPv6OptFConnectFlags            DHCPv6Opt = 115
+	DHCPv6OptFDNSRemovalInfo          DHCPv6Opt = 116
+	DHCPv6OptFDNSHostName             DHCPv6Opt = 117
+	DHCPv6OptFDNSZoneName             DHCPv6Opt = 118
+	DHCPv6OptFDNSFlags                DHCPv6Opt = 119
+	DHCPv6OptFExpirationTime          DHCPv6Opt = 120
+	DHCPv6OptFMaxUnacknowledgedBNDUPD DHCPv6Opt = 121
+	DHCPv6OptFMCLT                    DHCPv6Opt = 122
+	DHCPv6OptFPartnerLifetime         DHCPv6Opt = 123
+	DHCPv6OptFPartnerLifetimeSent     DHCPv6Opt = 124
+	DHCPv6OptFPartnerDownTime         DHCPv6Opt = 125
+	DHCPv6OptFPartnerRawCltTime       DHCPv6Opt = 126
+	DHCPv6OptFProtocolVersion         DHCPv6Opt = 127
+	DHCPv6OptFKeepaliveTime           DHCPv6Opt = 128
+	DHCPv6OptFReconfigureData         DHCPv6Opt = 129
+	DHCPv6OptFRelationshipName        DHCPv6Opt = 130
+	DHCPv6OptFServerFlags             DHCPv6Opt = 131
+	DHCPv6OptFServerState             DHCPv6Opt = 132
+	DHCPv6OptFStartTimeOfState        DHCPv6Opt = 133
+	DHCPv6OptFStateExpirationTime     DHCPv6Opt = 134
+
+	// RFC 8357 Generalized UDP Source Port for DHCP Relay
+	DHCPv6OptRelayPort DHCPv6Opt = 135
+
+	// draft-ietf-netconf-zerotouch-25 Zero Touch Provisioning for Networking Devices
+	DHCPv6OptV6ZeroTouchRedirect DHCPv6Opt = 136
+
+	// RFC 6153 Access Network Discovery and Selection Function (ANDSF) Discovery
+	DHCPv6OptIPV6AddressANDSF DHCPv6Opt = 143
+)
+
+// String returns a string version of a DHCPv6Opt.
+func (o DHCPv6Opt) String() string {
+	switch o {
+	case DHCPv6OptClientID:
+		return "ClientID"
+	case DHCPv6OptServerID:
+		return "ServerID"
+	case DHCPv6OptIANA:
+		return "IA_NA"
+	case DHCPv6OptIATA:
+		return "IA_TA"
+	case DHCPv6OptIAAddr:
+		return "IAAddr"
+	case DHCPv6OptOro:
+		return "Oro"
+	case DHCPv6OptPreference:
+		return "Preference"
+	case DHCPv6OptElapsedTime:
+		return "ElapsedTime"
+	case DHCPv6OptRelayMessage:
+		return "RelayMessage"
+	case DHCPv6OptAuth:
+		return "Auth"
+	case DHCPv6OptUnicast:
+		return "Unicast"
+	case DHCPv6OptStatusCode:
+		return "StatusCode"
+	case DHCPv6OptRapidCommit:
+		return "RapidCommit"
+	case DHCPv6OptUserClass:
+		return "UserClass"
+	case DHCPv6OptVendorClass:
+		return "VendorClass"
+	case DHCPv6OptVendorOpts:
+		return "VendorOpts"
+	case DHCPv6OptInterfaceID:
+		return "InterfaceID"
+	case DHCPv6OptReconfigureMessage:
+		return "ReconfigureMessage"
+	case DHCPv6OptReconfigureAccept:
+		return "ReconfigureAccept"
+	case DHCPv6OptSIPServersDomainList:
+		return "SIPServersDomainList"
+	case DHCPv6OptSIPServersAddressList:
+		return "SIPServersAddressList"
+	case DHCPv6OptDNSServers:
+		return "DNSRecursiveNameServer"
+	case DHCPv6OptDomainList:
+		return "DomainSearchList"
+	case DHCPv6OptIAPD:
+		return "IdentityAssociationPrefixDelegation"
+	case DHCPv6OptIAPrefix:
+		return "IAPDPrefix"
+	case DHCPv6OptNISServers:
+		return "NISServers"
+	case DHCPv6OptNISPServers:
+		return "NISv2Servers"
+	case DHCPv6OptNISDomainName:
+		return "NISDomainName"
+	case DHCPv6OptNISPDomainName:
+		return "NISv2DomainName"
+	case DHCPv6OptSNTPServers:
+		return "SNTPServers"
+	case DHCPv6OptInformationRefreshTime:
+		return "InformationRefreshTime"
+	case DHCPv6OptBCMCSServerDomainNameList:
+		return "BCMCSControlServersDomainNameList"
+	case DHCPv6OptBCMCSServerAddressList:
+		return "BCMCSControlServersAddressList"
+	case DHCPv6OptGeoconfCivic:
+		return "CivicAddress"
+	case DHCPv6OptRemoteID:
+		return "RelayAgentRemoteID"
+	case DHCPv6OptSubscriberID:
+		return "RelayAgentSubscriberID"
+	case DHCPv6OptClientFQDN:
+		return "ClientFQDN"
+	case DHCPv6OptPanaAgent:
+		return "PANAAuthenticationAgent"
+	case DHCPv6OptNewPOSIXTimezone:
+		return "NewPOSIXTimezone"
+	case DHCPv6OptNewTZDBTimezone:
+		return "NewTZDBTimezone"
+	case DHCPv6OptEchoRequestOption:
+		return "EchoRequest"
+	case DHCPv6OptLQQuery:
+		return "LeasequeryQuery"
+	case DHCPv6OptClientData:
+		return "LeasequeryClientData"
+	case DHCPv6OptCLTTime:
+		return "LeasequeryClientLastTransactionTime"
+	case DHCPv6OptLQRelayData:
+		return "LeasequeryRelayData"
+	case DHCPv6OptLQClientLink:
+		return "LeasequeryClientLink"
+	case DHCPv6OptMIP6HNIDF:
+		return "MIPv6HomeNetworkIDFQDN"
+	case DHCPv6OptMIP6VDINF:
+		return "MIPv6VisitedHomeNetworkInformation"
+	case DHCPv6OptMIP6IDINF:
+		return "MIPv6IdentifiedHomeNetworkInformation"
+	case DHCPv6OptMIP6UDINF:
+		return "MIPv6UnrestrictedHomeNetworkInformation"
+	case DHCPv6OptMIP6HNP:
+		return "MIPv6HomeNetworkPrefix"
+	case DHCPv6OptMIP6HAA:
+		return "MIPv6HomeAgentAddress"
+	case DHCPv6OptMIP6HAF:
+		return "MIPv6HomeAgentFQDN"
+	case DHCPv6OptV6LOST:
+		return "LoST Server"
+	case DHCPv6OptCAPWAPACV6:
+		return "CAPWAPAccessControllerV6"
+	case DHCPv6OptRelayID:
+		return "LeasequeryRelayID"
+	case DHCPv6OptIPv6AddressMoS:
+		return "MoSIPv6Address"
+	case DHCPv6OptIPv6FQDNMoS:
+		return "MoSDomainNameList"
+	case DHCPv6OptNTPServer:
+		return "NTPServer"
+	case DHCPv6OptV6AccessDomain:
+		return "AccessNetworkDomainName"
+	case DHCPv6OptSIPUACSList:
+		return "SIPUserAgentConfigurationServiceDomains"
+	case DHCPv6OptBootFileURL:
+		return "BootFileURL"
+	case DHCPv6OptBootFileParam:
+		return "BootFileParameters"
+	case DHCPv6OptClientArchType:
+		return "ClientSystemArchitectureType"
+	case DHCPv6OptNII:
+		return "ClientNetworkInterfaceIdentifier"
+	case DHCPv6OptGeolocation:
+		return "Geolocation"
+	case DHCPv6OptAFTRName:
+		return "AFTRName"
+	case DHCPv6OptERPLocalDomainName:
+		return "ERPLocalDomainName"
+	case DHCPv6OptRSOO:
+		return "RSOOption"
+	case DHCPv6OptPDExclude:
+		return "PrefixExclude"
+	case DHCPv6OptVSS:
+		return "VirtualSubnetSelection"
+	case DHCPv6OptRDNSSSelection:
+		return "RDNSSSelection"
+	case DHCPv6OptKRBPrincipalName:
+		return "KerberosPrincipalName"
+	case DHCPv6OptKRBRealmName:
+		return "KerberosRealmName"
+	case DHCPv6OptKRBKDC:
+		return "KerberosKDC"
+	case DHCPv6OptClientLinkLayerAddress:
+		return "ClientLinkLayerAddress"
+	case DHCPv6OptLinkAddress:
+		return "LinkAddress"
+	case DHCPv6OptRADIUS:
+		return "RADIUS"
+	case DHCPv6OptSolMaxRt:
+		return "SolMaxRt"
+	case DHCPv6OptInfMaxRt:
+		return "InfMaxRt"
+	case DHCPv6OptAddrSel:
+		return "AddressSelection"
+	case DHCPv6OptAddrSelTable:
+		return "AddressSelectionTable"
+	case DHCPv6OptV6PCPServer:
+		return "PCPServer"
+	case DHCPv6OptDHCPv4Message:
+		return "DHCPv4Message"
+	case DHCPv6OptDHCPv4OverDHCPv6Server:
+		return "DHCP4o6ServerAddress"
+	case DHCPv6OptS46Rule:
+		return "S46Rule"
+	case DHCPv6OptS46BR:
+		return "S46BR"
+	case DHCPv6OptS46DMR:
+		return "S46DMR"
+	case DHCPv6OptS46V4V4Bind:
+		return "S46IPv4IPv6AddressBinding"
+	case DHCPv6OptS46PortParameters:
+		return "S46PortParameters"
+	case DHCPv6OptS46ContMAPE:
+		return "S46MAPEContainer"
+	case DHCPv6OptS46ContMAPT:
+		return "S46MAPTContainer"
+	case DHCPv6OptS46ContLW:
+		return "S46Lightweight4Over6Container"
+	case DHCPv6Opt4RD:
+		return "4RD"
+	case DHCPv6Opt4RDMapRule:
+		return "4RDMapRule"
+	case DHCPv6Opt4RDNonMapRule:
+		return "4RDNonMapRule"
+	case DHCPv6OptLQBaseTime:
+		return "LQBaseTime"
+	case DHCPv6OptLQStartTime:
+		return "LQStartTime"
+	case DHCPv6OptLQEndTime:
+		return "LQEndTime"
+	case DHCPv6OptCaptivePortal:
+		return "CaptivePortal"
+	case DHCPv6OptMPLParameters:
+		return "MPLParameterConfiguration"
+	case DHCPv6OptANIATT:
+		return "ANIAccessTechnologyType"
+	case DHCPv6OptANINetworkName:
+		return "ANINetworkName"
+	case DHCPv6OptANIAPName:
+		return "ANIAccessPointName"
+	case DHCPv6OptANIAPBSSID:
+		return "ANIAccessPointBSSID"
+	case DHCPv6OptANIOperatorID:
+		return "ANIOperatorIdentifier"
+	case DHCPv6OptANIOperatorRealm:
+		return "ANIOperatorRealm"
+	case DHCPv6OptS46Priority:
+		return "S46Priority"
+	case DHCPv6OptMUDURLV6:
+		return "ManufacturerUsageDescriptionURL"
+	case DHCPv6OptV6Prefix64:
+		return "V6Prefix64"
+	case DHCPv6OptFBindingStatus:
+		return "FailoverBindingStatus"
+	case DHCPv6OptFConnectFlags:
+		return "FailoverConnectFlags"
+	case DHCPv6OptFDNSRemovalInfo:
+		return "FailoverDNSRemovalInfo"
+	case DHCPv6OptFDNSHostName:
+		return "FailoverDNSHostName"
+	case DHCPv6OptFDNSZoneName:
+		return "FailoverDNSZoneName"
+	case DHCPv6OptFDNSFlags:
+		return "FailoverDNSFlags"
+	case DHCPv6OptFExpirationTime:
+		return "FailoverExpirationTime"
+	case DHCPv6OptFMaxUnacknowledgedBNDUPD:
+		return "FailoverMaxUnacknowledgedBNDUPDMessages"
+	case DHCPv6OptFMCLT:
+		return "FailoverMaximumClientLeadTime"
+	case DHCPv6OptFPartnerLifetime:
+		return "FailoverPartnerLifetime"
+	case DHCPv6OptFPartnerLifetimeSent:
+		return "FailoverPartnerLifetimeSent"
+	case DHCPv6OptFPartnerDownTime:
+		return "FailoverPartnerDownTime"
+	case DHCPv6OptFPartnerRawCltTime:
+		return "FailoverPartnerRawClientLeadTime"
+	case DHCPv6OptFProtocolVersion:
+		return "FailoverProtocolVersion"
+	case DHCPv6OptFKeepaliveTime:
+		return "FailoverKeepaliveTime"
+	case DHCPv6OptFReconfigureData:
+		return "FailoverReconfigureData"
+	case DHCPv6OptFRelationshipName:
+		return "FailoverRelationshipName"
+	case DHCPv6OptFServerFlags:
+		return "FailoverServerFlags"
+	case DHCPv6OptFServerState:
+		return "FailoverServerState"
+	case DHCPv6OptFStartTimeOfState:
+		return "FailoverStartTimeOfState"
+	case DHCPv6OptFStateExpirationTime:
+		return "FailoverStateExpirationTime"
+	case DHCPv6OptRelayPort:
+		return "RelayPort"
+	case DHCPv6OptV6ZeroTouchRedirect:
+		return "ZeroTouch"
+	case DHCPv6OptIPV6AddressANDSF:
+		return "ANDSFIPv6Address"
+	default:
+		return fmt.Sprintf("Unknown(%d)", uint16(o))
+	}
+}
+
+// DHCPv6Options is used to get nicely printed option lists which would normally
+// be cut off after 5 options.
+type DHCPv6Options []DHCPv6Option
+
+// String returns a string version of the options list.
+func (o DHCPv6Options) String() string {
+	buf := &bytes.Buffer{}
+	buf.WriteByte('[')
+	for i, opt := range o {
+		buf.WriteString(opt.String())
+		if i+1 != len(o) {
+			buf.WriteString(", ")
+		}
+	}
+	buf.WriteByte(']')
+	return buf.String()
+}
+
+// DHCPv6Option represents a DHCPv6 option.
+type DHCPv6Option struct {
+	Code   DHCPv6Opt
+	Length uint16
+	Data   []byte
+}
+
+// String returns a string version of a DHCP Option.
+func (o DHCPv6Option) String() string {
+	switch o.Code {
+	case DHCPv6OptClientID, DHCPv6OptServerID:
+		duid, err := decodeDHCPv6DUID(o.Data)
+		if err != nil {
+			return fmt.Sprintf("Option(%s:INVALID)", o.Code)
+		}
+		return fmt.Sprintf("Option(%s:[%s])", o.Code, duid.String())
+	case DHCPv6OptOro:
+		options := ""
+		for i := 0; i < int(o.Length); i += 2 {
+			if options != "" {
+				options += ","
+			}
+			option := DHCPv6Opt(binary.BigEndian.Uint16(o.Data[i : i+2]))
+			options += option.String()
+		}
+		return fmt.Sprintf("Option(%s:[%s])", o.Code, options)
+	default:
+		return fmt.Sprintf("Option(%s:%v)", o.Code, o.Data)
+	}
+}
+
+// NewDHCPv6Option constructs a new DHCPv6Option with a given type and data.
+func NewDHCPv6Option(code DHCPv6Opt, data []byte) DHCPv6Option {
+	o := DHCPv6Option{Code: code}
+	if data != nil {
+		o.Data = data
+		o.Length = uint16(len(data))
+	}
+
+	return o
+}
+
+func (o *DHCPv6Option) encode(b []byte, opts gopacket.SerializeOptions) error {
+	binary.BigEndian.PutUint16(b[0:2], uint16(o.Code))
+	if opts.FixLengths {
+		binary.BigEndian.PutUint16(b[2:4], uint16(len(o.Data)))
+	} else {
+		binary.BigEndian.PutUint16(b[2:4], o.Length)
+	}
+	copy(b[4:], o.Data)
+
+	return nil
+}
+
+func (o *DHCPv6Option) decode(data []byte) error {
+	if len(data) < 4 {
+		return errors.New("not enough data to decode")
+	}
+	o.Code = DHCPv6Opt(binary.BigEndian.Uint16(data[0:2]))
+	o.Length = binary.BigEndian.Uint16(data[2:4])
+	if len(data) < 4+int(o.Length) {
+		return fmt.Errorf("dhcpv6 option size < length %d", 4+o.Length)
+	}
+	o.Data = data[4 : 4+o.Length]
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/dns.go b/vendor/github.com/google/gopacket/layers/dns.go
new file mode 100644
index 0000000..b639dec
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dns.go
@@ -0,0 +1,1100 @@
+// Copyright 2014, 2018 GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/google/gopacket"
+)
+
+// DNSClass defines the class associated with a request/response.  Different DNS
+// classes can be thought of as an array of parallel namespace trees.
+type DNSClass uint16
+
+// DNSClass known values.
+const (
+	DNSClassIN  DNSClass = 1   // Internet
+	DNSClassCS  DNSClass = 2   // the CSNET class (Obsolete)
+	DNSClassCH  DNSClass = 3   // the CHAOS class
+	DNSClassHS  DNSClass = 4   // Hesiod [Dyer 87]
+	DNSClassAny DNSClass = 255 // AnyClass
+)
+
+func (dc DNSClass) String() string {
+	switch dc {
+	default:
+		return "Unknown"
+	case DNSClassIN:
+		return "IN"
+	case DNSClassCS:
+		return "CS"
+	case DNSClassCH:
+		return "CH"
+	case DNSClassHS:
+		return "HS"
+	case DNSClassAny:
+		return "Any"
+	}
+}
+
+// DNSType defines the type of data being requested/returned in a
+// question/answer.
+type DNSType uint16
+
+// DNSType known values.
+const (
+	DNSTypeA     DNSType = 1   // a host address
+	DNSTypeNS    DNSType = 2   // an authoritative name server
+	DNSTypeMD    DNSType = 3   // a mail destination (Obsolete - use MX)
+	DNSTypeMF    DNSType = 4   // a mail forwarder (Obsolete - use MX)
+	DNSTypeCNAME DNSType = 5   // the canonical name for an alias
+	DNSTypeSOA   DNSType = 6   // marks the start of a zone of authority
+	DNSTypeMB    DNSType = 7   // a mailbox domain name (EXPERIMENTAL)
+	DNSTypeMG    DNSType = 8   // a mail group member (EXPERIMENTAL)
+	DNSTypeMR    DNSType = 9   // a mail rename domain name (EXPERIMENTAL)
+	DNSTypeNULL  DNSType = 10  // a null RR (EXPERIMENTAL)
+	DNSTypeWKS   DNSType = 11  // a well known service description
+	DNSTypePTR   DNSType = 12  // a domain name pointer
+	DNSTypeHINFO DNSType = 13  // host information
+	DNSTypeMINFO DNSType = 14  // mailbox or mail list information
+	DNSTypeMX    DNSType = 15  // mail exchange
+	DNSTypeTXT   DNSType = 16  // text strings
+	DNSTypeAAAA  DNSType = 28  // an IPv6 host address [RFC3596]
+	DNSTypeSRV   DNSType = 33  // server discovery [RFC2782] [RFC6195]
+	DNSTypeOPT   DNSType = 41  // OPT Pseudo-RR [RFC6891]
+	DNSTypeURI   DNSType = 256 // URI RR [RFC7553]
+)
+
+func (dt DNSType) String() string {
+	switch dt {
+	default:
+		return "Unknown"
+	case DNSTypeA:
+		return "A"
+	case DNSTypeNS:
+		return "NS"
+	case DNSTypeMD:
+		return "MD"
+	case DNSTypeMF:
+		return "MF"
+	case DNSTypeCNAME:
+		return "CNAME"
+	case DNSTypeSOA:
+		return "SOA"
+	case DNSTypeMB:
+		return "MB"
+	case DNSTypeMG:
+		return "MG"
+	case DNSTypeMR:
+		return "MR"
+	case DNSTypeNULL:
+		return "NULL"
+	case DNSTypeWKS:
+		return "WKS"
+	case DNSTypePTR:
+		return "PTR"
+	case DNSTypeHINFO:
+		return "HINFO"
+	case DNSTypeMINFO:
+		return "MINFO"
+	case DNSTypeMX:
+		return "MX"
+	case DNSTypeTXT:
+		return "TXT"
+	case DNSTypeAAAA:
+		return "AAAA"
+	case DNSTypeSRV:
+		return "SRV"
+	case DNSTypeOPT:
+		return "OPT"
+	case DNSTypeURI:
+		return "URI"
+	}
+}
+
+// DNSResponseCode provides response codes for question answers.
+type DNSResponseCode uint8
+
+// DNSResponseCode known values.
+const (
+	DNSResponseCodeNoErr     DNSResponseCode = 0  // No error
+	DNSResponseCodeFormErr   DNSResponseCode = 1  // Format Error                       [RFC1035]
+	DNSResponseCodeServFail  DNSResponseCode = 2  // Server Failure                     [RFC1035]
+	DNSResponseCodeNXDomain  DNSResponseCode = 3  // Non-Existent Domain                [RFC1035]
+	DNSResponseCodeNotImp    DNSResponseCode = 4  // Not Implemented                    [RFC1035]
+	DNSResponseCodeRefused   DNSResponseCode = 5  // Query Refused                      [RFC1035]
+	DNSResponseCodeYXDomain  DNSResponseCode = 6  // Name Exists when it should not     [RFC2136]
+	DNSResponseCodeYXRRSet   DNSResponseCode = 7  // RR Set Exists when it should not   [RFC2136]
+	DNSResponseCodeNXRRSet   DNSResponseCode = 8  // RR Set that should exist does not  [RFC2136]
+	DNSResponseCodeNotAuth   DNSResponseCode = 9  // Server Not Authoritative for zone  [RFC2136]
+	DNSResponseCodeNotZone   DNSResponseCode = 10 // Name not contained in zone         [RFC2136]
+	DNSResponseCodeBadVers   DNSResponseCode = 16 // Bad OPT Version                    [RFC2671]
+	DNSResponseCodeBadSig    DNSResponseCode = 16 // TSIG Signature Failure             [RFC2845]
+	DNSResponseCodeBadKey    DNSResponseCode = 17 // Key not recognized                 [RFC2845]
+	DNSResponseCodeBadTime   DNSResponseCode = 18 // Signature out of time window       [RFC2845]
+	DNSResponseCodeBadMode   DNSResponseCode = 19 // Bad TKEY Mode                      [RFC2930]
+	DNSResponseCodeBadName   DNSResponseCode = 20 // Duplicate key name                 [RFC2930]
+	DNSResponseCodeBadAlg    DNSResponseCode = 21 // Algorithm not supported            [RFC2930]
+	DNSResponseCodeBadTruc   DNSResponseCode = 22 // Bad Truncation                     [RFC4635]
+	DNSResponseCodeBadCookie DNSResponseCode = 23 // Bad/missing Server Cookie          [RFC7873]
+)
+
+func (drc DNSResponseCode) String() string {
+	switch drc {
+	default:
+		return "Unknown"
+	case DNSResponseCodeNoErr:
+		return "No Error"
+	case DNSResponseCodeFormErr:
+		return "Format Error"
+	case DNSResponseCodeServFail:
+		return "Server Failure"
+	case DNSResponseCodeNXDomain:
+		return "Non-Existent Domain"
+	case DNSResponseCodeNotImp:
+		return "Not Implemented"
+	case DNSResponseCodeRefused:
+		return "Query Refused"
+	case DNSResponseCodeYXDomain:
+		return "Name Exists when it should not"
+	case DNSResponseCodeYXRRSet:
+		return "RR Set Exists when it should not"
+	case DNSResponseCodeNXRRSet:
+		return "RR Set that should exist does not"
+	case DNSResponseCodeNotAuth:
+		return "Server Not Authoritative for zone"
+	case DNSResponseCodeNotZone:
+		return "Name not contained in zone"
+	case DNSResponseCodeBadVers:
+		return "Bad OPT Version"
+	case DNSResponseCodeBadKey:
+		return "Key not recognized"
+	case DNSResponseCodeBadTime:
+		return "Signature out of time window"
+	case DNSResponseCodeBadMode:
+		return "Bad TKEY Mode"
+	case DNSResponseCodeBadName:
+		return "Duplicate key name"
+	case DNSResponseCodeBadAlg:
+		return "Algorithm not supported"
+	case DNSResponseCodeBadTruc:
+		return "Bad Truncation"
+	case DNSResponseCodeBadCookie:
+		return "Bad Cookie"
+	}
+}
+
+// DNSOpCode defines a set of different operation types.
+type DNSOpCode uint8
+
+// DNSOpCode known values.
+const (
+	DNSOpCodeQuery  DNSOpCode = 0 // Query                  [RFC1035]
+	DNSOpCodeIQuery DNSOpCode = 1 // Inverse Query Obsolete [RFC3425]
+	DNSOpCodeStatus DNSOpCode = 2 // Status                 [RFC1035]
+	DNSOpCodeNotify DNSOpCode = 4 // Notify                 [RFC1996]
+	DNSOpCodeUpdate DNSOpCode = 5 // Update                 [RFC2136]
+)
+
+func (doc DNSOpCode) String() string {
+	switch doc {
+	default:
+		return "Unknown"
+	case DNSOpCodeQuery:
+		return "Query"
+	case DNSOpCodeIQuery:
+		return "Inverse Query"
+	case DNSOpCodeStatus:
+		return "Status"
+	case DNSOpCodeNotify:
+		return "Notify"
+	case DNSOpCodeUpdate:
+		return "Update"
+	}
+}
+
+// DNS is specified in RFC 1034 / RFC 1035
+// +---------------------+
+// |        Header       |
+// +---------------------+
+// |       Question      | the question for the name server
+// +---------------------+
+// |        Answer       | RRs answering the question
+// +---------------------+
+// |      Authority      | RRs pointing toward an authority
+// +---------------------+
+// |      Additional     | RRs holding additional information
+// +---------------------+
+//
+//  DNS Header
+//  0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                      ID                       |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |QR|   Opcode  |AA|TC|RD|RA|   Z    |   RCODE   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    QDCOUNT                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    ANCOUNT                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    NSCOUNT                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    ARCOUNT                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// DNS contains data from a single Domain Name Service packet.
+type DNS struct {
+	BaseLayer
+
+	// Header fields
+	ID     uint16
+	QR     bool
+	OpCode DNSOpCode
+
+	AA bool  // Authoritative answer
+	TC bool  // Truncated
+	RD bool  // Recursion desired
+	RA bool  // Recursion available
+	Z  uint8 // Reserved for future use
+
+	ResponseCode DNSResponseCode
+	QDCount      uint16 // Number of questions to expect
+	ANCount      uint16 // Number of answers to expect
+	NSCount      uint16 // Number of authorities to expect
+	ARCount      uint16 // Number of additional records to expect
+
+	// Entries
+	Questions   []DNSQuestion
+	Answers     []DNSResourceRecord
+	Authorities []DNSResourceRecord
+	Additionals []DNSResourceRecord
+
+	// buffer for doing name decoding.  We use a single reusable buffer to avoid
+	// name decoding on a single object via multiple DecodeFromBytes calls
+	// requiring constant allocation of small byte slices.
+	buffer []byte
+}
+
+// LayerType returns gopacket.LayerTypeDNS.
+func (d *DNS) LayerType() gopacket.LayerType { return LayerTypeDNS }
+
+// decodeDNS decodes the byte slice into a DNS type. It also
+// sets up the application layer in the PacketBuilder.
+func decodeDNS(data []byte, p gopacket.PacketBuilder) error {
+	d := &DNS{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(d)
+	p.SetApplicationLayer(d)
+	return nil
+}
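+
+// dnsAnswerIPsSketch is an illustrative sketch, not part of the upstream
+// gopacket API: it decodes a raw DNS message with DecodeFromBytes and collects
+// the addresses carried by A and AAAA answer records. The helper name is an
+// assumption made for this sketch, and it assumes DNSResourceRecord exposes
+// the decoded address in its IP field.
+func dnsAnswerIPsSketch(payload []byte) ([]net.IP, error) {
+	var msg DNS
+	if err := msg.DecodeFromBytes(payload, gopacket.NilDecodeFeedback); err != nil {
+		return nil, err
+	}
+	var ips []net.IP
+	for _, rr := range msg.Answers {
+		if rr.Type == DNSTypeA || rr.Type == DNSTypeAAAA {
+			ips = append(ips, rr.IP)
+		}
+	}
+	return ips, nil
+}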
+
+// DecodeFromBytes decodes the slice into the DNS struct.
+func (d *DNS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	d.buffer = d.buffer[:0]
+
+	if len(data) < 12 {
+		df.SetTruncated()
+		return errDNSPacketTooShort
+	}
+
+	// since there are no further layers, the baselayer's content is
+	// pointing to this layer
+	d.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+	d.ID = binary.BigEndian.Uint16(data[:2])
+	d.QR = data[2]&0x80 != 0
+	d.OpCode = DNSOpCode(data[2]>>3) & 0x0F
+	d.AA = data[2]&0x04 != 0
+	d.TC = data[2]&0x02 != 0
+	d.RD = data[2]&0x01 != 0
+	d.RA = data[3]&0x80 != 0
+	d.Z = uint8(data[3]>>4) & 0x7
+	d.ResponseCode = DNSResponseCode(data[3] & 0xF)
+	d.QDCount = binary.BigEndian.Uint16(data[4:6])
+	d.ANCount = binary.BigEndian.Uint16(data[6:8])
+	d.NSCount = binary.BigEndian.Uint16(data[8:10])
+	d.ARCount = binary.BigEndian.Uint16(data[10:12])
+
+	d.Questions = d.Questions[:0]
+	d.Answers = d.Answers[:0]
+	d.Authorities = d.Authorities[:0]
+	d.Additionals = d.Additionals[:0]
+
+	offset := 12
+	var err error
+	for i := 0; i < int(d.QDCount); i++ {
+		var q DNSQuestion
+		if offset, err = q.decode(data, offset, df, &d.buffer); err != nil {
+			return err
+		}
+		d.Questions = append(d.Questions, q)
+	}
+
+	// For some horrible reason, if we do the obvious thing in this loop:
+	//   var r DNSResourceRecord
+	//   if blah := r.decode(blah); err != nil {
+	//     return err
+	//   }
+	//   d.Foo = append(d.Foo, r)
+	// the Go compiler thinks that 'r' escapes to the heap, causing a malloc for
+	// every Answer, Authority, and Additional.  To get around this, we do
+	// something really silly:  we append an empty resource record to our slice,
+	// then use the last value in the slice to call decode.  Since the value is
+	// already in the slice, there's no WAY it can escape... on the other hand our
+	// code is MUCH uglier :(
+	for i := 0; i < int(d.ANCount); i++ {
+		d.Answers = append(d.Answers, DNSResourceRecord{})
+		if offset, err = d.Answers[i].decode(data, offset, df, &d.buffer); err != nil {
+			d.Answers = d.Answers[:i] // strip off erroneous value
+			return err
+		}
+	}
+	for i := 0; i < int(d.NSCount); i++ {
+		d.Authorities = append(d.Authorities, DNSResourceRecord{})
+		if offset, err = d.Authorities[i].decode(data, offset, df, &d.buffer); err != nil {
+			d.Authorities = d.Authorities[:i] // strip off erroneous value
+			return err
+		}
+	}
+	for i := 0; i < int(d.ARCount); i++ {
+		d.Additionals = append(d.Additionals, DNSResourceRecord{})
+		if offset, err = d.Additionals[i].decode(data, offset, df, &d.buffer); err != nil {
+			d.Additionals = d.Additionals[:i] // strip off erroneous value
+			return err
+		}
+		// extract extended RCODE from OPT RRs, RFC 6891 section 6.1.3
+		if d.Additionals[i].Type == DNSTypeOPT {
+			d.ResponseCode = DNSResponseCode(uint8(d.ResponseCode) | uint8(d.Additionals[i].TTL>>20&0xF0))
+		}
+	}
+
+	if uint16(len(d.Questions)) != d.QDCount {
+		return errDecodeQueryBadQDCount
+	} else if uint16(len(d.Answers)) != d.ANCount {
+		return errDecodeQueryBadANCount
+	} else if uint16(len(d.Authorities)) != d.NSCount {
+		return errDecodeQueryBadNSCount
+	} else if uint16(len(d.Additionals)) != d.ARCount {
+		return errDecodeQueryBadARCount
+	}
+	return nil
+}
+
+// CanDecode implements gopacket.DecodingLayer.
+func (d *DNS) CanDecode() gopacket.LayerClass {
+	return LayerTypeDNS
+}
+
+// NextLayerType implements gopacket.DecodingLayer.
+func (d *DNS) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// Payload returns nil.
+func (d *DNS) Payload() []byte {
+	return nil
+}
+
+func b2i(b bool) int {
+	if b {
+		return 1
+	}
+	return 0
+}
+
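+// recSize returns the number of RDATA bytes that rr will occupy when
+// serialized, based on its decoded per-type fields.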
+func recSize(rr *DNSResourceRecord) int {
+	switch rr.Type {
+	case DNSTypeA:
+		return 4
+	case DNSTypeAAAA:
+		return 16
+	case DNSTypeNS:
+		return len(rr.NS) + 2
+	case DNSTypeCNAME:
+		return len(rr.CNAME) + 2
+	case DNSTypePTR:
+		return len(rr.PTR) + 2
+	case DNSTypeSOA:
+		return len(rr.SOA.MName) + 2 + len(rr.SOA.RName) + 2 + 20
+	case DNSTypeMX:
+		return 2 + len(rr.MX.Name) + 2
+	case DNSTypeTXT:
+		l := len(rr.TXTs)
+		for _, txt := range rr.TXTs {
+			l += len(txt)
+		}
+		return l
+	case DNSTypeSRV:
+		return 6 + len(rr.SRV.Name) + 2
+	case DNSTypeURI:
+		return 4 + len(rr.URI.Target)
+	case DNSTypeOPT:
+		l := len(rr.OPT) * 4
+		for _, opt := range rr.OPT {
+			l += len(opt.Data)
+		}
+		return l
+	}
+
+	return 0
+}
+
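+// computeSize returns the total serialized size in bytes of the given
+// resource records: encoded names, fixed headers and RDATA.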
+func computeSize(recs []DNSResourceRecord) int {
+	sz := 0
+	for _, rr := range recs {
+		v := len(rr.Name)
+
+		if v == 0 {
+			sz += v + 11
+		} else {
+			sz += v + 12
+		}
+
+		sz += recSize(&rr)
+	}
+	return sz
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+func (d *DNS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	dsz := 0
+	for _, q := range d.Questions {
+		dsz += len(q.Name) + 6
+	}
+	dsz += computeSize(d.Answers)
+	dsz += computeSize(d.Authorities)
+	dsz += computeSize(d.Additionals)
+
+	bytes, err := b.PrependBytes(12 + dsz)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, d.ID)
+	bytes[2] = byte((b2i(d.QR) << 7) | (int(d.OpCode) << 3) | (b2i(d.AA) << 2) | (b2i(d.TC) << 1) | b2i(d.RD))
+	bytes[3] = byte((b2i(d.RA) << 7) | (int(d.Z) << 4) | int(d.ResponseCode))
+
+	if opts.FixLengths {
+		d.QDCount = uint16(len(d.Questions))
+		d.ANCount = uint16(len(d.Answers))
+		d.NSCount = uint16(len(d.Authorities))
+		d.ARCount = uint16(len(d.Additionals))
+	}
+	binary.BigEndian.PutUint16(bytes[4:], d.QDCount)
+	binary.BigEndian.PutUint16(bytes[6:], d.ANCount)
+	binary.BigEndian.PutUint16(bytes[8:], d.NSCount)
+	binary.BigEndian.PutUint16(bytes[10:], d.ARCount)
+
+	off := 12
+	for _, qd := range d.Questions {
+		n := qd.encode(bytes, off)
+		off += n
+	}
+
+	for i := range d.Answers {
+		// done this way so we can modify DNSResourceRecord to fix
+		// lengths if requested
+		qa := &d.Answers[i]
+		n, err := qa.encode(bytes, off, opts)
+		if err != nil {
+			return err
+		}
+		off += n
+	}
+
+	for i := range d.Authorities {
+		qa := &d.Authorities[i]
+		n, err := qa.encode(bytes, off, opts)
+		if err != nil {
+			return err
+		}
+		off += n
+	}
+	for i := range d.Additionals {
+		qa := &d.Additionals[i]
+		n, err := qa.encode(bytes, off, opts)
+		if err != nil {
+			return err
+		}
+		off += n
+	}
+
+	return nil
+}
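+
+// Illustrative sketch (added for clarity, not part of the upstream source):
+// building and serializing a minimal response.  All field values below are
+// hypothetical.
+//
+//	msg := &DNS{QR: true, ResponseCode: DNSResponseCodeNoErr}
+//	msg.Answers = append(msg.Answers, DNSResourceRecord{
+//		Name:  []byte("example.com"),
+//		Type:  DNSTypeA,
+//		Class: DNSClassIN,
+//		TTL:   300,
+//		IP:    net.IPv4(192, 0, 2, 1),
+//	})
+//	buf := gopacket.NewSerializeBuffer()
+//	opts := gopacket.SerializeOptions{FixLengths: true}
+//	err := msg.SerializeTo(buf, opts) // on success, buf.Bytes() holds the wire form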
+
+const maxRecursionLevel = 255
+
+func decodeName(data []byte, offset int, buffer *[]byte, level int) ([]byte, int, error) {
+	if level > maxRecursionLevel {
+		return nil, 0, errMaxRecursion
+	} else if offset >= len(data) {
+		return nil, 0, errDNSNameOffsetTooHigh
+	} else if offset < 0 {
+		return nil, 0, errDNSNameOffsetNegative
+	}
+	start := len(*buffer)
+	index := offset
+	if data[index] == 0x00 {
+		return nil, index + 1, nil
+	}
+loop:
+	for data[index] != 0x00 {
+		switch data[index] & 0xc0 {
+		default:
+			/* RFC 1035
+			   A domain name represented as a sequence of labels, where
+			   each label consists of a length octet followed by that
+			   number of octets.  The domain name terminates with the
+			   zero length octet for the null label of the root.  Note
+			   that this field may be an odd number of octets; no
+			   padding is used.
+			*/
+			index2 := index + int(data[index]) + 1
+			if index2-offset > 255 {
+				return nil, 0, errDNSNameTooLong
+			} else if index2 < index+1 || index2 > len(data) {
+				return nil, 0, errDNSNameInvalidIndex
+			}
+			*buffer = append(*buffer, '.')
+			*buffer = append(*buffer, data[index+1:index2]...)
+			index = index2
+
+		case 0xc0:
+			/* RFC 1035
+			   The pointer takes the form of a two octet sequence.
+
+			   The first two bits are ones.  This allows a pointer to
+			   be distinguished from a label, since the label must
+			   begin with two zero bits because labels are restricted
+			   to 63 octets or less.  (The 10 and 01 combinations are
+			   reserved for future use.)  The OFFSET field specifies
+			   an offset from the start of the message (i.e., the
+			   first octet of the ID field in the domain header).  A
+			   zero offset specifies the first byte of the ID field,
+			   etc.
+
+			   The compression scheme allows a domain name in a message to be
+			   represented as either:
+			      - a sequence of labels ending in a zero octet
+			      - a pointer
+			      - a sequence of labels ending with a pointer
+			*/
+			if index+2 > len(data) {
+				return nil, 0, errDNSPointerOffsetTooHigh
+			}
+			offsetp := int(binary.BigEndian.Uint16(data[index:index+2]) & 0x3fff)
+			if offsetp > len(data) {
+				return nil, 0, errDNSPointerOffsetTooHigh
+			}
+			// This looks a little tricky, but actually isn't.  Because of how
+			// decodeName is written, calling it appends the decoded name to the
+			// current buffer.  We already have the start of the buffer, then, so
+			// once this call is done buffer[start:] will contain our full name.
+			_, _, err := decodeName(data, offsetp, buffer, level+1)
+			if err != nil {
+				return nil, 0, err
+			}
+			index++ // pointer is two bytes, so add an extra byte here.
+			break loop
+		/* EDNS, or other DNS option ? */
+		case 0x40: // RFC 2673
+			return nil, 0, fmt.Errorf("qname '0x40' - RFC 2673 unsupported yet (data=%x index=%d)",
+				data[index], index)
+
+		case 0x80:
+			return nil, 0, fmt.Errorf("qname '0x80' unsupported yet (data=%x index=%d)",
+				data[index], index)
+		}
+		if index >= len(data) {
+			return nil, 0, errDNSIndexOutOfRange
+		}
+	}
+	if len(*buffer) <= start {
+		return (*buffer)[start:], index + 1, nil
+	}
+	return (*buffer)[start+1:], index + 1, nil
+}
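+
+// Worked example (added for clarity, not part of the upstream source): the
+// name "www.example.com" appears on the wire as the label sequence
+//
+//	0x03 'w' 'w' 'w'  0x07 'e' 'x' 'a' 'm' 'p' 'l' 'e'  0x03 'c' 'o' 'm'  0x00
+//
+// A later occurrence of the same name may instead be encoded as the two-byte
+// pointer 0xC0 0x0C (top two bits set, offset 12, i.e. the first name after
+// the DNS header), which decodeName follows recursively and appends to
+// buffer, yielding the same "www.example.com".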
+
+// DNSQuestion wraps a single request (question) within a DNS query.
+type DNSQuestion struct {
+	Name  []byte
+	Type  DNSType
+	Class DNSClass
+}
+
+func (q *DNSQuestion) decode(data []byte, offset int, df gopacket.DecodeFeedback, buffer *[]byte) (int, error) {
+	name, endq, err := decodeName(data, offset, buffer, 1)
+	if err != nil {
+		return 0, err
+	}
+
+	q.Name = name
+	q.Type = DNSType(binary.BigEndian.Uint16(data[endq : endq+2]))
+	q.Class = DNSClass(binary.BigEndian.Uint16(data[endq+2 : endq+4]))
+
+	return endq + 4, nil
+}
+
+func (q *DNSQuestion) encode(data []byte, offset int) int {
+	noff := encodeName(q.Name, data, offset)
+	nSz := noff - offset
+	binary.BigEndian.PutUint16(data[noff:], uint16(q.Type))
+	binary.BigEndian.PutUint16(data[noff+2:], uint16(q.Class))
+	return nSz + 4
+}
+
+//  DNSResourceRecord
+//  0  1  2  3  4  5  6  7  8  9  0  1  2  3  4  5
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                                               |
+//  /                                               /
+//  /                      NAME                     /
+//  |                                               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                      TYPE                     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                     CLASS                     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                      TTL                      |
+//  |                                               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   RDLENGTH                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                     RDATA                     /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// DNSResourceRecord wraps the data from a single DNS resource within a
+// response.
+type DNSResourceRecord struct {
+	// Header
+	Name  []byte
+	Type  DNSType
+	Class DNSClass
+	TTL   uint32
+
+	// RDATA Raw Values
+	DataLength uint16
+	Data       []byte
+
+	// RDATA Decoded Values
+	IP             net.IP
+	NS, CNAME, PTR []byte
+	TXTs           [][]byte
+	SOA            DNSSOA
+	SRV            DNSSRV
+	MX             DNSMX
+	OPT            []DNSOPT // See RFC 6891, section 6.1.2
+	URI            DNSURI
+
+	// Undecoded TXT for backward compatibility
+	TXT []byte
+}
+
+// decode decodes the resource record, returning the offset in data
+// immediately following the record.
+func (rr *DNSResourceRecord) decode(data []byte, offset int, df gopacket.DecodeFeedback, buffer *[]byte) (int, error) {
+	name, endq, err := decodeName(data, offset, buffer, 1)
+	if err != nil {
+		return 0, err
+	}
+
+	rr.Name = name
+	rr.Type = DNSType(binary.BigEndian.Uint16(data[endq : endq+2]))
+	rr.Class = DNSClass(binary.BigEndian.Uint16(data[endq+2 : endq+4]))
+	rr.TTL = binary.BigEndian.Uint32(data[endq+4 : endq+8])
+	rr.DataLength = binary.BigEndian.Uint16(data[endq+8 : endq+10])
+	end := endq + 10 + int(rr.DataLength)
+	if end > len(data) {
+		return 0, errDecodeRecordLength
+	}
+	rr.Data = data[endq+10 : end]
+
+	if rr.DataLength > 0 {
+		if err = rr.decodeRData(data[:end], endq+10, buffer); err != nil {
+			return 0, err
+		}
+	}
+
+	return endq + 10 + int(rr.DataLength), nil
+}
+
+func encodeName(name []byte, data []byte, offset int) int {
+	l := 0
+	for i := range name {
+		if name[i] == '.' {
+			data[offset+i-l] = byte(l)
+			l = 0
+		} else {
+			// skip one to write the length
+			data[offset+i+1] = name[i]
+			l++
+		}
+	}
+
+	if len(name) == 0 {
+		data[offset] = 0x00 // terminal
+		return offset + 1
+	}
+
+	// length for final portion
+	data[offset+len(name)-l] = byte(l)
+	data[offset+len(name)+1] = 0x00 // terminal
+	return offset + len(name) + 2
+}
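+
+// Worked example (added for clarity, not part of the upstream source):
+// encodeName turns a dotted name into length-prefixed labels, so
+//
+//	encodeName([]byte("www.example.com"), data, off)
+//
+// writes 0x03 "www" 0x07 "example" 0x03 "com" 0x00 at data[off:] and returns
+// off+17 (the name itself plus one leading length byte and the terminating
+// zero byte).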
+
+func (rr *DNSResourceRecord) encode(data []byte, offset int, opts gopacket.SerializeOptions) (int, error) {
+
+	noff := encodeName(rr.Name, data, offset)
+	nSz := noff - offset
+
+	binary.BigEndian.PutUint16(data[noff:], uint16(rr.Type))
+	binary.BigEndian.PutUint16(data[noff+2:], uint16(rr.Class))
+	binary.BigEndian.PutUint32(data[noff+4:], uint32(rr.TTL))
+
+	switch rr.Type {
+	case DNSTypeA:
+		copy(data[noff+10:], rr.IP.To4())
+	case DNSTypeAAAA:
+		copy(data[noff+10:], rr.IP)
+	case DNSTypeNS:
+		encodeName(rr.NS, data, noff+10)
+	case DNSTypeCNAME:
+		encodeName(rr.CNAME, data, noff+10)
+	case DNSTypePTR:
+		encodeName(rr.PTR, data, noff+10)
+	case DNSTypeSOA:
+		noff2 := encodeName(rr.SOA.MName, data, noff+10)
+		noff2 = encodeName(rr.SOA.RName, data, noff2)
+		binary.BigEndian.PutUint32(data[noff2:], rr.SOA.Serial)
+		binary.BigEndian.PutUint32(data[noff2+4:], rr.SOA.Refresh)
+		binary.BigEndian.PutUint32(data[noff2+8:], rr.SOA.Retry)
+		binary.BigEndian.PutUint32(data[noff2+12:], rr.SOA.Expire)
+		binary.BigEndian.PutUint32(data[noff2+16:], rr.SOA.Minimum)
+	case DNSTypeMX:
+		binary.BigEndian.PutUint16(data[noff+10:], rr.MX.Preference)
+		encodeName(rr.MX.Name, data, noff+12)
+	case DNSTypeTXT:
+		noff2 := noff + 10
+		for _, txt := range rr.TXTs {
+			data[noff2] = byte(len(txt))
+			copy(data[noff2+1:], txt)
+			noff2 += 1 + len(txt)
+		}
+	case DNSTypeSRV:
+		binary.BigEndian.PutUint16(data[noff+10:], rr.SRV.Priority)
+		binary.BigEndian.PutUint16(data[noff+12:], rr.SRV.Weight)
+		binary.BigEndian.PutUint16(data[noff+14:], rr.SRV.Port)
+		encodeName(rr.SRV.Name, data, noff+16)
+	case DNSTypeURI:
+		binary.BigEndian.PutUint16(data[noff+10:], rr.URI.Priority)
+		binary.BigEndian.PutUint16(data[noff+12:], rr.URI.Weight)
+		copy(data[noff+14:], rr.URI.Target)
+	case DNSTypeOPT:
+		noff2 := noff + 10
+		for _, opt := range rr.OPT {
+			binary.BigEndian.PutUint16(data[noff2:], uint16(opt.Code))
+			binary.BigEndian.PutUint16(data[noff2+2:], uint16(len(opt.Data)))
+			copy(data[noff2+4:], opt.Data)
+			noff2 += 4 + len(opt.Data)
+		}
+	default:
+		return 0, fmt.Errorf("serializing resource record of type %v not supported", rr.Type)
+	}
+
+	// DataLength
+	dSz := recSize(rr)
+	binary.BigEndian.PutUint16(data[noff+8:], uint16(dSz))
+
+	if opts.FixLengths {
+		rr.DataLength = uint16(dSz)
+	}
+
+	return nSz + 10 + dSz, nil
+}
+
+func (rr *DNSResourceRecord) String() string {
+
+	if rr.Type == DNSTypeOPT {
+		opts := make([]string, len(rr.OPT))
+		for i, opt := range rr.OPT {
+			opts[i] = opt.String()
+		}
+		return "OPT " + strings.Join(opts, ",")
+	}
+	if rr.Type == DNSTypeURI {
+		return fmt.Sprintf("URI %d %d %s", rr.URI.Priority, rr.URI.Weight, string(rr.URI.Target))
+	}
+	if rr.Class == DNSClassIN {
+		switch rr.Type {
+		case DNSTypeA, DNSTypeAAAA:
+			return rr.IP.String()
+		case DNSTypeNS:
+			return "NS " + string(rr.NS)
+		case DNSTypeCNAME:
+			return "CNAME " + string(rr.CNAME)
+		case DNSTypePTR:
+			return "PTR " + string(rr.PTR)
+		case DNSTypeTXT:
+			return "TXT " + string(rr.TXT)
+		}
+	}
+
+	return fmt.Sprintf("<%v, %v>", rr.Class, rr.Type)
+}
+
+func decodeCharacterStrings(data []byte) ([][]byte, error) {
+	strings := make([][]byte, 0, 1)
+	end := len(data)
+	for index, index2 := 0, 0; index != end; index = index2 {
+		index2 = index + 1 + int(data[index]) // index increases by 1..256 and does not overflow
+		if index2 > end {
+			return nil, errCharStringMissData
+		}
+		strings = append(strings, data[index+1:index2])
+	}
+	return strings, nil
+}
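+
+// Worked example (added for clarity, not part of the upstream source): TXT
+// RDATA is a sequence of <character-string>s, each a length byte followed by
+// that many bytes of data, so the RDATA
+//
+//	0x05 'h' 'e' 'l' 'l' 'o'  0x05 'w' 'o' 'r' 'l' 'd'
+//
+// decodes to the two strings "hello" and "world".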
+
+func decodeOPTs(data []byte, offset int) ([]DNSOPT, error) {
+	allOPT := []DNSOPT{}
+	end := len(data)
+
+	if offset == end {
+		return allOPT, nil // There is no data to read
+	}
+
+	if offset+4 > end {
+		return allOPT, fmt.Errorf("DNSOPT record is of length %d, it should be at least length 4", end-offset)
+	}
+
+	for i := offset; i < end; {
+		opt := DNSOPT{}
+		if len(data) < i+4 {
+			return allOPT, fmt.Errorf("Malformed DNSOPT record.  Length %d < %d", len(data), i+4)
+		}
+		opt.Code = DNSOptionCode(binary.BigEndian.Uint16(data[i : i+2]))
+		l := binary.BigEndian.Uint16(data[i+2 : i+4])
+		if i+4+int(l) > end {
+			return allOPT, fmt.Errorf("Malformed DNSOPT record. The length (%d) field implies a packet larger than the one received", l)
+		}
+		opt.Data = data[i+4 : i+4+int(l)]
+		allOPT = append(allOPT, opt)
+		i += int(l) + 4
+	}
+	return allOPT, nil
+}
+
+func (rr *DNSResourceRecord) decodeRData(data []byte, offset int, buffer *[]byte) error {
+	switch rr.Type {
+	case DNSTypeA:
+		rr.IP = rr.Data
+	case DNSTypeAAAA:
+		rr.IP = rr.Data
+	case DNSTypeTXT, DNSTypeHINFO:
+		rr.TXT = rr.Data
+		txts, err := decodeCharacterStrings(rr.Data)
+		if err != nil {
+			return err
+		}
+		rr.TXTs = txts
+	case DNSTypeNS:
+		name, _, err := decodeName(data, offset, buffer, 1)
+		if err != nil {
+			return err
+		}
+		rr.NS = name
+	case DNSTypeCNAME:
+		name, _, err := decodeName(data, offset, buffer, 1)
+		if err != nil {
+			return err
+		}
+		rr.CNAME = name
+	case DNSTypePTR:
+		name, _, err := decodeName(data, offset, buffer, 1)
+		if err != nil {
+			return err
+		}
+		rr.PTR = name
+	case DNSTypeSOA:
+		name, endq, err := decodeName(data, offset, buffer, 1)
+		if err != nil {
+			return err
+		}
+		rr.SOA.MName = name
+		name, endq, err = decodeName(data, endq, buffer, 1)
+		if err != nil {
+			return err
+		}
+		if len(data) < endq+20 {
+			return errors.New("SOA too small")
+		}
+		rr.SOA.RName = name
+		rr.SOA.Serial = binary.BigEndian.Uint32(data[endq : endq+4])
+		rr.SOA.Refresh = binary.BigEndian.Uint32(data[endq+4 : endq+8])
+		rr.SOA.Retry = binary.BigEndian.Uint32(data[endq+8 : endq+12])
+		rr.SOA.Expire = binary.BigEndian.Uint32(data[endq+12 : endq+16])
+		rr.SOA.Minimum = binary.BigEndian.Uint32(data[endq+16 : endq+20])
+	case DNSTypeMX:
+		if len(data) < offset+2 {
+			return errors.New("MX too small")
+		}
+		rr.MX.Preference = binary.BigEndian.Uint16(data[offset : offset+2])
+		name, _, err := decodeName(data, offset+2, buffer, 1)
+		if err != nil {
+			return err
+		}
+		rr.MX.Name = name
+	case DNSTypeURI:
+		if len(rr.Data) < 4 {
+			return errors.New("URI too small")
+		}
+		rr.URI.Priority = binary.BigEndian.Uint16(data[offset : offset+2])
+		rr.URI.Weight = binary.BigEndian.Uint16(data[offset+2 : offset+4])
+		rr.URI.Target = rr.Data[4:]
+	case DNSTypeSRV:
+		if len(data) < offset+6 {
+			return errors.New("SRV too small")
+		}
+		rr.SRV.Priority = binary.BigEndian.Uint16(data[offset : offset+2])
+		rr.SRV.Weight = binary.BigEndian.Uint16(data[offset+2 : offset+4])
+		rr.SRV.Port = binary.BigEndian.Uint16(data[offset+4 : offset+6])
+		name, _, err := decodeName(data, offset+6, buffer, 1)
+		if err != nil {
+			return err
+		}
+		rr.SRV.Name = name
+	case DNSTypeOPT:
+		allOPT, err := decodeOPTs(data, offset)
+		if err != nil {
+			return err
+		}
+		rr.OPT = allOPT
+	}
+	return nil
+}
+
+// DNSSOA is a Start of Authority record.  Each domain requires a SOA record at
+// the cutover where a domain is delegated from its parent.
+type DNSSOA struct {
+	MName, RName                            []byte
+	Serial, Refresh, Retry, Expire, Minimum uint32
+}
+
+// DNSSRV is a Service record, defining a location (hostname/port) of a
+// server/service.
+type DNSSRV struct {
+	Priority, Weight, Port uint16
+	Name                   []byte
+}
+
+// DNSMX is a mail exchange record, defining a mail server for a recipient's
+// domain.
+type DNSMX struct {
+	Preference uint16
+	Name       []byte
+}
+
+// DNSURI is a URI record, defining a target (URI) of a server/service.
+type DNSURI struct {
+	Priority, Weight uint16
+	Target           []byte
+}
+
+// DNSOptionCode represents the code of a DNS Option, see RFC6891, section 6.1.2
+type DNSOptionCode uint16
+
+func (doc DNSOptionCode) String() string {
+	switch doc {
+	default:
+		return "Unknown"
+	case DNSOptionCodeNSID:
+		return "NSID"
+	case DNSOptionCodeDAU:
+		return "DAU"
+	case DNSOptionCodeDHU:
+		return "DHU"
+	case DNSOptionCodeN3U:
+		return "N3U"
+	case DNSOptionCodeEDNSClientSubnet:
+		return "EDNSClientSubnet"
+	case DNSOptionCodeEDNSExpire:
+		return "EDNSExpire"
+	case DNSOptionCodeCookie:
+		return "Cookie"
+	case DNSOptionCodeEDNSKeepAlive:
+		return "EDNSKeepAlive"
+	case DNSOptionCodePadding:
+		return "CodePadding"
+	case DNSOptionCodeChain:
+		return "CodeChain"
+	case DNSOptionCodeEDNSKeyTag:
+		return "CodeEDNSKeyTag"
+	case DNSOptionCodeEDNSClientTag:
+		return "EDNSClientTag"
+	case DNSOptionCodeEDNSServerTag:
+		return "EDNSServerTag"
+	case DNSOptionCodeDeviceID:
+		return "DeviceID"
+	}
+}
+
+// DNSOptionCode known values, as assigned by IANA (DNS EDNS0 Option Codes registry).
+const (
+	DNSOptionCodeNSID             DNSOptionCode = 3
+	DNSOptionCodeDAU              DNSOptionCode = 5
+	DNSOptionCodeDHU              DNSOptionCode = 6
+	DNSOptionCodeN3U              DNSOptionCode = 7
+	DNSOptionCodeEDNSClientSubnet DNSOptionCode = 8
+	DNSOptionCodeEDNSExpire       DNSOptionCode = 9
+	DNSOptionCodeCookie           DNSOptionCode = 10
+	DNSOptionCodeEDNSKeepAlive    DNSOptionCode = 11
+	DNSOptionCodePadding          DNSOptionCode = 12
+	DNSOptionCodeChain            DNSOptionCode = 13
+	DNSOptionCodeEDNSKeyTag       DNSOptionCode = 14
+	DNSOptionCodeEDNSClientTag    DNSOptionCode = 16
+	DNSOptionCodeEDNSServerTag    DNSOptionCode = 17
+	DNSOptionCodeDeviceID         DNSOptionCode = 26946
+)
+
+// DNSOPT is a DNS Option, see RFC6891, section 6.1.2
+type DNSOPT struct {
+	Code DNSOptionCode
+	Data []byte
+}
+
+func (opt DNSOPT) String() string {
+	return fmt.Sprintf("%s=%x", opt.Code, opt.Data)
+}
+
+var (
+	errMaxRecursion = errors.New("max DNS recursion level hit")
+
+	errDNSNameOffsetTooHigh    = errors.New("dns name offset too high")
+	errDNSNameOffsetNegative   = errors.New("dns name offset is negative")
+	errDNSPacketTooShort       = errors.New("DNS packet too short")
+	errDNSNameTooLong          = errors.New("dns name is too long")
+	errDNSNameInvalidIndex     = errors.New("dns name uncomputable: invalid index")
+	errDNSPointerOffsetTooHigh = errors.New("dns offset pointer too high")
+	errDNSIndexOutOfRange      = errors.New("dns index walked out of range")
+	errDNSNameHasNoData        = errors.New("no dns data found for name")
+
+	errCharStringMissData = errors.New("Insufficient data for a <character-string>")
+
+	errDecodeRecordLength = errors.New("resource record length exceeds data")
+
+	errDecodeQueryBadQDCount = errors.New("Invalid query decoding, not the right number of questions")
+	errDecodeQueryBadANCount = errors.New("Invalid query decoding, not the right number of answers")
+	errDecodeQueryBadNSCount = errors.New("Invalid query decoding, not the right number of authorities")
+	errDecodeQueryBadARCount = errors.New("Invalid query decoding, not the right number of additionals info")
+)
diff --git a/vendor/github.com/google/gopacket/layers/doc.go b/vendor/github.com/google/gopacket/layers/doc.go
new file mode 100644
index 0000000..3c882c3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/doc.go
@@ -0,0 +1,61 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+Package layers provides decoding layers for many common protocols.
+
+The layers package contains decode implementations for a number of different
+types of packet layers.  Users of gopacket will almost always want to also use
+layers to actually decode packet data into useful pieces. To see the set of
+protocols that gopacket/layers is currently able to decode,
+look at the set of LayerTypes defined in the Variables sections. The
+layers package also defines endpoints for many of the common packet layers
+that have source/destination addresses associated with them, for example IPv4/6
+(IPs) and TCP/UDP (ports).
+Finally, layers contains a number of useful enumerations (IPProtocol,
+EthernetType, LinkType, PPPType, etc...).  Many of these implement the
+gopacket.Decoder interface, so they can be passed into gopacket as decoders.
+
+Most common protocol layers are named using acronyms or other industry-common
+names (IPv4, TCP, PPP).  Some of the less common ones have their names expanded
+(CiscoDiscoveryProtocol).
+For certain protocols, sub-parts of the protocol are split out into their own
+layers (SCTP, for example).  This is done mostly in cases where portions of the
+protocol may fulfill the capabilities of interesting layers (SCTPData implements
+ApplicationLayer, while base SCTP implements TransportLayer), or possibly
+because splitting a protocol into a few layers makes decoding easier.
+
+This package is meant to be used with its parent,
+http://github.com/google/gopacket.
+
+Port Types
+
+Instead of using raw uint16 or uint8 values for ports, we use a different port
+type for every protocol, for example TCPPort and UDPPort.  This allows us to
+override string behavior for each port, which we do by setting up port name
+maps (TCPPortNames, UDPPortNames, etc...).  Well-known ports are annotated with
+their protocol names, and their String function displays these names:
+
+  p := TCPPort(80)
+  fmt.Printf("Number: %d  String: %v", p, p)
+  // Prints: "Number: 80  String: 80(http)"
+
+Modifying Decode Behavior
+
+layers links together decoding through its enumerations.  For example, after
+decoding layer type Ethernet, it uses Ethernet.EthernetType as its next decoder.
+All enumerations that act as decoders, like EthernetType, can be modified by
+users depending on their preferences.  For example, if you have a spiffy new
+IPv4 decoder that works way better than the one built into layers, you can do
+this:
+
+ var mySpiffyIPv4Decoder gopacket.Decoder = ...
+ layers.EthernetTypeMetadata[EthernetTypeIPv4].DecodeWith = mySpiffyIPv4Decoder
+
+This will make all future ethernet packets use your new decoder to decode IPv4
+packets, instead of the built-in decoder used by gopacket.
+*/
+package layers
diff --git a/vendor/github.com/google/gopacket/layers/dot11.go b/vendor/github.com/google/gopacket/layers/dot11.go
new file mode 100644
index 0000000..3e64910
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dot11.go
@@ -0,0 +1,2118 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// See http://standards.ieee.org/findstds/standard/802.11-2012.html for info on
+// all of the layers in this file.
+
+package layers
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"hash/crc32"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+// Dot11Flags contains the set of 8 flags in the IEEE 802.11 frame control
+// header, all in one place.
+type Dot11Flags uint8
+
+const (
+	Dot11FlagsToDS Dot11Flags = 1 << iota
+	Dot11FlagsFromDS
+	Dot11FlagsMF
+	Dot11FlagsRetry
+	Dot11FlagsPowerManagement
+	Dot11FlagsMD
+	Dot11FlagsWEP
+	Dot11FlagsOrder
+)
+
+func (d Dot11Flags) ToDS() bool {
+	return d&Dot11FlagsToDS != 0
+}
+func (d Dot11Flags) FromDS() bool {
+	return d&Dot11FlagsFromDS != 0
+}
+func (d Dot11Flags) MF() bool {
+	return d&Dot11FlagsMF != 0
+}
+func (d Dot11Flags) Retry() bool {
+	return d&Dot11FlagsRetry != 0
+}
+func (d Dot11Flags) PowerManagement() bool {
+	return d&Dot11FlagsPowerManagement != 0
+}
+func (d Dot11Flags) MD() bool {
+	return d&Dot11FlagsMD != 0
+}
+func (d Dot11Flags) WEP() bool {
+	return d&Dot11FlagsWEP != 0
+}
+func (d Dot11Flags) Order() bool {
+	return d&Dot11FlagsOrder != 0
+}
+
+// String provides a human readable string for Dot11Flags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Flags value, not its string.
+func (a Dot11Flags) String() string {
+	var out bytes.Buffer
+	if a.ToDS() {
+		out.WriteString("TO-DS,")
+	}
+	if a.FromDS() {
+		out.WriteString("FROM-DS,")
+	}
+	if a.MF() {
+		out.WriteString("MF,")
+	}
+	if a.Retry() {
+		out.WriteString("Retry,")
+	}
+	if a.PowerManagement() {
+		out.WriteString("PowerManagement,")
+	}
+	if a.MD() {
+		out.WriteString("MD,")
+	}
+	if a.WEP() {
+		out.WriteString("WEP,")
+	}
+	if a.Order() {
+		out.WriteString("Order,")
+	}
+
+	if length := out.Len(); length > 0 {
+		return string(out.Bytes()[:length-1]) // strip final comma
+	}
+	return ""
+}
+
+type Dot11Reason uint16
+
+// TODO: Verify these reasons, and append more reasons if necessary.
+
+const (
+	Dot11ReasonReserved          Dot11Reason = 1
+	Dot11ReasonUnspecified       Dot11Reason = 2
+	Dot11ReasonAuthExpired       Dot11Reason = 3
+	Dot11ReasonDeauthStLeaving   Dot11Reason = 4
+	Dot11ReasonInactivity        Dot11Reason = 5
+	Dot11ReasonApFull            Dot11Reason = 6
+	Dot11ReasonClass2FromNonAuth Dot11Reason = 7
+	Dot11ReasonClass3FromNonAss  Dot11Reason = 8
+	Dot11ReasonDisasStLeaving    Dot11Reason = 9
+	Dot11ReasonStNotAuth         Dot11Reason = 10
+)
+
+// String provides a human readable string for Dot11Reason.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Reason value, not its string.
+func (a Dot11Reason) String() string {
+	switch a {
+	case Dot11ReasonReserved:
+		return "Reserved"
+	case Dot11ReasonUnspecified:
+		return "Unspecified"
+	case Dot11ReasonAuthExpired:
+		return "Auth. expired"
+	case Dot11ReasonDeauthStLeaving:
+		return "Deauth. st. leaving"
+	case Dot11ReasonInactivity:
+		return "Inactivity"
+	case Dot11ReasonApFull:
+		return "Ap. full"
+	case Dot11ReasonClass2FromNonAuth:
+		return "Class2 from non auth."
+	case Dot11ReasonClass3FromNonAss:
+		return "Class3 from non ass."
+	case Dot11ReasonDisasStLeaving:
+		return "Disass st. leaving"
+	case Dot11ReasonStNotAuth:
+		return "St. not auth."
+	default:
+		return "Unknown reason"
+	}
+}
+
+type Dot11Status uint16
+
+const (
+	Dot11StatusSuccess                      Dot11Status = 0
+	Dot11StatusFailure                      Dot11Status = 1  // Unspecified failure
+	Dot11StatusCannotSupportAllCapabilities Dot11Status = 10 // Cannot support all requested capabilities in the Capability Information field
+	Dot11StatusInabilityExistsAssociation   Dot11Status = 11 // Reassociation denied due to inability to confirm that association exists
+	Dot11StatusAssociationDenied            Dot11Status = 12 // Association denied due to reason outside the scope of this standard
+	Dot11StatusAlgorithmUnsupported         Dot11Status = 13 // Responding station does not support the specified authentication algorithm
+	Dot11StatusOufOfExpectedSequence        Dot11Status = 14 // Received an Authentication frame with authentication transaction sequence number out of expected sequence
+	Dot11StatusChallengeFailure             Dot11Status = 15 // Authentication rejected because of challenge failure
+	Dot11StatusTimeout                      Dot11Status = 16 // Authentication rejected due to timeout waiting for next frame in sequence
+	Dot11StatusAPUnableToHandle             Dot11Status = 17 // Association denied because AP is unable to handle additional associated stations
+	Dot11StatusRateUnsupported              Dot11Status = 18 // Association denied due to requesting station not supporting all of the data rates in the BSSBasicRateSet parameter
+)
+
+// String provides a human readable string for Dot11Status.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Status value, not its string.
+func (a Dot11Status) String() string {
+	switch a {
+	case Dot11StatusSuccess:
+		return "success"
+	case Dot11StatusFailure:
+		return "failure"
+	case Dot11StatusCannotSupportAllCapabilities:
+		return "cannot-support-all-capabilities"
+	case Dot11StatusInabilityExistsAssociation:
+		return "inability-exists-association"
+	case Dot11StatusAssociationDenied:
+		return "association-denied"
+	case Dot11StatusAlgorithmUnsupported:
+		return "algorithm-unsupported"
+	case Dot11StatusOufOfExpectedSequence:
+		return "out-of-expected-sequence"
+	case Dot11StatusChallengeFailure:
+		return "challenge-failure"
+	case Dot11StatusTimeout:
+		return "timeout"
+	case Dot11StatusAPUnableToHandle:
+		return "ap-unable-to-handle"
+	case Dot11StatusRateUnsupported:
+		return "rate-unsupported"
+	default:
+		return "unknown status"
+	}
+}
+
+type Dot11AckPolicy uint8
+
+const (
+	Dot11AckPolicyNormal     Dot11AckPolicy = 0
+	Dot11AckPolicyNone       Dot11AckPolicy = 1
+	Dot11AckPolicyNoExplicit Dot11AckPolicy = 2
+	Dot11AckPolicyBlock      Dot11AckPolicy = 3
+)
+
+// String provides a human readable string for Dot11AckPolicy.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11AckPolicy value, not its string.
+func (a Dot11AckPolicy) String() string {
+	switch a {
+	case Dot11AckPolicyNormal:
+		return "normal-ack"
+	case Dot11AckPolicyNone:
+		return "no-ack"
+	case Dot11AckPolicyNoExplicit:
+		return "no-explicit-ack"
+	case Dot11AckPolicyBlock:
+		return "block-ack"
+	default:
+		return "unknown-ack-policy"
+	}
+}
+
+type Dot11Algorithm uint16
+
+const (
+	Dot11AlgorithmOpen      Dot11Algorithm = 0
+	Dot11AlgorithmSharedKey Dot11Algorithm = 1
+)
+
+// String provides a human readable string for Dot11Algorithm.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11Algorithm value, not its string.
+func (a Dot11Algorithm) String() string {
+	switch a {
+	case Dot11AlgorithmOpen:
+		return "open"
+	case Dot11AlgorithmSharedKey:
+		return "shared-key"
+	default:
+		return "unknown-algorithm"
+	}
+}
+
+type Dot11InformationElementID uint8
+
+const (
+	Dot11InformationElementIDSSID                      Dot11InformationElementID = 0
+	Dot11InformationElementIDRates                     Dot11InformationElementID = 1
+	Dot11InformationElementIDFHSet                     Dot11InformationElementID = 2
+	Dot11InformationElementIDDSSet                     Dot11InformationElementID = 3
+	Dot11InformationElementIDCFSet                     Dot11InformationElementID = 4
+	Dot11InformationElementIDTIM                       Dot11InformationElementID = 5
+	Dot11InformationElementIDIBSSSet                   Dot11InformationElementID = 6
+	Dot11InformationElementIDCountryInfo               Dot11InformationElementID = 7
+	Dot11InformationElementIDHoppingPatternParam       Dot11InformationElementID = 8
+	Dot11InformationElementIDHoppingPatternTable       Dot11InformationElementID = 9
+	Dot11InformationElementIDRequest                   Dot11InformationElementID = 10
+	Dot11InformationElementIDQBSSLoadElem              Dot11InformationElementID = 11
+	Dot11InformationElementIDEDCAParamSet              Dot11InformationElementID = 12
+	Dot11InformationElementIDTrafficSpec               Dot11InformationElementID = 13
+	Dot11InformationElementIDTrafficClass              Dot11InformationElementID = 14
+	Dot11InformationElementIDSchedule                  Dot11InformationElementID = 15
+	Dot11InformationElementIDChallenge                 Dot11InformationElementID = 16
+	Dot11InformationElementIDPowerConst                Dot11InformationElementID = 32
+	Dot11InformationElementIDPowerCapability           Dot11InformationElementID = 33
+	Dot11InformationElementIDTPCRequest                Dot11InformationElementID = 34
+	Dot11InformationElementIDTPCReport                 Dot11InformationElementID = 35
+	Dot11InformationElementIDSupportedChannels         Dot11InformationElementID = 36
+	Dot11InformationElementIDSwitchChannelAnnounce     Dot11InformationElementID = 37
+	Dot11InformationElementIDMeasureRequest            Dot11InformationElementID = 38
+	Dot11InformationElementIDMeasureReport             Dot11InformationElementID = 39
+	Dot11InformationElementIDQuiet                     Dot11InformationElementID = 40
+	Dot11InformationElementIDIBSSDFS                   Dot11InformationElementID = 41
+	Dot11InformationElementIDERPInfo                   Dot11InformationElementID = 42
+	Dot11InformationElementIDTSDelay                   Dot11InformationElementID = 43
+	Dot11InformationElementIDTCLASProcessing           Dot11InformationElementID = 44
+	Dot11InformationElementIDHTCapabilities            Dot11InformationElementID = 45
+	Dot11InformationElementIDQOSCapability             Dot11InformationElementID = 46
+	Dot11InformationElementIDERPInfo2                  Dot11InformationElementID = 47
+	Dot11InformationElementIDRSNInfo                   Dot11InformationElementID = 48
+	Dot11InformationElementIDESRates                   Dot11InformationElementID = 50
+	Dot11InformationElementIDAPChannelReport           Dot11InformationElementID = 51
+	Dot11InformationElementIDNeighborReport            Dot11InformationElementID = 52
+	Dot11InformationElementIDRCPI                      Dot11InformationElementID = 53
+	Dot11InformationElementIDMobilityDomain            Dot11InformationElementID = 54
+	Dot11InformationElementIDFastBSSTrans              Dot11InformationElementID = 55
+	Dot11InformationElementIDTimeoutInt                Dot11InformationElementID = 56
+	Dot11InformationElementIDRICData                   Dot11InformationElementID = 57
+	Dot11InformationElementIDDSERegisteredLoc          Dot11InformationElementID = 58
+	Dot11InformationElementIDSuppOperatingClass        Dot11InformationElementID = 59
+	Dot11InformationElementIDExtChanSwitchAnnounce     Dot11InformationElementID = 60
+	Dot11InformationElementIDHTInfo                    Dot11InformationElementID = 61
+	Dot11InformationElementIDSecChanOffset             Dot11InformationElementID = 62
+	Dot11InformationElementIDBSSAverageAccessDelay     Dot11InformationElementID = 63
+	Dot11InformationElementIDAntenna                   Dot11InformationElementID = 64
+	Dot11InformationElementIDRSNI                      Dot11InformationElementID = 65
+	Dot11InformationElementIDMeasurePilotTrans         Dot11InformationElementID = 66
+	Dot11InformationElementIDBSSAvailAdmCapacity       Dot11InformationElementID = 67
+	Dot11InformationElementIDBSSACAccDelayWAPIParam    Dot11InformationElementID = 68
+	Dot11InformationElementIDTimeAdvertisement         Dot11InformationElementID = 69
+	Dot11InformationElementIDRMEnabledCapabilities     Dot11InformationElementID = 70
+	Dot11InformationElementIDMultipleBSSID             Dot11InformationElementID = 71
+	Dot11InformationElementID2040BSSCoExist            Dot11InformationElementID = 72
+	Dot11InformationElementID2040BSSIntChanReport      Dot11InformationElementID = 73
+	Dot11InformationElementIDOverlapBSSScanParam       Dot11InformationElementID = 74
+	Dot11InformationElementIDRICDescriptor             Dot11InformationElementID = 75
+	Dot11InformationElementIDManagementMIC             Dot11InformationElementID = 76
+	Dot11InformationElementIDEventRequest              Dot11InformationElementID = 78
+	Dot11InformationElementIDEventReport               Dot11InformationElementID = 79
+	Dot11InformationElementIDDiagnosticRequest         Dot11InformationElementID = 80
+	Dot11InformationElementIDDiagnosticReport          Dot11InformationElementID = 81
+	Dot11InformationElementIDLocationParam             Dot11InformationElementID = 82
+	Dot11InformationElementIDNonTransBSSIDCapability   Dot11InformationElementID = 83
+	Dot11InformationElementIDSSIDList                  Dot11InformationElementID = 84
+	Dot11InformationElementIDMultipleBSSIDIndex        Dot11InformationElementID = 85
+	Dot11InformationElementIDFMSDescriptor             Dot11InformationElementID = 86
+	Dot11InformationElementIDFMSRequest                Dot11InformationElementID = 87
+	Dot11InformationElementIDFMSResponse               Dot11InformationElementID = 88
+	Dot11InformationElementIDQOSTrafficCapability      Dot11InformationElementID = 89
+	Dot11InformationElementIDBSSMaxIdlePeriod          Dot11InformationElementID = 90
+	Dot11InformationElementIDTFSRequest                Dot11InformationElementID = 91
+	Dot11InformationElementIDTFSResponse               Dot11InformationElementID = 92
+	Dot11InformationElementIDWNMSleepMode              Dot11InformationElementID = 93
+	Dot11InformationElementIDTIMBroadcastRequest       Dot11InformationElementID = 94
+	Dot11InformationElementIDTIMBroadcastResponse      Dot11InformationElementID = 95
+	Dot11InformationElementIDCollInterferenceReport    Dot11InformationElementID = 96
+	Dot11InformationElementIDChannelUsage              Dot11InformationElementID = 97
+	Dot11InformationElementIDTimeZone                  Dot11InformationElementID = 98
+	Dot11InformationElementIDDMSRequest                Dot11InformationElementID = 99
+	Dot11InformationElementIDDMSResponse               Dot11InformationElementID = 100
+	Dot11InformationElementIDLinkIdentifier            Dot11InformationElementID = 101
+	Dot11InformationElementIDWakeupSchedule            Dot11InformationElementID = 102
+	Dot11InformationElementIDChannelSwitchTiming       Dot11InformationElementID = 104
+	Dot11InformationElementIDPTIControl                Dot11InformationElementID = 105
+	Dot11InformationElementIDPUBufferStatus            Dot11InformationElementID = 106
+	Dot11InformationElementIDInterworking              Dot11InformationElementID = 107
+	Dot11InformationElementIDAdvertisementProtocol     Dot11InformationElementID = 108
+	Dot11InformationElementIDExpBWRequest              Dot11InformationElementID = 109
+	Dot11InformationElementIDQOSMapSet                 Dot11InformationElementID = 110
+	Dot11InformationElementIDRoamingConsortium         Dot11InformationElementID = 111
+	Dot11InformationElementIDEmergencyAlertIdentifier  Dot11InformationElementID = 112
+	Dot11InformationElementIDMeshConfiguration         Dot11InformationElementID = 113
+	Dot11InformationElementIDMeshID                    Dot11InformationElementID = 114
+	Dot11InformationElementIDMeshLinkMetricReport      Dot11InformationElementID = 115
+	Dot11InformationElementIDCongestionNotification    Dot11InformationElementID = 116
+	Dot11InformationElementIDMeshPeeringManagement     Dot11InformationElementID = 117
+	Dot11InformationElementIDMeshChannelSwitchParam    Dot11InformationElementID = 118
+	Dot11InformationElementIDMeshAwakeWindows          Dot11InformationElementID = 119
+	Dot11InformationElementIDBeaconTiming              Dot11InformationElementID = 120
+	Dot11InformationElementIDMCCAOPSetupRequest        Dot11InformationElementID = 121
+	Dot11InformationElementIDMCCAOPSetupReply          Dot11InformationElementID = 122
+	Dot11InformationElementIDMCCAOPAdvertisement       Dot11InformationElementID = 123
+	Dot11InformationElementIDMCCAOPTeardown            Dot11InformationElementID = 124
+	Dot11InformationElementIDGateAnnouncement          Dot11InformationElementID = 125
+	Dot11InformationElementIDRootAnnouncement          Dot11InformationElementID = 126
+	Dot11InformationElementIDExtCapability             Dot11InformationElementID = 127
+	Dot11InformationElementIDAgereProprietary          Dot11InformationElementID = 128
+	Dot11InformationElementIDPathRequest               Dot11InformationElementID = 130
+	Dot11InformationElementIDPathReply                 Dot11InformationElementID = 131
+	Dot11InformationElementIDPathError                 Dot11InformationElementID = 132
+	Dot11InformationElementIDCiscoCCX1CKIPDeviceName   Dot11InformationElementID = 133
+	Dot11InformationElementIDCiscoCCX2                 Dot11InformationElementID = 136
+	Dot11InformationElementIDProxyUpdate               Dot11InformationElementID = 137
+	Dot11InformationElementIDProxyUpdateConfirmation   Dot11InformationElementID = 138
+	Dot11InformationElementIDAuthMeshPerringExch       Dot11InformationElementID = 139
+	Dot11InformationElementIDMIC                       Dot11InformationElementID = 140
+	Dot11InformationElementIDDestinationURI            Dot11InformationElementID = 141
+	Dot11InformationElementIDUAPSDCoexistence          Dot11InformationElementID = 142
+	Dot11InformationElementIDWakeupSchedule80211ad     Dot11InformationElementID = 143
+	Dot11InformationElementIDExtendedSchedule          Dot11InformationElementID = 144
+	Dot11InformationElementIDSTAAvailability           Dot11InformationElementID = 145
+	Dot11InformationElementIDDMGTSPEC                  Dot11InformationElementID = 146
+	Dot11InformationElementIDNextDMGATI                Dot11InformationElementID = 147
+	Dot11InformationElementIDDMSCapabilities           Dot11InformationElementID = 148
+	Dot11InformationElementIDCiscoUnknown95            Dot11InformationElementID = 149
+	Dot11InformationElementIDVendor2                   Dot11InformationElementID = 150
+	Dot11InformationElementIDDMGOperating              Dot11InformationElementID = 151
+	Dot11InformationElementIDDMGBSSParamChange         Dot11InformationElementID = 152
+	Dot11InformationElementIDDMGBeamRefinement         Dot11InformationElementID = 153
+	Dot11InformationElementIDChannelMeasFeedback       Dot11InformationElementID = 154
+	Dot11InformationElementIDAwakeWindow               Dot11InformationElementID = 157
+	Dot11InformationElementIDMultiBand                 Dot11InformationElementID = 158
+	Dot11InformationElementIDADDBAExtension            Dot11InformationElementID = 159
+	Dot11InformationElementIDNEXTPCPList               Dot11InformationElementID = 160
+	Dot11InformationElementIDPCPHandover               Dot11InformationElementID = 161
+	Dot11InformationElementIDDMGLinkMargin             Dot11InformationElementID = 162
+	Dot11InformationElementIDSwitchingStream           Dot11InformationElementID = 163
+	Dot11InformationElementIDSessionTransmission       Dot11InformationElementID = 164
+	Dot11InformationElementIDDynamicTonePairReport     Dot11InformationElementID = 165
+	Dot11InformationElementIDClusterReport             Dot11InformationElementID = 166
+	Dot11InformationElementIDRelayCapabilities         Dot11InformationElementID = 167
+	Dot11InformationElementIDRelayTransferParameter    Dot11InformationElementID = 168
+	Dot11InformationElementIDBeamlinkMaintenance       Dot11InformationElementID = 169
+	Dot11InformationElementIDMultipleMacSublayers      Dot11InformationElementID = 170
+	Dot11InformationElementIDUPID                      Dot11InformationElementID = 171
+	Dot11InformationElementIDDMGLinkAdaptionAck        Dot11InformationElementID = 172
+	Dot11InformationElementIDSymbolProprietary         Dot11InformationElementID = 173
+	Dot11InformationElementIDMCCAOPAdvertOverview      Dot11InformationElementID = 174
+	Dot11InformationElementIDQuietPeriodRequest        Dot11InformationElementID = 175
+	Dot11InformationElementIDQuietPeriodResponse       Dot11InformationElementID = 177
+	Dot11InformationElementIDECPACPolicy               Dot11InformationElementID = 182
+	Dot11InformationElementIDClusterTimeOffset         Dot11InformationElementID = 183
+	Dot11InformationElementIDAntennaSectorID           Dot11InformationElementID = 190
+	Dot11InformationElementIDVHTCapabilities           Dot11InformationElementID = 191
+	Dot11InformationElementIDVHTOperation              Dot11InformationElementID = 192
+	Dot11InformationElementIDExtendedBSSLoad           Dot11InformationElementID = 193
+	Dot11InformationElementIDWideBWChannelSwitch       Dot11InformationElementID = 194
+	Dot11InformationElementIDVHTTxPowerEnvelope        Dot11InformationElementID = 195
+	Dot11InformationElementIDChannelSwitchWrapper      Dot11InformationElementID = 196
+	Dot11InformationElementIDOperatingModeNotification Dot11InformationElementID = 199
+	Dot11InformationElementIDUPSIM                     Dot11InformationElementID = 200
+	Dot11InformationElementIDReducedNeighborReport     Dot11InformationElementID = 201
+	Dot11InformationElementIDTVHTOperation             Dot11InformationElementID = 202
+	Dot11InformationElementIDDeviceLocation            Dot11InformationElementID = 204
+	Dot11InformationElementIDWhiteSpaceMap             Dot11InformationElementID = 205
+	Dot11InformationElementIDFineTuningMeasureParams   Dot11InformationElementID = 206
+	Dot11InformationElementIDVendor                    Dot11InformationElementID = 221
+)
+
+// String provides a human readable string for Dot11InformationElementID.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the Dot11InformationElementID value,
+// not its string.
+func (a Dot11InformationElementID) String() string {
+	switch a {
+	case Dot11InformationElementIDSSID:
+		return "SSID parameter set"
+	case Dot11InformationElementIDRates:
+		return "Supported Rates"
+	case Dot11InformationElementIDFHSet:
+		return "FH Parameter set"
+	case Dot11InformationElementIDDSSet:
+		return "DS Parameter set"
+	case Dot11InformationElementIDCFSet:
+		return "CF Parameter set"
+	case Dot11InformationElementIDTIM:
+		return "Traffic Indication Map (TIM)"
+	case Dot11InformationElementIDIBSSSet:
+		return "IBSS Parameter set"
+	case Dot11InformationElementIDCountryInfo:
+		return "Country Information"
+	case Dot11InformationElementIDHoppingPatternParam:
+		return "Hopping Pattern Parameters"
+	case Dot11InformationElementIDHoppingPatternTable:
+		return "Hopping Pattern Table"
+	case Dot11InformationElementIDRequest:
+		return "Request"
+	case Dot11InformationElementIDQBSSLoadElem:
+		return "QBSS Load Element"
+	case Dot11InformationElementIDEDCAParamSet:
+		return "EDCA Parameter Set"
+	case Dot11InformationElementIDTrafficSpec:
+		return "Traffic Specification"
+	case Dot11InformationElementIDTrafficClass:
+		return "Traffic Classification"
+	case Dot11InformationElementIDSchedule:
+		return "Schedule"
+	case Dot11InformationElementIDChallenge:
+		return "Challenge text"
+	case Dot11InformationElementIDPowerConst:
+		return "Power Constraint"
+	case Dot11InformationElementIDPowerCapability:
+		return "Power Capability"
+	case Dot11InformationElementIDTPCRequest:
+		return "TPC Request"
+	case Dot11InformationElementIDTPCReport:
+		return "TPC Report"
+	case Dot11InformationElementIDSupportedChannels:
+		return "Supported Channels"
+	case Dot11InformationElementIDSwitchChannelAnnounce:
+		return "Channel Switch Announcement"
+	case Dot11InformationElementIDMeasureRequest:
+		return "Measurement Request"
+	case Dot11InformationElementIDMeasureReport:
+		return "Measurement Report"
+	case Dot11InformationElementIDQuiet:
+		return "Quiet"
+	case Dot11InformationElementIDIBSSDFS:
+		return "IBSS DFS"
+	case Dot11InformationElementIDERPInfo:
+		return "ERP Information"
+	case Dot11InformationElementIDTSDelay:
+		return "TS Delay"
+	case Dot11InformationElementIDTCLASProcessing:
+		return "TCLAS Processing"
+	case Dot11InformationElementIDHTCapabilities:
+		return "HT Capabilities (802.11n D1.10)"
+	case Dot11InformationElementIDQOSCapability:
+		return "QOS Capability"
+	case Dot11InformationElementIDERPInfo2:
+		return "ERP Information-2"
+	case Dot11InformationElementIDRSNInfo:
+		return "RSN Information"
+	case Dot11InformationElementIDESRates:
+		return "Extended Supported Rates"
+	case Dot11InformationElementIDAPChannelReport:
+		return "AP Channel Report"
+	case Dot11InformationElementIDNeighborReport:
+		return "Neighbor Report"
+	case Dot11InformationElementIDRCPI:
+		return "RCPI"
+	case Dot11InformationElementIDMobilityDomain:
+		return "Mobility Domain"
+	case Dot11InformationElementIDFastBSSTrans:
+		return "Fast BSS Transition"
+	case Dot11InformationElementIDTimeoutInt:
+		return "Timeout Interval"
+	case Dot11InformationElementIDRICData:
+		return "RIC Data"
+	case Dot11InformationElementIDDSERegisteredLoc:
+		return "DSE Registered Location"
+	case Dot11InformationElementIDSuppOperatingClass:
+		return "Supported Operating Classes"
+	case Dot11InformationElementIDExtChanSwitchAnnounce:
+		return "Extended Channel Switch Announcement"
+	case Dot11InformationElementIDHTInfo:
+		return "HT Information (802.11n D1.10)"
+	case Dot11InformationElementIDSecChanOffset:
+		return "Secondary Channel Offset (802.11n D1.10)"
+	case Dot11InformationElementIDBSSAverageAccessDelay:
+		return "BSS Average Access Delay"
+	case Dot11InformationElementIDAntenna:
+		return "Antenna"
+	case Dot11InformationElementIDRSNI:
+		return "RSNI"
+	case Dot11InformationElementIDMeasurePilotTrans:
+		return "Measurement Pilot Transmission"
+	case Dot11InformationElementIDBSSAvailAdmCapacity:
+		return "BSS Available Admission Capacity"
+	case Dot11InformationElementIDBSSACAccDelayWAPIParam:
+		return "BSS AC Access Delay/WAPI Parameter Set"
+	case Dot11InformationElementIDTimeAdvertisement:
+		return "Time Advertisement"
+	case Dot11InformationElementIDRMEnabledCapabilities:
+		return "RM Enabled Capabilities"
+	case Dot11InformationElementIDMultipleBSSID:
+		return "Multiple BSSID"
+	case Dot11InformationElementID2040BSSCoExist:
+		return "20/40 BSS Coexistence"
+	case Dot11InformationElementID2040BSSIntChanReport:
+		return "20/40 BSS Intolerant Channel Report"
+	case Dot11InformationElementIDOverlapBSSScanParam:
+		return "Overlapping BSS Scan Parameters"
+	case Dot11InformationElementIDRICDescriptor:
+		return "RIC Descriptor"
+	case Dot11InformationElementIDManagementMIC:
+		return "Management MIC"
+	case Dot11InformationElementIDEventRequest:
+		return "Event Request"
+	case Dot11InformationElementIDEventReport:
+		return "Event Report"
+	case Dot11InformationElementIDDiagnosticRequest:
+		return "Diagnostic Request"
+	case Dot11InformationElementIDDiagnosticReport:
+		return "Diagnostic Report"
+	case Dot11InformationElementIDLocationParam:
+		return "Location Parameters"
+	case Dot11InformationElementIDNonTransBSSIDCapability:
+		return "Non Transmitted BSSID Capability"
+	case Dot11InformationElementIDSSIDList:
+		return "SSID List"
+	case Dot11InformationElementIDMultipleBSSIDIndex:
+		return "Multiple BSSID Index"
+	case Dot11InformationElementIDFMSDescriptor:
+		return "FMS Descriptor"
+	case Dot11InformationElementIDFMSRequest:
+		return "FMS Request"
+	case Dot11InformationElementIDFMSResponse:
+		return "FMS Response"
+	case Dot11InformationElementIDQOSTrafficCapability:
+		return "QoS Traffic Capability"
+	case Dot11InformationElementIDBSSMaxIdlePeriod:
+		return "BSS Max Idle Period"
+	case Dot11InformationElementIDTFSRequest:
+		return "TFS Request"
+	case Dot11InformationElementIDTFSResponse:
+		return "TFS Response"
+	case Dot11InformationElementIDWNMSleepMode:
+		return "WNM-Sleep Mode"
+	case Dot11InformationElementIDTIMBroadcastRequest:
+		return "TIM Broadcast Request"
+	case Dot11InformationElementIDTIMBroadcastResponse:
+		return "TIM Broadcast Response"
+	case Dot11InformationElementIDCollInterferenceReport:
+		return "Collocated Interference Report"
+	case Dot11InformationElementIDChannelUsage:
+		return "Channel Usage"
+	case Dot11InformationElementIDTimeZone:
+		return "Time Zone"
+	case Dot11InformationElementIDDMSRequest:
+		return "DMS Request"
+	case Dot11InformationElementIDDMSResponse:
+		return "DMS Response"
+	case Dot11InformationElementIDLinkIdentifier:
+		return "Link Identifier"
+	case Dot11InformationElementIDWakeupSchedule:
+		return "Wakeup Schedule"
+	case Dot11InformationElementIDChannelSwitchTiming:
+		return "Channel Switch Timing"
+	case Dot11InformationElementIDPTIControl:
+		return "PTI Control"
+	case Dot11InformationElementIDPUBufferStatus:
+		return "PU Buffer Status"
+	case Dot11InformationElementIDInterworking:
+		return "Interworking"
+	case Dot11InformationElementIDAdvertisementProtocol:
+		return "Advertisement Protocol"
+	case Dot11InformationElementIDExpBWRequest:
+		return "Expedited Bandwidth Request"
+	case Dot11InformationElementIDQOSMapSet:
+		return "QoS Map Set"
+	case Dot11InformationElementIDRoamingConsortium:
+		return "Roaming Consortium"
+	case Dot11InformationElementIDEmergencyAlertIdentifier:
+		return "Emergency Alert Identifier"
+	case Dot11InformationElementIDMeshConfiguration:
+		return "Mesh Configuration"
+	case Dot11InformationElementIDMeshID:
+		return "Mesh ID"
+	case Dot11InformationElementIDMeshLinkMetricReport:
+		return "Mesh Link Metric Report"
+	case Dot11InformationElementIDCongestionNotification:
+		return "Congestion Notification"
+	case Dot11InformationElementIDMeshPeeringManagement:
+		return "Mesh Peering Management"
+	case Dot11InformationElementIDMeshChannelSwitchParam:
+		return "Mesh Channel Switch Parameters"
+	case Dot11InformationElementIDMeshAwakeWindows:
+		return "Mesh Awake Windows"
+	case Dot11InformationElementIDBeaconTiming:
+		return "Beacon Timing"
+	case Dot11InformationElementIDMCCAOPSetupRequest:
+		return "MCCAOP Setup Request"
+	case Dot11InformationElementIDMCCAOPSetupReply:
+		return "MCCAOP SETUP Reply"
+	case Dot11InformationElementIDMCCAOPAdvertisement:
+		return "MCCAOP Advertisement"
+	case Dot11InformationElementIDMCCAOPTeardown:
+		return "MCCAOP Teardown"
+	case Dot11InformationElementIDGateAnnouncement:
+		return "Gate Announcement"
+	case Dot11InformationElementIDRootAnnouncement:
+		return "Root Announcement"
+	case Dot11InformationElementIDExtCapability:
+		return "Extended Capabilities"
+	case Dot11InformationElementIDAgereProprietary:
+		return "Agere Proprietary"
+	case Dot11InformationElementIDPathRequest:
+		return "Path Request"
+	case Dot11InformationElementIDPathReply:
+		return "Path Reply"
+	case Dot11InformationElementIDPathError:
+		return "Path Error"
+	case Dot11InformationElementIDCiscoCCX1CKIPDeviceName:
+		return "Cisco CCX1 CKIP + Device Name"
+	case Dot11InformationElementIDCiscoCCX2:
+		return "Cisco CCX2"
+	case Dot11InformationElementIDProxyUpdate:
+		return "Proxy Update"
+	case Dot11InformationElementIDProxyUpdateConfirmation:
+		return "Proxy Update Confirmation"
+	case Dot11InformationElementIDAuthMeshPerringExch:
+		return "Auhenticated Mesh Perring Exchange"
+	case Dot11InformationElementIDMIC:
+		return "MIC (Message Integrity Code)"
+	case Dot11InformationElementIDDestinationURI:
+		return "Destination URI"
+	case Dot11InformationElementIDUAPSDCoexistence:
+		return "U-APSD Coexistence"
+	case Dot11InformationElementIDWakeupSchedule80211ad:
+		return "Wakeup Schedule 802.11ad"
+	case Dot11InformationElementIDExtendedSchedule:
+		return "Extended Schedule"
+	case Dot11InformationElementIDSTAAvailability:
+		return "STA Availability"
+	case Dot11InformationElementIDDMGTSPEC:
+		return "DMG TSPEC"
+	case Dot11InformationElementIDNextDMGATI:
+		return "Next DMG ATI"
+	case Dot11InformationElementIDDMSCapabilities:
+		return "DMG Capabilities"
+	case Dot11InformationElementIDCiscoUnknown95:
+		return "Cisco Unknown 95"
+	case Dot11InformationElementIDVendor2:
+		return "Vendor Specific"
+	case Dot11InformationElementIDDMGOperating:
+		return "DMG Operating"
+	case Dot11InformationElementIDDMGBSSParamChange:
+		return "DMG BSS Parameter Change"
+	case Dot11InformationElementIDDMGBeamRefinement:
+		return "DMG Beam Refinement"
+	case Dot11InformationElementIDChannelMeasFeedback:
+		return "Channel Measurement Feedback"
+	case Dot11InformationElementIDAwakeWindow:
+		return "Awake Window"
+	case Dot11InformationElementIDMultiBand:
+		return "Multi Band"
+	case Dot11InformationElementIDADDBAExtension:
+		return "ADDBA Extension"
+	case Dot11InformationElementIDNEXTPCPList:
+		return "NEXTPCP List"
+	case Dot11InformationElementIDPCPHandover:
+		return "PCP Handover"
+	case Dot11InformationElementIDDMGLinkMargin:
+		return "DMG Link Margin"
+	case Dot11InformationElementIDSwitchingStream:
+		return "Switching Stream"
+	case Dot11InformationElementIDSessionTransmission:
+		return "Session Transmission"
+	case Dot11InformationElementIDDynamicTonePairReport:
+		return "Dynamic Tone Pairing Report"
+	case Dot11InformationElementIDClusterReport:
+		return "Cluster Report"
+	case Dot11InformationElementIDRelayCapabilities:
+		return "Relay Capabilities"
+	case Dot11InformationElementIDRelayTransferParameter:
+		return "Relay Transfer Parameter"
+	case Dot11InformationElementIDBeamlinkMaintenance:
+		return "Beamlink Maintenance"
+	case Dot11InformationElementIDMultipleMacSublayers:
+		return "Multiple MAC Sublayers"
+	case Dot11InformationElementIDUPID:
+		return "U-PID"
+	case Dot11InformationElementIDDMGLinkAdaptionAck:
+		return "DMG Link Adaption Acknowledgment"
+	case Dot11InformationElementIDSymbolProprietary:
+		return "Symbol Proprietary"
+	case Dot11InformationElementIDMCCAOPAdvertOverview:
+		return "MCCAOP Advertisement Overview"
+	case Dot11InformationElementIDQuietPeriodRequest:
+		return "Quiet Period Request"
+	case Dot11InformationElementIDQuietPeriodResponse:
+		return "Quiet Period Response"
+	case Dot11InformationElementIDECPACPolicy:
+		return "ECPAC Policy"
+	case Dot11InformationElementIDClusterTimeOffset:
+		return "Cluster Time Offset"
+	case Dot11InformationElementIDAntennaSectorID:
+		return "Antenna Sector ID"
+	case Dot11InformationElementIDVHTCapabilities:
+		return "VHT Capabilities (IEEE Std 802.11ac/D3.1)"
+	case Dot11InformationElementIDVHTOperation:
+		return "VHT Operation (IEEE Std 802.11ac/D3.1)"
+	case Dot11InformationElementIDExtendedBSSLoad:
+		return "Extended BSS Load"
+	case Dot11InformationElementIDWideBWChannelSwitch:
+		return "Wide Bandwidth Channel Switch"
+	case Dot11InformationElementIDVHTTxPowerEnvelope:
+		return "VHT Tx Power Envelope (IEEE Std 802.11ac/D5.0)"
+	case Dot11InformationElementIDChannelSwitchWrapper:
+		return "Channel Switch Wrapper"
+	case Dot11InformationElementIDOperatingModeNotification:
+		return "Operating Mode Notification"
+	case Dot11InformationElementIDUPSIM:
+		return "UP SIM"
+	case Dot11InformationElementIDReducedNeighborReport:
+		return "Reduced Neighbor Report"
+	case Dot11InformationElementIDTVHTOperation:
+		return "TVHT Op"
+	case Dot11InformationElementIDDeviceLocation:
+		return "Device Location"
+	case Dot11InformationElementIDWhiteSpaceMap:
+		return "White Space Map"
+	case Dot11InformationElementIDFineTuningMeasureParams:
+		return "Fine Tuning Measure Parameters"
+	case Dot11InformationElementIDVendor:
+		return "Vendor"
+	default:
+		return "Unknown information element id"
+	}
+}
+
+// Dot11 provides an IEEE 802.11 base packet header.
+// See http://standards.ieee.org/findstds/standard/802.11-2012.html
+// for excruciating detail.
+type Dot11 struct {
+	BaseLayer
+	Type           Dot11Type
+	Proto          uint8
+	Flags          Dot11Flags
+	DurationID     uint16
+	Address1       net.HardwareAddr
+	Address2       net.HardwareAddr
+	Address3       net.HardwareAddr
+	Address4       net.HardwareAddr
+	SequenceNumber uint16
+	FragmentNumber uint16
+	Checksum       uint32
+	QOS            *Dot11QOS
+	HTControl      *Dot11HTControl
+	DataLayer      gopacket.Layer
+}
+
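+// Dot11QOS is the QoS Control field carried by 802.11 QoS data frames.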
+type Dot11QOS struct {
+	TID       uint8 /* Traffic IDentifier */
+	EOSP      bool  /* End of service period */
+	AckPolicy Dot11AckPolicy
+	TXOP      uint8
+}
+
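+// Dot11HTControl is the HT Control field, present when the Order flag is set
+// on QoS data and management frames.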
+type Dot11HTControl struct {
+	ACConstraint bool
+	RDGMorePPDU  bool
+
+	VHT *Dot11HTControlVHT
+	HT  *Dot11HTControlHT
+}
+
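+// Dot11HTControlHT is the HT variant of the HT Control field.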
+type Dot11HTControlHT struct {
+	LinkAdapationControl *Dot11LinkAdapationControl
+	CalibrationPosition  uint8
+	CalibrationSequence  uint8
+	CSISteering          uint8
+	NDPAnnouncement      bool
+	DEI                  bool
+}
+
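+// Dot11HTControlVHT is the VHT variant of the HT Control field.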
+type Dot11HTControlVHT struct {
+	MRQ            bool
+	UnsolicitedMFB bool
+	MSI            *uint8
+	MFB            Dot11HTControlMFB
+	CompressedMSI  *uint8
+	STBCIndication bool
+	MFSI           *uint8
+	GID            *uint8
+	CodingType     *Dot11CodingType
+	FbTXBeamformed bool
+}
+
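+// Dot11HTControlMFB is the MCS feedback (MFB) subfield of the VHT variant.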
+type Dot11HTControlMFB struct {
+	NumSTS uint8
+	VHTMCS uint8
+	BW     uint8
+	SNR    int8
+}
+
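+// Dot11LinkAdapationControl is the link adaptation control subfield of the
+// HT variant of the HT Control field.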
+type Dot11LinkAdapationControl struct {
+	TRQ  bool
+	MRQ  bool
+	MSI  uint8
+	MFSI uint8
+	ASEL *Dot11ASEL
+	MFB  *uint8
+}
+
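+// Dot11ASEL is the antenna selection (ASEL) command and data carried in the
+// link adaptation control subfield.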
+type Dot11ASEL struct {
+	Command uint8
+	Data    uint8
+}
+
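+// Dot11CodingType is the coding type (BCC or LDPC) reported in VHT MCS feedback.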
+type Dot11CodingType uint8
+
+const (
+	Dot11CodingTypeBCC  = 0
+	Dot11CodingTypeLDPC = 1
+)
+
+func (a Dot11CodingType) String() string {
+	switch a {
+	case Dot11CodingTypeBCC:
+		return "BCC"
+	case Dot11CodingTypeLDPC:
+		return "LDPC"
+	default:
+		return "Unknown coding type"
+	}
+}
+
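+// NoFeedBackPresent reports whether the MFB subfield indicates that no
+// feedback is present (VHT-MCS 15 and NUM_STS 7).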
+func (m *Dot11HTControlMFB) NoFeedBackPresent() bool {
+	return m.VHTMCS == 15 && m.NumSTS == 7
+}
+
+func decodeDot11(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(d)
+	if d.DataLayer != nil {
+		p.AddLayer(d.DataLayer)
+	}
+	return p.NextDecoder(d.NextLayerType())
+}
+
+func (m *Dot11) LayerType() gopacket.LayerType  { return LayerTypeDot11 }
+func (m *Dot11) CanDecode() gopacket.LayerClass { return LayerTypeDot11 }
+func (m *Dot11) NextLayerType() gopacket.LayerType {
+	if m.DataLayer != nil {
+		if m.Flags.WEP() {
+			return LayerTypeDot11WEP
+		}
+		return m.DataLayer.(gopacket.DecodingLayer).NextLayerType()
+	}
+	return m.Type.LayerType()
+}
+
+func createU8(x uint8) *uint8 {
+	return &x
+}
+
+var dataDecodeMap = map[Dot11Type]func() gopacket.DecodingLayer{
+	Dot11TypeData:                   func() gopacket.DecodingLayer { return &Dot11Data{} },
+	Dot11TypeDataCFAck:              func() gopacket.DecodingLayer { return &Dot11DataCFAck{} },
+	Dot11TypeDataCFPoll:             func() gopacket.DecodingLayer { return &Dot11DataCFPoll{} },
+	Dot11TypeDataCFAckPoll:          func() gopacket.DecodingLayer { return &Dot11DataCFAckPoll{} },
+	Dot11TypeDataNull:               func() gopacket.DecodingLayer { return &Dot11DataNull{} },
+	Dot11TypeDataCFAckNoData:        func() gopacket.DecodingLayer { return &Dot11DataCFAckNoData{} },
+	Dot11TypeDataCFPollNoData:       func() gopacket.DecodingLayer { return &Dot11DataCFPollNoData{} },
+	Dot11TypeDataCFAckPollNoData:    func() gopacket.DecodingLayer { return &Dot11DataCFAckPollNoData{} },
+	Dot11TypeDataQOSData:            func() gopacket.DecodingLayer { return &Dot11DataQOSData{} },
+	Dot11TypeDataQOSDataCFAck:       func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFAck{} },
+	Dot11TypeDataQOSDataCFPoll:      func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFPoll{} },
+	Dot11TypeDataQOSDataCFAckPoll:   func() gopacket.DecodingLayer { return &Dot11DataQOSDataCFAckPoll{} },
+	Dot11TypeDataQOSNull:            func() gopacket.DecodingLayer { return &Dot11DataQOSNull{} },
+	Dot11TypeDataQOSCFPollNoData:    func() gopacket.DecodingLayer { return &Dot11DataQOSCFPollNoData{} },
+	Dot11TypeDataQOSCFAckPollNoData: func() gopacket.DecodingLayer { return &Dot11DataQOSCFAckPollNoData{} },
+}
+
+func (m *Dot11) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 10 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11 length %v too short, %v required", len(data), 10)
+	}
+	m.Type = Dot11Type((data[0])&0xFC) >> 2
+
+	m.DataLayer = nil
+	m.Proto = uint8(data[0]) & 0x0003
+	m.Flags = Dot11Flags(data[1])
+	m.DurationID = binary.LittleEndian.Uint16(data[2:4])
+	m.Address1 = net.HardwareAddr(data[4:10])
+
+	offset := 10
+
+	mainType := m.Type.MainType()
+
+	switch mainType {
+	case Dot11TypeCtrl:
+		switch m.Type {
+		case Dot11TypeCtrlRTS, Dot11TypeCtrlPowersavePoll, Dot11TypeCtrlCFEnd, Dot11TypeCtrlCFEndAck:
+			if len(data) < offset+6 {
+				df.SetTruncated()
+				return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+			}
+			m.Address2 = net.HardwareAddr(data[offset : offset+6])
+			offset += 6
+		}
+	case Dot11TypeMgmt, Dot11TypeData:
+		if len(data) < offset+14 {
+			df.SetTruncated()
+			return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+14)
+		}
+		m.Address2 = net.HardwareAddr(data[offset : offset+6])
+		offset += 6
+		m.Address3 = net.HardwareAddr(data[offset : offset+6])
+		offset += 6
+
+		m.SequenceNumber = (binary.LittleEndian.Uint16(data[offset:offset+2]) & 0xFFF0) >> 4
+		m.FragmentNumber = (binary.LittleEndian.Uint16(data[offset:offset+2]) & 0x000F)
+		offset += 2
+	}
+
+	if mainType == Dot11TypeData && m.Flags.FromDS() && m.Flags.ToDS() {
+		if len(data) < offset+6 {
+			df.SetTruncated()
+			return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+		}
+		m.Address4 = net.HardwareAddr(data[offset : offset+6])
+		offset += 6
+	}
+
+	if m.Type.QOS() {
+		if len(data) < offset+2 {
+			df.SetTruncated()
+			return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+		}
+		m.QOS = &Dot11QOS{
+			TID:       (uint8(data[offset]) & 0x0F),
+			EOSP:      (uint8(data[offset]) & 0x10) == 0x10,
+			AckPolicy: Dot11AckPolicy((uint8(data[offset]) & 0x60) >> 5),
+			TXOP:      uint8(data[offset+1]),
+		}
+		offset += 2
+	}
+	if m.Flags.Order() && (m.Type.QOS() || mainType == Dot11TypeMgmt) {
+		if len(data) < offset+4 {
+			df.SetTruncated()
+			return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+6)
+		}
+
+		htc := &Dot11HTControl{
+			ACConstraint: data[offset+3]&0x40 != 0,
+			RDGMorePPDU:  data[offset+3]&0x80 != 0,
+		}
+		m.HTControl = htc
+
+		if data[offset]&0x1 != 0 { // VHT Variant
+			vht := &Dot11HTControlVHT{}
+			htc.VHT = vht
+			vht.MRQ = data[offset]&0x4 != 0
+			vht.UnsolicitedMFB = data[offset+3]&0x20 != 0
+			vht.MFB = Dot11HTControlMFB{
+				NumSTS: uint8(data[offset+1] >> 1 & 0x7),
+				VHTMCS: uint8(data[offset+1] >> 4 & 0xF),
+				BW:     uint8(data[offset+2] & 0x3),
+				SNR:    int8((-(data[offset+2] >> 2 & 0x20))+data[offset+2]>>2&0x1F) + 22,
+			}
+
+			if vht.UnsolicitedMFB {
+				if !vht.MFB.NoFeedBackPresent() {
+					vht.CompressedMSI = createU8(data[offset] >> 3 & 0x3)
+					vht.STBCIndication = data[offset]&0x20 != 0
+					vht.CodingType = (*Dot11CodingType)(createU8(data[offset+3] >> 3 & 0x1))
+					vht.FbTXBeamformed = data[offset+3]&0x10 != 0
+					vht.GID = createU8(
+						data[offset]>>6 +
+							(data[offset+1] & 0x1 << 2) +
+							data[offset+3]&0x7<<3)
+				}
+			} else {
+				if vht.MRQ {
+					vht.MSI = createU8((data[offset] >> 3) & 0x07)
+				}
+				vht.MFSI = createU8(data[offset]>>6 + (data[offset+1] & 0x1 << 2))
+			}
+
+		} else { // HT Variant
+			ht := &Dot11HTControlHT{}
+			htc.HT = ht
+
+			lac := &Dot11LinkAdapationControl{}
+			ht.LinkAdapationControl = lac
+			lac.TRQ = data[offset]&0x2 != 0
+			lac.MFSI = data[offset]>>6&0x3 + data[offset+1]&0x1<<3
+			if data[offset]&0x3C == 0x38 { // ASEL
+				lac.ASEL = &Dot11ASEL{
+					Command: data[offset+1] >> 1 & 0x7,
+					Data:    data[offset+1] >> 4 & 0xF,
+				}
+			} else {
+				lac.MRQ = data[offset]&0x4 != 0
+				if lac.MRQ {
+					lac.MSI = data[offset] >> 3 & 0x7
+				}
+				lac.MFB = createU8(data[offset+1] >> 1)
+			}
+			ht.CalibrationPosition = data[offset+2] & 0x3
+			ht.CalibrationSequence = data[offset+2] >> 2 & 0x3
+			ht.CSISteering = data[offset+2] >> 6 & 0x3
+			ht.NDPAnnouncement = data[offset+3]&0x1 != 0
+			if mainType != Dot11TypeMgmt {
+				ht.DEI = data[offset+3]&0x20 != 0
+			}
+		}
+
+		offset += 4
+	}
+
+	if len(data) < offset+4 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11 length %v too short, %v required", len(data), offset+4)
+	}
+
+	m.BaseLayer = BaseLayer{
+		Contents: data[0:offset],
+		Payload:  data[offset : len(data)-4],
+	}
+
+	if mainType == Dot11TypeData {
+		d := dataDecodeMap[m.Type]
+		if d == nil {
+			return fmt.Errorf("unsupported type: %v", m.Type)
+		}
+		l := d()
+		err := l.DecodeFromBytes(m.BaseLayer.Payload, df)
+		if err != nil {
+			return err
+		}
+		m.DataLayer = l.(gopacket.Layer)
+	}
+
+	m.Checksum = binary.LittleEndian.Uint32(data[len(data)-4 : len(data)])
+	return nil
+}
+
+func (m *Dot11) ChecksumValid() bool {
+	// only for CTRL and MGMT frames
+	h := crc32.NewIEEE()
+	h.Write(m.Contents)
+	h.Write(m.Payload)
+	return m.Checksum == h.Sum32()
+}
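+
+// Illustrative sketch (not part of the upstream source): a captured frame can
+// be decoded into this layer and its FCS verified roughly as follows, where
+// raw is assumed to hold the bytes of a single 802.11 frame:
+//
+//	pkt := gopacket.NewPacket(raw, LayerTypeDot11, gopacket.Default)
+//	if l := pkt.Layer(LayerTypeDot11); l != nil {
+//		dot11, _ := l.(*Dot11)
+//		fmt.Println(dot11.Address1, dot11.ChecksumValid())
+//	}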
+
+func (m Dot11) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(24)
+
+	if err != nil {
+		return err
+	}
+
+	buf[0] = (uint8(m.Type) << 2) | m.Proto
+	buf[1] = uint8(m.Flags)
+
+	binary.LittleEndian.PutUint16(buf[2:4], m.DurationID)
+
+	copy(buf[4:10], m.Address1)
+
+	offset := 10
+
+	switch m.Type.MainType() {
+	case Dot11TypeCtrl:
+		switch m.Type {
+		case Dot11TypeCtrlRTS, Dot11TypeCtrlPowersavePoll, Dot11TypeCtrlCFEnd, Dot11TypeCtrlCFEndAck:
+			copy(buf[offset:offset+6], m.Address2)
+			offset += 6
+		}
+	case Dot11TypeMgmt, Dot11TypeData:
+		copy(buf[offset:offset+6], m.Address2)
+		offset += 6
+		copy(buf[offset:offset+6], m.Address3)
+		offset += 6
+
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], (m.SequenceNumber<<4)|m.FragmentNumber)
+		offset += 2
+	}
+
+	if m.Type.MainType() == Dot11TypeData && m.Flags.FromDS() && m.Flags.ToDS() {
+		copy(buf[offset:offset+6], m.Address4)
+		offset += 6
+	}
+
+	return nil
+}
+
+// Dot11Mgmt is a base for all IEEE 802.11 management layers.
+type Dot11Mgmt struct {
+	BaseLayer
+}
+
+func (m *Dot11Mgmt) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+func (m *Dot11Mgmt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Contents = data
+	return nil
+}
+
+// Dot11Ctrl is a base for all IEEE 802.11 control layers.
+type Dot11Ctrl struct {
+	BaseLayer
+}
+
+func (m *Dot11Ctrl) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+
+func (m *Dot11Ctrl) LayerType() gopacket.LayerType  { return LayerTypeDot11Ctrl }
+func (m *Dot11Ctrl) CanDecode() gopacket.LayerClass { return LayerTypeDot11Ctrl }
+func (m *Dot11Ctrl) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Contents = data
+	return nil
+}
+
+func decodeDot11Ctrl(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11Ctrl{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+// Dot11WEP contains WEP-encrypted IEEE 802.11 data.
+type Dot11WEP struct {
+	BaseLayer
+}
+
+func (m *Dot11WEP) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+
+func (m *Dot11WEP) LayerType() gopacket.LayerType  { return LayerTypeDot11WEP }
+func (m *Dot11WEP) CanDecode() gopacket.LayerClass { return LayerTypeDot11WEP }
+func (m *Dot11WEP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Contents = data
+	return nil
+}
+
+func decodeDot11WEP(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11WEP{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+// Dot11Data is a base for all IEEE 802.11 data layers.
+type Dot11Data struct {
+	BaseLayer
+}
+
+func (m *Dot11Data) NextLayerType() gopacket.LayerType {
+	return LayerTypeLLC
+}
+
+func (m *Dot11Data) LayerType() gopacket.LayerType  { return LayerTypeDot11Data }
+func (m *Dot11Data) CanDecode() gopacket.LayerClass { return LayerTypeDot11Data }
+func (m *Dot11Data) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Payload = data
+	return nil
+}
+
+func decodeDot11Data(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11Data{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+type Dot11DataCFAck struct {
+	Dot11Data
+}
+
+func decodeDot11DataCFAck(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataCFAck{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFAck) LayerType() gopacket.LayerType  { return LayerTypeDot11DataCFAck }
+func (m *Dot11DataCFAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAck }
+func (m *Dot11DataCFAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFPoll struct {
+	Dot11Data
+}
+
+func decodeDot11DataCFPoll(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataCFPoll{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFPoll) LayerType() gopacket.LayerType  { return LayerTypeDot11DataCFPoll }
+func (m *Dot11DataCFPoll) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFPoll }
+func (m *Dot11DataCFPoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFAckPoll struct {
+	Dot11Data
+}
+
+func decodeDot11DataCFAckPoll(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataCFAckPoll{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFAckPoll) LayerType() gopacket.LayerType  { return LayerTypeDot11DataCFAckPoll }
+func (m *Dot11DataCFAckPoll) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAckPoll }
+func (m *Dot11DataCFAckPoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataNull struct {
+	Dot11Data
+}
+
+func decodeDot11DataNull(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataNull{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataNull) LayerType() gopacket.LayerType  { return LayerTypeDot11DataNull }
+func (m *Dot11DataNull) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataNull }
+func (m *Dot11DataNull) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFAckNoData struct {
+	Dot11Data
+}
+
+func decodeDot11DataCFAckNoData(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataCFAckNoData{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFAckNoData) LayerType() gopacket.LayerType  { return LayerTypeDot11DataCFAckNoData }
+func (m *Dot11DataCFAckNoData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataCFAckNoData }
+func (m *Dot11DataCFAckNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFPollNoData struct {
+	Dot11Data
+}
+
+func decodeDot11DataCFPollNoData(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataCFPollNoData{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFPollNoData) LayerType() gopacket.LayerType { return LayerTypeDot11DataCFPollNoData }
+func (m *Dot11DataCFPollNoData) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11DataCFPollNoData
+}
+func (m *Dot11DataCFPollNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
+type Dot11DataCFAckPollNoData struct {
+	Dot11Data
+}
+
+func decodeDot11DataCFAckPollNoData(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataCFAckPollNoData{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataCFAckPollNoData) LayerType() gopacket.LayerType {
+	return LayerTypeDot11DataCFAckPollNoData
+}
+func (m *Dot11DataCFAckPollNoData) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11DataCFAckPollNoData
+}
+func (m *Dot11DataCFAckPollNoData) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Data.DecodeFromBytes(data, df)
+}
+
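+// Dot11DataQOS is a base for all IEEE 802.11 QoS data layers.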
+type Dot11DataQOS struct {
+	Dot11Ctrl
+}
+
+func (m *Dot11DataQOS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.BaseLayer = BaseLayer{Payload: data}
+	return nil
+}
+
+type Dot11DataQOSData struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSData(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSData{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSData) LayerType() gopacket.LayerType  { return LayerTypeDot11DataQOSData }
+func (m *Dot11DataQOSData) CanDecode() gopacket.LayerClass { return LayerTypeDot11DataQOSData }
+
+func (m *Dot11DataQOSData) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11Data
+}
+
+type Dot11DataQOSDataCFAck struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSDataCFAck(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSDataCFAck{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSDataCFAck) LayerType() gopacket.LayerType { return LayerTypeDot11DataQOSDataCFAck }
+func (m *Dot11DataQOSDataCFAck) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11DataQOSDataCFAck
+}
+func (m *Dot11DataQOSDataCFAck) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataCFAck }
+
+type Dot11DataQOSDataCFPoll struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSDataCFPoll(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSDataCFPoll{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSDataCFPoll) LayerType() gopacket.LayerType {
+	return LayerTypeDot11DataQOSDataCFPoll
+}
+func (m *Dot11DataQOSDataCFPoll) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11DataQOSDataCFPoll
+}
+func (m *Dot11DataQOSDataCFPoll) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataCFPoll }
+
+type Dot11DataQOSDataCFAckPoll struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSDataCFAckPoll(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSDataCFAckPoll{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSDataCFAckPoll) LayerType() gopacket.LayerType {
+	return LayerTypeDot11DataQOSDataCFAckPoll
+}
+func (m *Dot11DataQOSDataCFAckPoll) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11DataQOSDataCFAckPoll
+}
+func (m *Dot11DataQOSDataCFAckPoll) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11DataCFAckPoll
+}
+
+type Dot11DataQOSNull struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSNull(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSNull{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSNull) LayerType() gopacket.LayerType     { return LayerTypeDot11DataQOSNull }
+func (m *Dot11DataQOSNull) CanDecode() gopacket.LayerClass    { return LayerTypeDot11DataQOSNull }
+func (m *Dot11DataQOSNull) NextLayerType() gopacket.LayerType { return LayerTypeDot11DataNull }
+
+type Dot11DataQOSCFPollNoData struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSCFPollNoData(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSCFPollNoData{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSCFPollNoData) LayerType() gopacket.LayerType {
+	return LayerTypeDot11DataQOSCFPollNoData
+}
+func (m *Dot11DataQOSCFPollNoData) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11DataQOSCFPollNoData
+}
+func (m *Dot11DataQOSCFPollNoData) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11DataCFPollNoData
+}
+
+type Dot11DataQOSCFAckPollNoData struct {
+	Dot11DataQOS
+}
+
+func decodeDot11DataQOSCFAckPollNoData(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11DataQOSCFAckPollNoData{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11DataQOSCFAckPollNoData) LayerType() gopacket.LayerType {
+	return LayerTypeDot11DataQOSCFAckPollNoData
+}
+func (m *Dot11DataQOSCFAckPollNoData) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11DataQOSCFAckPollNoData
+}
+func (m *Dot11DataQOSCFAckPollNoData) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11DataCFAckPollNoData
+}
+
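+// Dot11InformationElement is a single tagged parameter (ID, length, value)
+// carried in the body of an 802.11 management frame.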
+type Dot11InformationElement struct {
+	BaseLayer
+	ID     Dot11InformationElementID
+	Length uint8
+	OUI    []byte
+	Info   []byte
+}
+
+func (m *Dot11InformationElement) LayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+func (m *Dot11InformationElement) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11InformationElement
+}
+
+func (m *Dot11InformationElement) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+
+func (m *Dot11InformationElement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 2 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11InformationElement length %v too short, %v required", len(data), 2)
+	}
+	m.ID = Dot11InformationElementID(data[0])
+	m.Length = data[1]
+	offset := int(2)
+
+	if len(data) < offset+int(m.Length) {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11InformationElement length %v too short, %v required", len(data), offset+int(m.Length))
+	}
+	if len(data) < offset+4 {
+		df.SetTruncated()
+		return fmt.Errorf("vendor extension size < %d", offset+int(m.Length))
+	}
+	if m.ID == 221 {
+		// Vendor extension
+		m.OUI = data[offset : offset+4]
+		m.Info = data[offset+4 : offset+int(m.Length)]
+	} else {
+		m.Info = data[offset : offset+int(m.Length)]
+	}
+
+	offset += int(m.Length)
+
+	m.BaseLayer = BaseLayer{Contents: data[:offset], Payload: data[offset:]}
+	return nil
+}
+
+func (d *Dot11InformationElement) String() string {
+	if d.ID == 0 {
+		return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, SSID: %v)", d.ID, d.Length, string(d.Info))
+	} else if d.ID == 1 {
+		rates := ""
+		for i := 0; i < len(d.Info); i++ {
+			if d.Info[i]&0x80 == 0 {
+				rates += fmt.Sprintf("%.1f ", float32(d.Info[i])*0.5)
+			} else {
+				rates += fmt.Sprintf("%.1f* ", float32(d.Info[i]&0x7F)*0.5)
+			}
+		}
+		return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, Rates: %s Mbit)", d.ID, d.Length, rates)
+	} else if d.ID == 221 {
+		return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, OUI: %X, Info: %X)", d.ID, d.Length, d.OUI, d.Info)
+	} else {
+		return fmt.Sprintf("802.11 Information Element (ID: %v, Length: %v, Info: %X)", d.ID, d.Length, d.Info)
+	}
+}
+
+func (m Dot11InformationElement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	length := len(m.Info) + len(m.OUI)
+	buf, err := b.PrependBytes(2 + length)
+	if err != nil {
+		return err
+	}
+	buf[0] = uint8(m.ID)
+	buf[1] = uint8(length)
+	copy(buf[2:], m.OUI)
+	copy(buf[2+len(m.OUI):], m.Info)
+	return nil
+}
+
+func decodeDot11InformationElement(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11InformationElement{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+type Dot11CtrlCTS struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlCTS(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlCTS{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlCTS) LayerType() gopacket.LayerType {
+	return LayerTypeDot11CtrlCTS
+}
+func (m *Dot11CtrlCTS) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11CtrlCTS
+}
+func (m *Dot11CtrlCTS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlRTS struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlRTS(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlRTS{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlRTS) LayerType() gopacket.LayerType {
+	return LayerTypeDot11CtrlRTS
+}
+func (m *Dot11CtrlRTS) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11CtrlRTS
+}
+func (m *Dot11CtrlRTS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlBlockAckReq struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlBlockAckReq(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlBlockAckReq{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlBlockAckReq) LayerType() gopacket.LayerType {
+	return LayerTypeDot11CtrlBlockAckReq
+}
+func (m *Dot11CtrlBlockAckReq) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11CtrlBlockAckReq
+}
+func (m *Dot11CtrlBlockAckReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlBlockAck struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlBlockAck(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlBlockAck{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlBlockAck) LayerType() gopacket.LayerType  { return LayerTypeDot11CtrlBlockAck }
+func (m *Dot11CtrlBlockAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11CtrlBlockAck }
+func (m *Dot11CtrlBlockAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlPowersavePoll struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlPowersavePoll(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlPowersavePoll{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlPowersavePoll) LayerType() gopacket.LayerType {
+	return LayerTypeDot11CtrlPowersavePoll
+}
+func (m *Dot11CtrlPowersavePoll) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11CtrlPowersavePoll
+}
+func (m *Dot11CtrlPowersavePoll) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlAck struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlAck(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlAck{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlAck) LayerType() gopacket.LayerType  { return LayerTypeDot11CtrlAck }
+func (m *Dot11CtrlAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11CtrlAck }
+func (m *Dot11CtrlAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlCFEnd struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlCFEnd(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlCFEnd{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlCFEnd) LayerType() gopacket.LayerType {
+	return LayerTypeDot11CtrlCFEnd
+}
+func (m *Dot11CtrlCFEnd) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11CtrlCFEnd
+}
+func (m *Dot11CtrlCFEnd) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11CtrlCFEndAck struct {
+	Dot11Ctrl
+}
+
+func decodeDot11CtrlCFEndAck(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11CtrlCFEndAck{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11CtrlCFEndAck) LayerType() gopacket.LayerType {
+	return LayerTypeDot11CtrlCFEndAck
+}
+func (m *Dot11CtrlCFEndAck) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11CtrlCFEndAck
+}
+func (m *Dot11CtrlCFEndAck) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	return m.Dot11Ctrl.DecodeFromBytes(data, df)
+}
+
+type Dot11MgmtAssociationReq struct {
+	Dot11Mgmt
+	CapabilityInfo uint16
+	ListenInterval uint16
+}
+
+func decodeDot11MgmtAssociationReq(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtAssociationReq{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAssociationReq) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtAssociationReq
+}
+func (m *Dot11MgmtAssociationReq) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtAssociationReq
+}
+func (m *Dot11MgmtAssociationReq) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtAssociationReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtAssociationReq length %v too short, %v required", len(data), 4)
+	}
+	m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2])
+	m.ListenInterval = binary.LittleEndian.Uint16(data[2:4])
+	m.Payload = data[4:]
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtAssociationReq) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(4)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo)
+	binary.LittleEndian.PutUint16(buf[2:4], m.ListenInterval)
+
+	return nil
+}
+
+type Dot11MgmtAssociationResp struct {
+	Dot11Mgmt
+	CapabilityInfo uint16
+	Status         Dot11Status
+	AID            uint16
+}
+
+func decodeDot11MgmtAssociationResp(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtAssociationResp{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAssociationResp) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtAssociationResp
+}
+func (m *Dot11MgmtAssociationResp) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtAssociationResp
+}
+func (m *Dot11MgmtAssociationResp) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtAssociationResp) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 6 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtAssociationResp length %v too short, %v required", len(data), 6)
+	}
+	m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2])
+	m.Status = Dot11Status(binary.LittleEndian.Uint16(data[2:4]))
+	m.AID = binary.LittleEndian.Uint16(data[4:6])
+	m.Payload = data[6:]
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtAssociationResp) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(6)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo)
+	binary.LittleEndian.PutUint16(buf[2:4], uint16(m.Status))
+	binary.LittleEndian.PutUint16(buf[4:6], m.AID)
+
+	return nil
+}
+
+type Dot11MgmtReassociationReq struct {
+	Dot11Mgmt
+	CapabilityInfo   uint16
+	ListenInterval   uint16
+	CurrentApAddress net.HardwareAddr
+}
+
+func decodeDot11MgmtReassociationReq(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtReassociationReq{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtReassociationReq) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtReassociationReq
+}
+func (m *Dot11MgmtReassociationReq) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtReassociationReq
+}
+func (m *Dot11MgmtReassociationReq) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtReassociationReq) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 10 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtReassociationReq length %v too short, %v required", len(data), 10)
+	}
+	m.CapabilityInfo = binary.LittleEndian.Uint16(data[0:2])
+	m.ListenInterval = binary.LittleEndian.Uint16(data[2:4])
+	m.CurrentApAddress = net.HardwareAddr(data[4:10])
+	m.Payload = data[10:]
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtReassociationReq) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(10)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint16(buf[0:2], m.CapabilityInfo)
+	binary.LittleEndian.PutUint16(buf[2:4], m.ListenInterval)
+
+	copy(buf[4:10], m.CurrentApAddress)
+
+	return nil
+}
+
+type Dot11MgmtReassociationResp struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtReassociationResp(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtReassociationResp{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtReassociationResp) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtReassociationResp
+}
+func (m *Dot11MgmtReassociationResp) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtReassociationResp
+}
+func (m *Dot11MgmtReassociationResp) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+
+type Dot11MgmtProbeReq struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtProbeReq(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtProbeReq{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtProbeReq) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtProbeReq }
+func (m *Dot11MgmtProbeReq) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtProbeReq }
+func (m *Dot11MgmtProbeReq) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+
+type Dot11MgmtProbeResp struct {
+	Dot11Mgmt
+	Timestamp uint64
+	Interval  uint16
+	Flags     uint16
+}
+
+func decodeDot11MgmtProbeResp(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtProbeResp{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtProbeResp) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtProbeResp }
+func (m *Dot11MgmtProbeResp) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtProbeResp }
+func (m *Dot11MgmtProbeResp) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 12 {
+		df.SetTruncated()
+
+		return fmt.Errorf("Dot11MgmtProbeResp length %v too short, %v required", len(data), 12)
+	}
+
+	m.Timestamp = binary.LittleEndian.Uint64(data[0:8])
+	m.Interval = binary.LittleEndian.Uint16(data[8:10])
+	m.Flags = binary.LittleEndian.Uint16(data[10:12])
+	m.Payload = data[12:]
+
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m *Dot11MgmtProbeResp) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+
+func (m Dot11MgmtProbeResp) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(12)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint64(buf[0:8], m.Timestamp)
+	binary.LittleEndian.PutUint16(buf[8:10], m.Interval)
+	binary.LittleEndian.PutUint16(buf[10:12], m.Flags)
+
+	return nil
+}
+
+type Dot11MgmtMeasurementPilot struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtMeasurementPilot(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtMeasurementPilot{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtMeasurementPilot) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtMeasurementPilot
+}
+func (m *Dot11MgmtMeasurementPilot) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtMeasurementPilot
+}
+
+type Dot11MgmtBeacon struct {
+	Dot11Mgmt
+	Timestamp uint64
+	Interval  uint16
+	Flags     uint16
+}
+
+func decodeDot11MgmtBeacon(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtBeacon{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtBeacon) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtBeacon }
+func (m *Dot11MgmtBeacon) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtBeacon }
+func (m *Dot11MgmtBeacon) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 12 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtBeacon length %v too short, %v required", len(data), 12)
+	}
+	m.Timestamp = binary.LittleEndian.Uint64(data[0:8])
+	m.Interval = binary.LittleEndian.Uint16(data[8:10])
+	m.Flags = binary.LittleEndian.Uint16(data[10:12])
+	m.Payload = data[12:]
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m *Dot11MgmtBeacon) NextLayerType() gopacket.LayerType { return LayerTypeDot11InformationElement }
+
+func (m Dot11MgmtBeacon) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(12)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint64(buf[0:8], m.Timestamp)
+	binary.LittleEndian.PutUint16(buf[8:10], m.Interval)
+	binary.LittleEndian.PutUint16(buf[10:12], m.Flags)
+
+	return nil
+}
+
+type Dot11MgmtATIM struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtATIM(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtATIM{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtATIM) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtATIM }
+func (m *Dot11MgmtATIM) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtATIM }
+
+type Dot11MgmtDisassociation struct {
+	Dot11Mgmt
+	Reason Dot11Reason
+}
+
+func decodeDot11MgmtDisassociation(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtDisassociation{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtDisassociation) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtDisassociation
+}
+func (m *Dot11MgmtDisassociation) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtDisassociation
+}
+func (m *Dot11MgmtDisassociation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 2 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtDisassociation length %v too short, %v required", len(data), 2)
+	}
+	m.Reason = Dot11Reason(binary.LittleEndian.Uint16(data[0:2]))
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtDisassociation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(2)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Reason))
+
+	return nil
+}
+
+type Dot11MgmtAuthentication struct {
+	Dot11Mgmt
+	Algorithm Dot11Algorithm
+	Sequence  uint16
+	Status    Dot11Status
+}
+
+func decodeDot11MgmtAuthentication(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtAuthentication{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAuthentication) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtAuthentication
+}
+func (m *Dot11MgmtAuthentication) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtAuthentication
+}
+func (m *Dot11MgmtAuthentication) NextLayerType() gopacket.LayerType {
+	return LayerTypeDot11InformationElement
+}
+func (m *Dot11MgmtAuthentication) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 6 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtAuthentication length %v too short, %v required", len(data), 6)
+	}
+	m.Algorithm = Dot11Algorithm(binary.LittleEndian.Uint16(data[0:2]))
+	m.Sequence = binary.LittleEndian.Uint16(data[2:4])
+	m.Status = Dot11Status(binary.LittleEndian.Uint16(data[4:6]))
+	m.Payload = data[6:]
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtAuthentication) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(6)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Algorithm))
+	binary.LittleEndian.PutUint16(buf[2:4], m.Sequence)
+	binary.LittleEndian.PutUint16(buf[4:6], uint16(m.Status))
+
+	return nil
+}
+
+type Dot11MgmtDeauthentication struct {
+	Dot11Mgmt
+	Reason Dot11Reason
+}
+
+func decodeDot11MgmtDeauthentication(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtDeauthentication{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtDeauthentication) LayerType() gopacket.LayerType {
+	return LayerTypeDot11MgmtDeauthentication
+}
+func (m *Dot11MgmtDeauthentication) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot11MgmtDeauthentication
+}
+func (m *Dot11MgmtDeauthentication) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 2 {
+		df.SetTruncated()
+		return fmt.Errorf("Dot11MgmtDeauthentication length %v too short, %v required", len(data), 2)
+	}
+	m.Reason = Dot11Reason(binary.LittleEndian.Uint16(data[0:2]))
+	return m.Dot11Mgmt.DecodeFromBytes(data, df)
+}
+
+func (m Dot11MgmtDeauthentication) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(2)
+
+	if err != nil {
+		return err
+	}
+
+	binary.LittleEndian.PutUint16(buf[0:2], uint16(m.Reason))
+
+	return nil
+}
+
+type Dot11MgmtAction struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtAction(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtAction{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtAction) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtAction }
+func (m *Dot11MgmtAction) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtAction }
+
+type Dot11MgmtActionNoAck struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtActionNoAck(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtActionNoAck{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtActionNoAck) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtActionNoAck }
+func (m *Dot11MgmtActionNoAck) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtActionNoAck }
+
+type Dot11MgmtArubaWLAN struct {
+	Dot11Mgmt
+}
+
+func decodeDot11MgmtArubaWLAN(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot11MgmtArubaWLAN{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *Dot11MgmtArubaWLAN) LayerType() gopacket.LayerType  { return LayerTypeDot11MgmtArubaWLAN }
+func (m *Dot11MgmtArubaWLAN) CanDecode() gopacket.LayerClass { return LayerTypeDot11MgmtArubaWLAN }
diff --git a/vendor/github.com/google/gopacket/layers/dot1q.go b/vendor/github.com/google/gopacket/layers/dot1q.go
new file mode 100644
index 0000000..5cdd2f8
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/dot1q.go
@@ -0,0 +1,75 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/google/gopacket"
+)
+
+// Dot1Q is the packet layer for 802.1Q VLAN headers.
+type Dot1Q struct {
+	BaseLayer
+	Priority       uint8
+	DropEligible   bool
+	VLANIdentifier uint16
+	Type           EthernetType
+}
+
+// LayerType returns gopacket.LayerTypeDot1Q
+func (d *Dot1Q) LayerType() gopacket.LayerType { return LayerTypeDot1Q }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (d *Dot1Q) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return fmt.Errorf("802.1Q tag length %d too short", len(data))
+	}
+	d.Priority = (data[0] & 0xE0) >> 5
+	d.DropEligible = data[0]&0x10 != 0
+	d.VLANIdentifier = binary.BigEndian.Uint16(data[:2]) & 0x0FFF
+	d.Type = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+	d.BaseLayer = BaseLayer{Contents: data[:4], Payload: data[4:]}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (d *Dot1Q) CanDecode() gopacket.LayerClass {
+	return LayerTypeDot1Q
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (d *Dot1Q) NextLayerType() gopacket.LayerType {
+	return d.Type.LayerType()
+}
+
+func decodeDot1Q(data []byte, p gopacket.PacketBuilder) error {
+	d := &Dot1Q{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *Dot1Q) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	if d.VLANIdentifier > 0xFFF {
+		return fmt.Errorf("vlan identifier %v is too high", d.VLANIdentifier)
+	}
+	firstBytes := uint16(d.Priority)<<13 | d.VLANIdentifier
+	if d.DropEligible {
+		firstBytes |= 0x1000
+	}
+	binary.BigEndian.PutUint16(bytes, firstBytes)
+	binary.BigEndian.PutUint16(bytes[2:], uint16(d.Type))
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/eap.go b/vendor/github.com/google/gopacket/layers/eap.go
new file mode 100644
index 0000000..54238e8
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/eap.go
@@ -0,0 +1,114 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/google/gopacket"
+)
+
+type EAPCode uint8
+type EAPType uint8
+
+const (
+	EAPCodeRequest  EAPCode = 1
+	EAPCodeResponse EAPCode = 2
+	EAPCodeSuccess  EAPCode = 3
+	EAPCodeFailure  EAPCode = 4
+
+	// EAPTypeNone means that this EAP layer has no Type or TypeData.
+	// Success and Failure EAPs will have this set.
+	EAPTypeNone EAPType = 0
+
+	EAPTypeIdentity     EAPType = 1
+	EAPTypeNotification EAPType = 2
+	EAPTypeNACK         EAPType = 3
+	EAPTypeOTP          EAPType = 4
+	EAPTypeTokenCard    EAPType = 5
+)
+
+// EAP defines an Extensible Authentication Protocol (rfc 3748) layer.
+type EAP struct {
+	BaseLayer
+	Code     EAPCode
+	Id       uint8
+	Length   uint16
+	Type     EAPType
+	TypeData []byte
+}
+
+// LayerType returns LayerTypeEAP.
+func (e *EAP) LayerType() gopacket.LayerType { return LayerTypeEAP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (e *EAP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return fmt.Errorf("EAP length %d too short", len(data))
+	}
+	e.Code = EAPCode(data[0])
+	e.Id = data[1]
+	e.Length = binary.BigEndian.Uint16(data[2:4])
+	if len(data) < int(e.Length) {
+		df.SetTruncated()
+		return fmt.Errorf("EAP length %d too short, %d expected", len(data), e.Length)
+	}
+	switch {
+	case e.Length > 4:
+		e.Type = EAPType(data[4])
+		e.TypeData = data[5:]
+	case e.Length == 4:
+		e.Type = 0
+		e.TypeData = nil
+	default:
+		return fmt.Errorf("invalid EAP length %d", e.Length)
+	}
+	e.BaseLayer.Contents = data[:e.Length]
+	e.BaseLayer.Payload = data[e.Length:] // Should be 0 bytes
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (e *EAP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if opts.FixLengths {
+		e.Length = uint16(len(e.TypeData) + 1)
+	}
+	size := len(e.TypeData) + 4
+	if size > 4 {
+		size++
+	}
+	bytes, err := b.PrependBytes(size)
+	if err != nil {
+		return err
+	}
+	bytes[0] = byte(e.Code)
+	bytes[1] = e.Id
+	binary.BigEndian.PutUint16(bytes[2:], e.Length)
+	if size > 4 {
+		bytes[4] = byte(e.Type)
+		copy(bytes[5:], e.TypeData)
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (e *EAP) CanDecode() gopacket.LayerClass {
+	return LayerTypeEAP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (e *EAP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+func decodeEAP(data []byte, p gopacket.PacketBuilder) error {
+	e := &EAP{}
+	return decodingLayerDecoder(e, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/eapol.go b/vendor/github.com/google/gopacket/layers/eapol.go
new file mode 100644
index 0000000..902598a
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/eapol.go
@@ -0,0 +1,302 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/google/gopacket"
+)
+
+// EAPOL defines an EAP over LAN (802.1x) layer.
+type EAPOL struct {
+	BaseLayer
+	Version uint8
+	Type    EAPOLType
+	Length  uint16
+}
+
+// LayerType returns LayerTypeEAPOL.
+func (e *EAPOL) LayerType() gopacket.LayerType { return LayerTypeEAPOL }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (e *EAPOL) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return fmt.Errorf("EAPOL length %d too short", len(data))
+	}
+	e.Version = data[0]
+	e.Type = EAPOLType(data[1])
+	e.Length = binary.BigEndian.Uint16(data[2:4])
+	e.BaseLayer = BaseLayer{data[:4], data[4:]}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer
+func (e *EAPOL) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	bytes[0] = e.Version
+	bytes[1] = byte(e.Type)
+	binary.BigEndian.PutUint16(bytes[2:], e.Length)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (e *EAPOL) CanDecode() gopacket.LayerClass {
+	return LayerTypeEAPOL
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (e *EAPOL) NextLayerType() gopacket.LayerType {
+	return e.Type.LayerType()
+}
+
+func decodeEAPOL(data []byte, p gopacket.PacketBuilder) error {
+	e := &EAPOL{}
+	return decodingLayerDecoder(e, data, p)
+}
+
+// EAPOLKeyDescriptorType is an enumeration of key descriptor types
+// as specified by 802.1x in the EAPOL-Key frame
+type EAPOLKeyDescriptorType uint8
+
+// Enumeration of EAPOLKeyDescriptorType
+const (
+	EAPOLKeyDescriptorTypeRC4   EAPOLKeyDescriptorType = 1
+	EAPOLKeyDescriptorTypeDot11 EAPOLKeyDescriptorType = 2
+	EAPOLKeyDescriptorTypeWPA   EAPOLKeyDescriptorType = 254
+)
+
+func (kdt EAPOLKeyDescriptorType) String() string {
+	switch kdt {
+	case EAPOLKeyDescriptorTypeRC4:
+		return "RC4"
+	case EAPOLKeyDescriptorTypeDot11:
+		return "802.11"
+	case EAPOLKeyDescriptorTypeWPA:
+		return "WPA"
+	default:
+		return fmt.Sprintf("unknown descriptor type %d", kdt)
+	}
+}
+
+// EAPOLKeyDescriptorVersion is an enumeration of versions specifying the
+// encryption algorithm for the key data and the authentication for the
+// message integrity code (MIC)
+type EAPOLKeyDescriptorVersion uint8
+
+// Enumeration of EAPOLKeyDescriptorVersion
+const (
+	EAPOLKeyDescriptorVersionOther       EAPOLKeyDescriptorVersion = 0
+	EAPOLKeyDescriptorVersionRC4HMACMD5  EAPOLKeyDescriptorVersion = 1
+	EAPOLKeyDescriptorVersionAESHMACSHA1 EAPOLKeyDescriptorVersion = 2
+	EAPOLKeyDescriptorVersionAES128CMAC  EAPOLKeyDescriptorVersion = 3
+)
+
+func (v EAPOLKeyDescriptorVersion) String() string {
+	switch v {
+	case EAPOLKeyDescriptorVersionOther:
+		return "Other"
+	case EAPOLKeyDescriptorVersionRC4HMACMD5:
+		return "RC4-HMAC-MD5"
+	case EAPOLKeyDescriptorVersionAESHMACSHA1:
+		return "AES-HMAC-SHA1-128"
+	case EAPOLKeyDescriptorVersionAES128CMAC:
+		return "AES-128-CMAC"
+	default:
+		return fmt.Sprintf("unknown version %d", v)
+	}
+}
+
+// EAPOLKeyType is an enumeration of key derivation types describing
+// the purpose of the keys being derived.
+type EAPOLKeyType uint8
+
+// Enumeration of EAPOLKeyType
+const (
+	EAPOLKeyTypeGroupSMK EAPOLKeyType = 0
+	EAPOLKeyTypePairwise EAPOLKeyType = 1
+)
+
+func (kt EAPOLKeyType) String() string {
+	switch kt {
+	case EAPOLKeyTypeGroupSMK:
+		return "Group/SMK"
+	case EAPOLKeyTypePairwise:
+		return "Pairwise"
+	default:
+		return fmt.Sprintf("unknown key type %d", kt)
+	}
+}
+
+// EAPOLKey defines an EAPOL-Key frame for 802.1x authentication
+type EAPOLKey struct {
+	BaseLayer
+	KeyDescriptorType    EAPOLKeyDescriptorType
+	KeyDescriptorVersion EAPOLKeyDescriptorVersion
+	KeyType              EAPOLKeyType
+	KeyIndex             uint8
+	Install              bool
+	KeyACK               bool
+	KeyMIC               bool
+	Secure               bool
+	MICError             bool
+	Request              bool
+	HasEncryptedKeyData  bool
+	SMKMessage           bool
+	KeyLength            uint16
+	ReplayCounter        uint64
+	Nonce                []byte
+	IV                   []byte
+	RSC                  uint64
+	ID                   uint64
+	MIC                  []byte
+	KeyDataLength        uint16
+	EncryptedKeyData     []byte
+}
+
+// LayerType returns LayerTypeEAPOLKey.
+func (ek *EAPOLKey) LayerType() gopacket.LayerType {
+	return LayerTypeEAPOLKey
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (ek *EAPOLKey) CanDecode() gopacket.LayerType {
+	return LayerTypeEAPOLKey
+}
+
+// NextLayerType returns LayerTypeDot11InformationElement if the key data
+// exists and is unencrypted, otherwise gopacket.LayerTypePayload.
+func (ek *EAPOLKey) NextLayerType() gopacket.LayerType {
+	if !ek.HasEncryptedKeyData && ek.KeyDataLength > 0 {
+		return LayerTypeDot11InformationElement
+	}
+	return gopacket.LayerTypePayload
+}
+
+const eapolKeyFrameLen = 95
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (ek *EAPOLKey) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < eapolKeyFrameLen {
+		df.SetTruncated()
+		return fmt.Errorf("EAPOLKey length %v too short, %v required",
+			len(data), eapolKeyFrameLen)
+	}
+
+	ek.KeyDescriptorType = EAPOLKeyDescriptorType(data[0])
+
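+	// The two Key Information bytes pack several flags and small enums; the
+	// masks below follow the IEEE 802.1X / 802.11i (RSN) Key Information bit layout.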
+	info := binary.BigEndian.Uint16(data[1:3])
+	ek.KeyDescriptorVersion = EAPOLKeyDescriptorVersion(info & 0x0007)
+	ek.KeyType = EAPOLKeyType((info & 0x0008) >> 3)
+	ek.KeyIndex = uint8((info & 0x0030) >> 4)
+	ek.Install = (info & 0x0040) != 0
+	ek.KeyACK = (info & 0x0080) != 0
+	ek.KeyMIC = (info & 0x0100) != 0
+	ek.Secure = (info & 0x0200) != 0
+	ek.MICError = (info & 0x0400) != 0
+	ek.Request = (info & 0x0800) != 0
+	ek.HasEncryptedKeyData = (info & 0x1000) != 0
+	ek.SMKMessage = (info & 0x2000) != 0
+
+	ek.KeyLength = binary.BigEndian.Uint16(data[3:5])
+	ek.ReplayCounter = binary.BigEndian.Uint64(data[5:13])
+
+	ek.Nonce = data[13:45]
+	ek.IV = data[45:61]
+	ek.RSC = binary.BigEndian.Uint64(data[61:69])
+	ek.ID = binary.BigEndian.Uint64(data[69:77])
+	ek.MIC = data[77:93]
+
+	ek.KeyDataLength = binary.BigEndian.Uint16(data[93:95])
+
+	totalLength := eapolKeyFrameLen + int(ek.KeyDataLength)
+	if len(data) < totalLength {
+		df.SetTruncated()
+		return fmt.Errorf("EAPOLKey data length %d too short, %d required",
+			len(data)-eapolKeyFrameLen, ek.KeyDataLength)
+	}
+
+	if ek.HasEncryptedKeyData {
+		ek.EncryptedKeyData = data[eapolKeyFrameLen:totalLength]
+		ek.BaseLayer = BaseLayer{
+			Contents: data[:totalLength],
+			Payload:  data[totalLength:],
+		}
+	} else {
+		ek.BaseLayer = BaseLayer{
+			Contents: data[:eapolKeyFrameLen],
+			Payload:  data[eapolKeyFrameLen:],
+		}
+	}
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (ek *EAPOLKey) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(eapolKeyFrameLen + len(ek.EncryptedKeyData))
+	if err != nil {
+		return err
+	}
+
+	buf[0] = byte(ek.KeyDescriptorType)
+
+	var info uint16
+	info |= uint16(ek.KeyDescriptorVersion)
+	info |= uint16(ek.KeyType) << 3
+	info |= uint16(ek.KeyIndex) << 4
+	if ek.Install {
+		info |= 0x0040
+	}
+	if ek.KeyACK {
+		info |= 0x0080
+	}
+	if ek.KeyMIC {
+		info |= 0x0100
+	}
+	if ek.Secure {
+		info |= 0x0200
+	}
+	if ek.MICError {
+		info |= 0x0400
+	}
+	if ek.Request {
+		info |= 0x0800
+	}
+	if ek.HasEncryptedKeyData {
+		info |= 0x1000
+	}
+	if ek.SMKMessage {
+		info |= 0x2000
+	}
+	binary.BigEndian.PutUint16(buf[1:3], info)
+
+	binary.BigEndian.PutUint16(buf[3:5], ek.KeyLength)
+	binary.BigEndian.PutUint64(buf[5:13], ek.ReplayCounter)
+
+	copy(buf[13:45], ek.Nonce)
+	copy(buf[45:61], ek.IV)
+	binary.BigEndian.PutUint64(buf[61:69], ek.RSC)
+	binary.BigEndian.PutUint64(buf[69:77], ek.ID)
+	copy(buf[77:93], ek.MIC)
+
+	binary.BigEndian.PutUint16(buf[93:95], ek.KeyDataLength)
+	if len(ek.EncryptedKeyData) > 0 {
+		copy(buf[95:95+len(ek.EncryptedKeyData)], ek.EncryptedKeyData)
+	}
+
+	return nil
+}
+
+func decodeEAPOLKey(data []byte, p gopacket.PacketBuilder) error {
+	ek := &EAPOLKey{}
+	return decodingLayerDecoder(ek, data, p)
+}
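+
+// Example (illustrative sketch from an importing package, not part of the
+// upstream gopacket API): serializing an EAPOL-Key message. The outer EAPOL
+// Length is set by hand because SerializeTo above does not adjust it from
+// SerializeOptions.
+//
+//	key := &layers.EAPOLKey{
+//		KeyDescriptorType:    layers.EAPOLKeyDescriptorTypeDot11,
+//		KeyDescriptorVersion: layers.EAPOLKeyDescriptorVersionAESHMACSHA1,
+//		KeyType:              layers.EAPOLKeyTypePairwise,
+//		KeyACK:               true,
+//	}
+//	eapol := &layers.EAPOL{Version: 2, Type: layers.EAPOLTypeKey, Length: 95}
+//	buf := gopacket.NewSerializeBuffer()
+//	err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{}, eapol, key)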
diff --git a/vendor/github.com/google/gopacket/layers/endpoints.go b/vendor/github.com/google/gopacket/layers/endpoints.go
new file mode 100644
index 0000000..4c91cc3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/endpoints.go
@@ -0,0 +1,97 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"github.com/google/gopacket"
+	"net"
+	"strconv"
+)
+
+var (
+	// We use two different endpoint types for IPv4 vs IPv6 addresses, so that
+	// ordering with endpointA.LessThan(endpointB) sanely groups all IPv4
+	// addresses and all IPv6 addresses, such that IPv6 > IPv4 for all addresses.
+	EndpointIPv4 = gopacket.RegisterEndpointType(1, gopacket.EndpointTypeMetadata{Name: "IPv4", Formatter: func(b []byte) string {
+		return net.IP(b).String()
+	}})
+	EndpointIPv6 = gopacket.RegisterEndpointType(2, gopacket.EndpointTypeMetadata{Name: "IPv6", Formatter: func(b []byte) string {
+		return net.IP(b).String()
+	}})
+
+	EndpointMAC = gopacket.RegisterEndpointType(3, gopacket.EndpointTypeMetadata{Name: "MAC", Formatter: func(b []byte) string {
+		return net.HardwareAddr(b).String()
+	}})
+	EndpointTCPPort = gopacket.RegisterEndpointType(4, gopacket.EndpointTypeMetadata{Name: "TCP", Formatter: func(b []byte) string {
+		return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+	}})
+	EndpointUDPPort = gopacket.RegisterEndpointType(5, gopacket.EndpointTypeMetadata{Name: "UDP", Formatter: func(b []byte) string {
+		return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+	}})
+	EndpointSCTPPort = gopacket.RegisterEndpointType(6, gopacket.EndpointTypeMetadata{Name: "SCTP", Formatter: func(b []byte) string {
+		return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+	}})
+	EndpointRUDPPort = gopacket.RegisterEndpointType(7, gopacket.EndpointTypeMetadata{Name: "RUDP", Formatter: func(b []byte) string {
+		return strconv.Itoa(int(b[0]))
+	}})
+	EndpointUDPLitePort = gopacket.RegisterEndpointType(8, gopacket.EndpointTypeMetadata{Name: "UDPLite", Formatter: func(b []byte) string {
+		return strconv.Itoa(int(binary.BigEndian.Uint16(b)))
+	}})
+	EndpointPPP = gopacket.RegisterEndpointType(9, gopacket.EndpointTypeMetadata{Name: "PPP", Formatter: func([]byte) string {
+		return "point"
+	}})
+)
+
+// NewIPEndpoint creates a new IP (v4 or v6) endpoint from a net.IP address.
+// It returns gopacket.InvalidEndpoint if the IP address is invalid.
+func NewIPEndpoint(a net.IP) gopacket.Endpoint {
+	ipv4 := a.To4()
+	if ipv4 != nil {
+		return gopacket.NewEndpoint(EndpointIPv4, []byte(ipv4))
+	}
+
+	ipv6 := a.To16()
+	if ipv6 != nil {
+		return gopacket.NewEndpoint(EndpointIPv6, []byte(ipv6))
+	}
+
+	return gopacket.InvalidEndpoint
+}
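+
+// Example (illustrative sketch from an importing package, not part of the
+// upstream gopacket API): IPv4 endpoints always order before IPv6 endpoints.
+//
+//	src := layers.NewIPEndpoint(net.ParseIP("10.0.0.1"))   // EndpointIPv4
+//	dst := layers.NewIPEndpoint(net.ParseIP("2001:db8::1")) // EndpointIPv6
+//	fmt.Println(src.LessThan(dst))                          // true: IPv6 > IPv4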
+
+// NewMACEndpoint returns a new MAC address endpoint.
+func NewMACEndpoint(a net.HardwareAddr) gopacket.Endpoint {
+	return gopacket.NewEndpoint(EndpointMAC, []byte(a))
+}
+func newPortEndpoint(t gopacket.EndpointType, p uint16) gopacket.Endpoint {
+	return gopacket.NewEndpoint(t, []byte{byte(p >> 8), byte(p)})
+}
+
+// NewTCPPortEndpoint returns an endpoint based on a TCP port.
+func NewTCPPortEndpoint(p TCPPort) gopacket.Endpoint {
+	return newPortEndpoint(EndpointTCPPort, uint16(p))
+}
+
+// NewUDPPortEndpoint returns an endpoint based on a UDP port.
+func NewUDPPortEndpoint(p UDPPort) gopacket.Endpoint {
+	return newPortEndpoint(EndpointUDPPort, uint16(p))
+}
+
+// NewSCTPPortEndpoint returns an endpoint based on a SCTP port.
+func NewSCTPPortEndpoint(p SCTPPort) gopacket.Endpoint {
+	return newPortEndpoint(EndpointSCTPPort, uint16(p))
+}
+
+// NewRUDPPortEndpoint returns an endpoint based on a RUDP port.
+func NewRUDPPortEndpoint(p RUDPPort) gopacket.Endpoint {
+	return gopacket.NewEndpoint(EndpointRUDPPort, []byte{byte(p)})
+}
+
+// NewUDPLitePortEndpoint returns an endpoint based on a UDPLite port.
+func NewUDPLitePortEndpoint(p UDPLitePort) gopacket.Endpoint {
+	return newPortEndpoint(EndpointUDPLitePort, uint16(p))
+}
diff --git a/vendor/github.com/google/gopacket/layers/enums.go b/vendor/github.com/google/gopacket/layers/enums.go
new file mode 100644
index 0000000..222e2b0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/enums.go
@@ -0,0 +1,445 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"fmt"
+	"runtime"
+
+	"github.com/google/gopacket"
+)
+
+// EnumMetadata keeps track of a set of metadata for each enumeration value
+// for protocol enumerations.
+type EnumMetadata struct {
+	// DecodeWith is the decoder to use to decode this protocol's data.
+	DecodeWith gopacket.Decoder
+	// Name is the name of the enumeration value.
+	Name string
+	// LayerType is the layer type implied by the given enum.
+	LayerType gopacket.LayerType
+}
+
+// EthernetType is an enumeration of ethernet type values, and acts as a decoder
+// for any type it supports.
+type EthernetType uint16
+
+const (
+	// EthernetTypeLLC is not an actual ethernet type.  It is instead a
+	// placeholder we use in Ethernet frames that use the 802.3 standard of
+	// srcmac|dstmac|length|LLC instead of srcmac|dstmac|ethertype.
+	EthernetTypeLLC                         EthernetType = 0
+	EthernetTypeIPv4                        EthernetType = 0x0800
+	EthernetTypeARP                         EthernetType = 0x0806
+	EthernetTypeIPv6                        EthernetType = 0x86DD
+	EthernetTypeCiscoDiscovery              EthernetType = 0x2000
+	EthernetTypeNortelDiscovery             EthernetType = 0x01a2
+	EthernetTypeTransparentEthernetBridging EthernetType = 0x6558
+	EthernetTypeDot1Q                       EthernetType = 0x8100
+	EthernetTypePPP                         EthernetType = 0x880b
+	EthernetTypePPPoEDiscovery              EthernetType = 0x8863
+	EthernetTypePPPoESession                EthernetType = 0x8864
+	EthernetTypeMPLSUnicast                 EthernetType = 0x8847
+	EthernetTypeMPLSMulticast               EthernetType = 0x8848
+	EthernetTypeEAPOL                       EthernetType = 0x888e
+	EthernetTypeERSPAN                      EthernetType = 0x88be
+	EthernetTypeQinQ                        EthernetType = 0x88a8
+	EthernetTypeLinkLayerDiscovery          EthernetType = 0x88cc
+	EthernetTypeEthernetCTP                 EthernetType = 0x9000
+	EthernetTypeDot1QDoubleTag              EthernetType = 0x9100
+	EthernetTypeQinQDoubleTag               EthernetType = 0x9200
+)
+
+// IPProtocol is an enumeration of IP protocol values, and acts as a decoder
+// for any type it supports.
+type IPProtocol uint8
+
+const (
+	IPProtocolIPv6HopByHop    IPProtocol = 0
+	IPProtocolICMPv4          IPProtocol = 1
+	IPProtocolIGMP            IPProtocol = 2
+	IPProtocolIPv4            IPProtocol = 4
+	IPProtocolTCP             IPProtocol = 6
+	IPProtocolUDP             IPProtocol = 17
+	IPProtocolRUDP            IPProtocol = 27
+	IPProtocolIPv6            IPProtocol = 41
+	IPProtocolIPv6Routing     IPProtocol = 43
+	IPProtocolIPv6Fragment    IPProtocol = 44
+	IPProtocolGRE             IPProtocol = 47
+	IPProtocolESP             IPProtocol = 50
+	IPProtocolAH              IPProtocol = 51
+	IPProtocolICMPv6          IPProtocol = 58
+	IPProtocolNoNextHeader    IPProtocol = 59
+	IPProtocolIPv6Destination IPProtocol = 60
+	IPProtocolOSPF            IPProtocol = 89
+	IPProtocolIPIP            IPProtocol = 94
+	IPProtocolEtherIP         IPProtocol = 97
+	IPProtocolVRRP            IPProtocol = 112
+	IPProtocolSCTP            IPProtocol = 132
+	IPProtocolUDPLite         IPProtocol = 136
+	IPProtocolMPLSInIP        IPProtocol = 137
+)
+
+// LinkType is an enumeration of link types, and acts as a decoder for any
+// link type it supports.
+type LinkType uint8
+
+const (
+	// According to pcap-linktype(7) and http://www.tcpdump.org/linktypes.html
+	LinkTypeNull           LinkType = 0
+	LinkTypeEthernet       LinkType = 1
+	LinkTypeAX25           LinkType = 3
+	LinkTypeTokenRing      LinkType = 6
+	LinkTypeArcNet         LinkType = 7
+	LinkTypeSLIP           LinkType = 8
+	LinkTypePPP            LinkType = 9
+	LinkTypeFDDI           LinkType = 10
+	LinkTypePPP_HDLC       LinkType = 50
+	LinkTypePPPEthernet    LinkType = 51
+	LinkTypeATM_RFC1483    LinkType = 100
+	LinkTypeRaw            LinkType = 101
+	LinkTypeC_HDLC         LinkType = 104
+	LinkTypeIEEE802_11     LinkType = 105
+	LinkTypeFRelay         LinkType = 107
+	LinkTypeLoop           LinkType = 108
+	LinkTypeLinuxSLL       LinkType = 113
+	LinkTypeLTalk          LinkType = 114
+	LinkTypePFLog          LinkType = 117
+	LinkTypePrismHeader    LinkType = 119
+	LinkTypeIPOverFC       LinkType = 122
+	LinkTypeSunATM         LinkType = 123
+	LinkTypeIEEE80211Radio LinkType = 127
+	LinkTypeARCNetLinux    LinkType = 129
+	LinkTypeIPOver1394     LinkType = 138
+	LinkTypeMTP2Phdr       LinkType = 139
+	LinkTypeMTP2           LinkType = 140
+	LinkTypeMTP3           LinkType = 141
+	LinkTypeSCCP           LinkType = 142
+	LinkTypeDOCSIS         LinkType = 143
+	LinkTypeLinuxIRDA      LinkType = 144
+	LinkTypeLinuxLAPD      LinkType = 177
+	LinkTypeLinuxUSB       LinkType = 220
+	LinkTypeFC2            LinkType = 224
+	LinkTypeFC2Framed      LinkType = 225
+	LinkTypeIPv4           LinkType = 228
+	LinkTypeIPv6           LinkType = 229
+)
+
+// PPPoECode is the PPPoE code enum, taken from http://tools.ietf.org/html/rfc2516
+type PPPoECode uint8
+
+const (
+	PPPoECodePADI    PPPoECode = 0x09
+	PPPoECodePADO    PPPoECode = 0x07
+	PPPoECodePADR    PPPoECode = 0x19
+	PPPoECodePADS    PPPoECode = 0x65
+	PPPoECodePADT    PPPoECode = 0xA7
+	PPPoECodeSession PPPoECode = 0x00
+)
+
+// PPPType is an enumeration of PPP type values, and acts as a decoder for any
+// type it supports.
+type PPPType uint16
+
+const (
+	PPPTypeIPv4          PPPType = 0x0021
+	PPPTypeIPv6          PPPType = 0x0057
+	PPPTypeMPLSUnicast   PPPType = 0x0281
+	PPPTypeMPLSMulticast PPPType = 0x0283
+)
+
+// SCTPChunkType is an enumeration of chunk types inside SCTP packets.
+type SCTPChunkType uint8
+
+const (
+	SCTPChunkTypeData             SCTPChunkType = 0
+	SCTPChunkTypeInit             SCTPChunkType = 1
+	SCTPChunkTypeInitAck          SCTPChunkType = 2
+	SCTPChunkTypeSack             SCTPChunkType = 3
+	SCTPChunkTypeHeartbeat        SCTPChunkType = 4
+	SCTPChunkTypeHeartbeatAck     SCTPChunkType = 5
+	SCTPChunkTypeAbort            SCTPChunkType = 6
+	SCTPChunkTypeShutdown         SCTPChunkType = 7
+	SCTPChunkTypeShutdownAck      SCTPChunkType = 8
+	SCTPChunkTypeError            SCTPChunkType = 9
+	SCTPChunkTypeCookieEcho       SCTPChunkType = 10
+	SCTPChunkTypeCookieAck        SCTPChunkType = 11
+	SCTPChunkTypeShutdownComplete SCTPChunkType = 14
+)
+
+// FDDIFrameControl is an enumeration of FDDI frame control bytes.
+type FDDIFrameControl uint8
+
+const (
+	FDDIFrameControlLLC FDDIFrameControl = 0x50
+)
+
+// EAPOLType is an enumeration of EAPOL packet types.
+type EAPOLType uint8
+
+const (
+	EAPOLTypeEAP      EAPOLType = 0
+	EAPOLTypeStart    EAPOLType = 1
+	EAPOLTypeLogOff   EAPOLType = 2
+	EAPOLTypeKey      EAPOLType = 3
+	EAPOLTypeASFAlert EAPOLType = 4
+)
+
+// ProtocolFamily is the set of values defined as PF_* in sys/socket.h
+type ProtocolFamily uint8
+
+const (
+	ProtocolFamilyIPv4 ProtocolFamily = 2
+	// BSDs use different values for INET6... glory be.  These values taken from
+	// tcpdump 4.3.0.
+	ProtocolFamilyIPv6BSD     ProtocolFamily = 24
+	ProtocolFamilyIPv6FreeBSD ProtocolFamily = 28
+	ProtocolFamilyIPv6Darwin  ProtocolFamily = 30
+	ProtocolFamilyIPv6Linux   ProtocolFamily = 10
+)
+
+// Dot11Type is a combination of IEEE 802.11 frame's Type and Subtype fields.
+// By combining these two fields together into a single type, we're able to
+// provide a String function that correctly displays the subtype given the
+// top-level type.
+//
+// If you just care about the top-level type, use the MainType function.
+type Dot11Type uint8
+
+// MainType strips the subtype information from the given type,
+// returning just the overarching type (Mgmt, Ctrl, Data, Reserved).
+func (d Dot11Type) MainType() Dot11Type {
+	return d & dot11TypeMask
+}
+
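+// QOS reports whether this is an 802.11 QoS data frame (a data-type frame
+// with the QoS subtype bit set).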
+func (d Dot11Type) QOS() bool {
+	return d&dot11QOSMask == Dot11TypeDataQOSData
+}
+
+const (
+	Dot11TypeMgmt     Dot11Type = 0x00
+	Dot11TypeCtrl     Dot11Type = 0x01
+	Dot11TypeData     Dot11Type = 0x02
+	Dot11TypeReserved Dot11Type = 0x03
+	dot11TypeMask               = 0x03
+	dot11QOSMask                = 0x23
+
+	// The following are type/subtype conglomerations.
+
+	// Management
+	Dot11TypeMgmtAssociationReq    Dot11Type = 0x00
+	Dot11TypeMgmtAssociationResp   Dot11Type = 0x04
+	Dot11TypeMgmtReassociationReq  Dot11Type = 0x08
+	Dot11TypeMgmtReassociationResp Dot11Type = 0x0c
+	Dot11TypeMgmtProbeReq          Dot11Type = 0x10
+	Dot11TypeMgmtProbeResp         Dot11Type = 0x14
+	Dot11TypeMgmtMeasurementPilot  Dot11Type = 0x18
+	Dot11TypeMgmtBeacon            Dot11Type = 0x20
+	Dot11TypeMgmtATIM              Dot11Type = 0x24
+	Dot11TypeMgmtDisassociation    Dot11Type = 0x28
+	Dot11TypeMgmtAuthentication    Dot11Type = 0x2c
+	Dot11TypeMgmtDeauthentication  Dot11Type = 0x30
+	Dot11TypeMgmtAction            Dot11Type = 0x34
+	Dot11TypeMgmtActionNoAck       Dot11Type = 0x38
+
+	// Control
+	Dot11TypeCtrlWrapper       Dot11Type = 0x1d
+	Dot11TypeCtrlBlockAckReq   Dot11Type = 0x21
+	Dot11TypeCtrlBlockAck      Dot11Type = 0x25
+	Dot11TypeCtrlPowersavePoll Dot11Type = 0x29
+	Dot11TypeCtrlRTS           Dot11Type = 0x2d
+	Dot11TypeCtrlCTS           Dot11Type = 0x31
+	Dot11TypeCtrlAck           Dot11Type = 0x35
+	Dot11TypeCtrlCFEnd         Dot11Type = 0x39
+	Dot11TypeCtrlCFEndAck      Dot11Type = 0x3d
+
+	// Data
+	Dot11TypeDataCFAck              Dot11Type = 0x06
+	Dot11TypeDataCFPoll             Dot11Type = 0x0a
+	Dot11TypeDataCFAckPoll          Dot11Type = 0x0e
+	Dot11TypeDataNull               Dot11Type = 0x12
+	Dot11TypeDataCFAckNoData        Dot11Type = 0x16
+	Dot11TypeDataCFPollNoData       Dot11Type = 0x1a
+	Dot11TypeDataCFAckPollNoData    Dot11Type = 0x1e
+	Dot11TypeDataQOSData            Dot11Type = 0x22
+	Dot11TypeDataQOSDataCFAck       Dot11Type = 0x26
+	Dot11TypeDataQOSDataCFPoll      Dot11Type = 0x2a
+	Dot11TypeDataQOSDataCFAckPoll   Dot11Type = 0x2e
+	Dot11TypeDataQOSNull            Dot11Type = 0x32
+	Dot11TypeDataQOSCFPollNoData    Dot11Type = 0x3a
+	Dot11TypeDataQOSCFAckPollNoData Dot11Type = 0x3e
+)
+
+// Decode a raw v4 or v6 IP packet.
+func decodeIPv4or6(data []byte, p gopacket.PacketBuilder) error {
+	version := data[0] >> 4
+	switch version {
+	case 4:
+		return decodeIPv4(data, p)
+	case 6:
+		return decodeIPv6(data, p)
+	}
+	return fmt.Errorf("Invalid IP packet version %v", version)
+}
+
+func initActualTypeData() {
+	// Each of the XXXTypeMetadata arrays contains mappings of how to handle enum
+	// values for various enum types in gopacket/layers.
+	// These arrays are actually created by gen2.go and stored in
+	// enums_generated.go.
+	//
+	// So, EthernetTypeMetadata[2] contains information on how to handle EthernetType
+	// 2, including which name to give it and which decoder to use to decode
+	// packet data of that type.  These arrays are filled by default with all of the
+	// protocols gopacket/layers knows how to handle, but users of the library can
+	// add new decoders or override existing ones.  For example, if you write a better
+	// TCP decoder, you can override IPProtocolMetadata[IPProtocolTCP].DecodeWith
+	// with your new decoder, and all gopacket/layers decoding will use your new
+	// decoder whenever they encounter that IPProtocol.
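+	//
+	// For example (an illustrative sketch of caller code, not something this
+	// function does), a client package could install its own TCP decoder,
+	// where decodeMyTCP is a hypothetical gopacket.DecodeFunc:
+	//
+	//	layers.IPProtocolMetadata[layers.IPProtocolTCP] = layers.EnumMetadata{
+	//		DecodeWith: gopacket.DecodeFunc(decodeMyTCP),
+	//		Name:       "TCP",
+	//		LayerType:  layers.LayerTypeTCP,
+	//	}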
+
+	// Here we link up all enumerations with their respective names and decoders.
+	EthernetTypeMetadata[EthernetTypeLLC] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLLC), Name: "LLC", LayerType: LayerTypeLLC}
+	EthernetTypeMetadata[EthernetTypeIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+	EthernetTypeMetadata[EthernetTypeIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+	EthernetTypeMetadata[EthernetTypeARP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeARP), Name: "ARP", LayerType: LayerTypeARP}
+	EthernetTypeMetadata[EthernetTypeDot1Q] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot1Q), Name: "Dot1Q", LayerType: LayerTypeDot1Q}
+	EthernetTypeMetadata[EthernetTypePPP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP", LayerType: LayerTypePPP}
+	EthernetTypeMetadata[EthernetTypePPPoEDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPPoE), Name: "PPPoEDiscovery", LayerType: LayerTypePPPoE}
+	EthernetTypeMetadata[EthernetTypePPPoESession] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPPoE), Name: "PPPoESession", LayerType: LayerTypePPPoE}
+	EthernetTypeMetadata[EthernetTypeEthernetCTP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernetCTP), Name: "EthernetCTP", LayerType: LayerTypeEthernetCTP}
+	EthernetTypeMetadata[EthernetTypeCiscoDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeCiscoDiscovery), Name: "CiscoDiscovery", LayerType: LayerTypeCiscoDiscovery}
+	EthernetTypeMetadata[EthernetTypeNortelDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeNortelDiscovery), Name: "NortelDiscovery", LayerType: LayerTypeNortelDiscovery}
+	EthernetTypeMetadata[EthernetTypeLinkLayerDiscovery] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLinkLayerDiscovery), Name: "LinkLayerDiscovery", LayerType: LayerTypeLinkLayerDiscovery}
+	EthernetTypeMetadata[EthernetTypeMPLSUnicast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSUnicast", LayerType: LayerTypeMPLS}
+	EthernetTypeMetadata[EthernetTypeMPLSMulticast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSMulticast", LayerType: LayerTypeMPLS}
+	EthernetTypeMetadata[EthernetTypeEAPOL] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAPOL), Name: "EAPOL", LayerType: LayerTypeEAPOL}
+	EthernetTypeMetadata[EthernetTypeQinQ] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot1Q), Name: "Dot1Q", LayerType: LayerTypeDot1Q}
+	EthernetTypeMetadata[EthernetTypeTransparentEthernetBridging] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernet), Name: "TransparentEthernetBridging", LayerType: LayerTypeEthernet}
+	EthernetTypeMetadata[EthernetTypeERSPAN] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeERSPANII), Name: "ERSPAN Type II", LayerType: LayerTypeERSPANII}
+
+	IPProtocolMetadata[IPProtocolIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+	IPProtocolMetadata[IPProtocolTCP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeTCP), Name: "TCP", LayerType: LayerTypeTCP}
+	IPProtocolMetadata[IPProtocolUDP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUDP), Name: "UDP", LayerType: LayerTypeUDP}
+	IPProtocolMetadata[IPProtocolICMPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeICMPv4), Name: "ICMPv4", LayerType: LayerTypeICMPv4}
+	IPProtocolMetadata[IPProtocolICMPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeICMPv6), Name: "ICMPv6", LayerType: LayerTypeICMPv6}
+	IPProtocolMetadata[IPProtocolSCTP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTP), Name: "SCTP", LayerType: LayerTypeSCTP}
+	IPProtocolMetadata[IPProtocolIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+	IPProtocolMetadata[IPProtocolIPIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+	IPProtocolMetadata[IPProtocolEtherIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEtherIP), Name: "EtherIP", LayerType: LayerTypeEtherIP}
+	IPProtocolMetadata[IPProtocolRUDP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeRUDP), Name: "RUDP", LayerType: LayerTypeRUDP}
+	IPProtocolMetadata[IPProtocolGRE] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeGRE), Name: "GRE", LayerType: LayerTypeGRE}
+	IPProtocolMetadata[IPProtocolIPv6HopByHop] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6HopByHop), Name: "IPv6HopByHop", LayerType: LayerTypeIPv6HopByHop}
+	IPProtocolMetadata[IPProtocolIPv6Routing] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Routing), Name: "IPv6Routing", LayerType: LayerTypeIPv6Routing}
+	IPProtocolMetadata[IPProtocolIPv6Fragment] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Fragment), Name: "IPv6Fragment", LayerType: LayerTypeIPv6Fragment}
+	IPProtocolMetadata[IPProtocolIPv6Destination] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6Destination), Name: "IPv6Destination", LayerType: LayerTypeIPv6Destination}
+	IPProtocolMetadata[IPProtocolOSPF] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeOSPF), Name: "OSPF", LayerType: LayerTypeOSPF}
+	IPProtocolMetadata[IPProtocolAH] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPSecAH), Name: "IPSecAH", LayerType: LayerTypeIPSecAH}
+	IPProtocolMetadata[IPProtocolESP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPSecESP), Name: "IPSecESP", LayerType: LayerTypeIPSecESP}
+	IPProtocolMetadata[IPProtocolUDPLite] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUDPLite), Name: "UDPLite", LayerType: LayerTypeUDPLite}
+	IPProtocolMetadata[IPProtocolMPLSInIP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLS", LayerType: LayerTypeMPLS}
+	IPProtocolMetadata[IPProtocolNoNextHeader] = EnumMetadata{DecodeWith: gopacket.DecodePayload, Name: "NoNextHeader", LayerType: gopacket.LayerTypePayload}
+	IPProtocolMetadata[IPProtocolIGMP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIGMP), Name: "IGMP", LayerType: LayerTypeIGMP}
+	IPProtocolMetadata[IPProtocolVRRP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeVRRP), Name: "VRRP", LayerType: LayerTypeVRRP}
+
+	SCTPChunkTypeMetadata[SCTPChunkTypeData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPData), Name: "Data"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeInit] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPInit), Name: "Init"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeInitAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPInit), Name: "InitAck"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeSack] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPSack), Name: "Sack"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeHeartbeat] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPHeartbeat), Name: "Heartbeat"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeHeartbeatAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPHeartbeat), Name: "HeartbeatAck"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeAbort] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPError), Name: "Abort"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeError] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPError), Name: "Error"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeShutdown] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPShutdown), Name: "Shutdown"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeShutdownAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPShutdownAck), Name: "ShutdownAck"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeCookieEcho] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPCookieEcho), Name: "CookieEcho"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeCookieAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPEmptyLayer), Name: "CookieAck"}
+	SCTPChunkTypeMetadata[SCTPChunkTypeShutdownComplete] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeSCTPEmptyLayer), Name: "ShutdownComplete"}
+
+	PPPTypeMetadata[PPPTypeIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4"}
+	PPPTypeMetadata[PPPTypeIPv6] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6"}
+	PPPTypeMetadata[PPPTypeMPLSUnicast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSUnicast"}
+	PPPTypeMetadata[PPPTypeMPLSMulticast] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeMPLS), Name: "MPLSMulticast"}
+
+	PPPoECodeMetadata[PPPoECodeSession] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP"}
+
+	LinkTypeMetadata[LinkTypeEthernet] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEthernet), Name: "Ethernet"}
+	LinkTypeMetadata[LinkTypePPP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePPP), Name: "PPP"}
+	LinkTypeMetadata[LinkTypeFDDI] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeFDDI), Name: "FDDI"}
+	LinkTypeMetadata[LinkTypeNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLoopback), Name: "Null"}
+	LinkTypeMetadata[LinkTypeLoop] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLoopback), Name: "Loop"}
+	LinkTypeMetadata[LinkTypeIEEE802_11] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11), Name: "802.11"}
+	LinkTypeMetadata[LinkTypeRaw] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+	// See https://github.com/the-tcpdump-group/libpcap/blob/170f717e6e818cdc4bcbbfd906b63088eaa88fa0/pcap/dlt.h#L85
+	// Or https://github.com/wireshark/wireshark/blob/854cfe53efe44080609c78053ecfb2342ad84a08/wiretap/pcap-common.c#L508
+	if runtime.GOOS == "openbsd" {
+		LinkTypeMetadata[14] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+	} else {
+		LinkTypeMetadata[12] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4or6), Name: "Raw"}
+	}
+	LinkTypeMetadata[LinkTypePFLog] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePFLog), Name: "PFLog"}
+	LinkTypeMetadata[LinkTypeIEEE80211Radio] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeRadioTap), Name: "RadioTap"}
+	LinkTypeMetadata[LinkTypeLinuxUSB] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSB), Name: "USB"}
+	LinkTypeMetadata[LinkTypeLinuxSLL] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLinuxSLL), Name: "Linux SLL"}
+	LinkTypeMetadata[LinkTypePrismHeader] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodePrismHeader), Name: "Prism"}
+
+	FDDIFrameControlMetadata[FDDIFrameControlLLC] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeLLC), Name: "LLC"}
+
+	EAPOLTypeMetadata[EAPOLTypeEAP] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAP), Name: "EAP", LayerType: LayerTypeEAP}
+	EAPOLTypeMetadata[EAPOLTypeKey] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeEAPOLKey), Name: "EAPOLKey", LayerType: LayerTypeEAPOLKey}
+
+	ProtocolFamilyMetadata[ProtocolFamilyIPv4] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv4), Name: "IPv4", LayerType: LayerTypeIPv4}
+	ProtocolFamilyMetadata[ProtocolFamilyIPv6BSD] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+	ProtocolFamilyMetadata[ProtocolFamilyIPv6FreeBSD] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+	ProtocolFamilyMetadata[ProtocolFamilyIPv6Darwin] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+	ProtocolFamilyMetadata[ProtocolFamilyIPv6Linux] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeIPv6), Name: "IPv6", LayerType: LayerTypeIPv6}
+
+	Dot11TypeMetadata[Dot11TypeMgmtAssociationReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAssociationReq), Name: "MgmtAssociationReq", LayerType: LayerTypeDot11MgmtAssociationReq}
+	Dot11TypeMetadata[Dot11TypeMgmtAssociationResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAssociationResp), Name: "MgmtAssociationResp", LayerType: LayerTypeDot11MgmtAssociationResp}
+	Dot11TypeMetadata[Dot11TypeMgmtReassociationReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtReassociationReq), Name: "MgmtReassociationReq", LayerType: LayerTypeDot11MgmtReassociationReq}
+	Dot11TypeMetadata[Dot11TypeMgmtReassociationResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtReassociationResp), Name: "MgmtReassociationResp", LayerType: LayerTypeDot11MgmtReassociationResp}
+	Dot11TypeMetadata[Dot11TypeMgmtProbeReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtProbeReq), Name: "MgmtProbeReq", LayerType: LayerTypeDot11MgmtProbeReq}
+	Dot11TypeMetadata[Dot11TypeMgmtProbeResp] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtProbeResp), Name: "MgmtProbeResp", LayerType: LayerTypeDot11MgmtProbeResp}
+	Dot11TypeMetadata[Dot11TypeMgmtMeasurementPilot] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot), Name: "MgmtMeasurementPilot", LayerType: LayerTypeDot11MgmtMeasurementPilot}
+	Dot11TypeMetadata[Dot11TypeMgmtBeacon] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtBeacon), Name: "MgmtBeacon", LayerType: LayerTypeDot11MgmtBeacon}
+	Dot11TypeMetadata[Dot11TypeMgmtATIM] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtATIM), Name: "MgmtATIM", LayerType: LayerTypeDot11MgmtATIM}
+	Dot11TypeMetadata[Dot11TypeMgmtDisassociation] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtDisassociation), Name: "MgmtDisassociation", LayerType: LayerTypeDot11MgmtDisassociation}
+	Dot11TypeMetadata[Dot11TypeMgmtAuthentication] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAuthentication), Name: "MgmtAuthentication", LayerType: LayerTypeDot11MgmtAuthentication}
+	Dot11TypeMetadata[Dot11TypeMgmtDeauthentication] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtDeauthentication), Name: "MgmtDeauthentication", LayerType: LayerTypeDot11MgmtDeauthentication}
+	Dot11TypeMetadata[Dot11TypeMgmtAction] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtAction), Name: "MgmtAction", LayerType: LayerTypeDot11MgmtAction}
+	Dot11TypeMetadata[Dot11TypeMgmtActionNoAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11MgmtActionNoAck), Name: "MgmtActionNoAck", LayerType: LayerTypeDot11MgmtActionNoAck}
+	Dot11TypeMetadata[Dot11TypeCtrl] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Ctrl), Name: "Ctrl", LayerType: LayerTypeDot11Ctrl}
+	Dot11TypeMetadata[Dot11TypeCtrlWrapper] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Ctrl), Name: "CtrlWrapper", LayerType: LayerTypeDot11Ctrl}
+	Dot11TypeMetadata[Dot11TypeCtrlBlockAckReq] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq), Name: "CtrlBlockAckReq", LayerType: LayerTypeDot11CtrlBlockAckReq}
+	Dot11TypeMetadata[Dot11TypeCtrlBlockAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlBlockAck), Name: "CtrlBlockAck", LayerType: LayerTypeDot11CtrlBlockAck}
+	Dot11TypeMetadata[Dot11TypeCtrlPowersavePoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll), Name: "CtrlPowersavePoll", LayerType: LayerTypeDot11CtrlPowersavePoll}
+	Dot11TypeMetadata[Dot11TypeCtrlRTS] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlRTS), Name: "CtrlRTS", LayerType: LayerTypeDot11CtrlRTS}
+	Dot11TypeMetadata[Dot11TypeCtrlCTS] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCTS), Name: "CtrlCTS", LayerType: LayerTypeDot11CtrlCTS}
+	Dot11TypeMetadata[Dot11TypeCtrlAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlAck), Name: "CtrlAck", LayerType: LayerTypeDot11CtrlAck}
+	Dot11TypeMetadata[Dot11TypeCtrlCFEnd] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCFEnd), Name: "CtrlCFEnd", LayerType: LayerTypeDot11CtrlCFEnd}
+	Dot11TypeMetadata[Dot11TypeCtrlCFEndAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11CtrlCFEndAck), Name: "CtrlCFEndAck", LayerType: LayerTypeDot11CtrlCFEndAck}
+	Dot11TypeMetadata[Dot11TypeData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11Data), Name: "Data", LayerType: LayerTypeDot11Data}
+	Dot11TypeMetadata[Dot11TypeDataCFAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAck), Name: "DataCFAck", LayerType: LayerTypeDot11DataCFAck}
+	Dot11TypeMetadata[Dot11TypeDataCFPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFPoll), Name: "DataCFPoll", LayerType: LayerTypeDot11DataCFPoll}
+	Dot11TypeMetadata[Dot11TypeDataCFAckPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckPoll), Name: "DataCFAckPoll", LayerType: LayerTypeDot11DataCFAckPoll}
+	Dot11TypeMetadata[Dot11TypeDataNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataNull), Name: "DataNull", LayerType: LayerTypeDot11DataNull}
+	Dot11TypeMetadata[Dot11TypeDataCFAckNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckNoData), Name: "DataCFAckNoData", LayerType: LayerTypeDot11DataCFAckNoData}
+	Dot11TypeMetadata[Dot11TypeDataCFPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFPollNoData), Name: "DataCFPollNoData", LayerType: LayerTypeDot11DataCFPollNoData}
+	Dot11TypeMetadata[Dot11TypeDataCFAckPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataCFAckPollNoData), Name: "DataCFAckPollNoData", LayerType: LayerTypeDot11DataCFAckPollNoData}
+	Dot11TypeMetadata[Dot11TypeDataQOSData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSData), Name: "DataQOSData", LayerType: LayerTypeDot11DataQOSData}
+	Dot11TypeMetadata[Dot11TypeDataQOSDataCFAck] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck), Name: "DataQOSDataCFAck", LayerType: LayerTypeDot11DataQOSDataCFAck}
+	Dot11TypeMetadata[Dot11TypeDataQOSDataCFPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll), Name: "DataQOSDataCFPoll", LayerType: LayerTypeDot11DataQOSDataCFPoll}
+	Dot11TypeMetadata[Dot11TypeDataQOSDataCFAckPoll] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll), Name: "DataQOSDataCFAckPoll", LayerType: LayerTypeDot11DataQOSDataCFAckPoll}
+	Dot11TypeMetadata[Dot11TypeDataQOSNull] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSNull), Name: "DataQOSNull", LayerType: LayerTypeDot11DataQOSNull}
+	Dot11TypeMetadata[Dot11TypeDataQOSCFPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData), Name: "DataQOSCFPollNoData", LayerType: LayerTypeDot11DataQOSCFPollNoData}
+	Dot11TypeMetadata[Dot11TypeDataQOSCFAckPollNoData] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData), Name: "DataQOSCFAckPollNoData", LayerType: LayerTypeDot11DataQOSCFAckPollNoData}
+
+	USBTransportTypeMetadata[USBTransportTypeInterrupt] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBInterrupt), Name: "Interrupt", LayerType: LayerTypeUSBInterrupt}
+	USBTransportTypeMetadata[USBTransportTypeControl] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBControl), Name: "Control", LayerType: LayerTypeUSBControl}
+	USBTransportTypeMetadata[USBTransportTypeBulk] = EnumMetadata{DecodeWith: gopacket.DecodeFunc(decodeUSBBulk), Name: "Bulk", LayerType: LayerTypeUSBBulk}
+}
diff --git a/vendor/github.com/google/gopacket/layers/enums_generated.go b/vendor/github.com/google/gopacket/layers/enums_generated.go
new file mode 100644
index 0000000..bf77aac
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/enums_generated.go
@@ -0,0 +1,434 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+
+package layers
+
+// Created by gen2.go, don't edit manually
+// Generated at 2017-10-23 10:20:24.458771856 -0600 MDT m=+0.001159033
+
+import (
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+func init() {
+	initUnknownTypesForLinkType()
+	initUnknownTypesForEthernetType()
+	initUnknownTypesForPPPType()
+	initUnknownTypesForIPProtocol()
+	initUnknownTypesForSCTPChunkType()
+	initUnknownTypesForPPPoECode()
+	initUnknownTypesForFDDIFrameControl()
+	initUnknownTypesForEAPOLType()
+	initUnknownTypesForProtocolFamily()
+	initUnknownTypesForDot11Type()
+	initUnknownTypesForUSBTransportType()
+	initActualTypeData()
+}
+
+// Decoder calls LinkTypeMetadata.DecodeWith's decoder.
+func (a LinkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return LinkTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns LinkTypeMetadata.Name.
+func (a LinkType) String() string {
+	return LinkTypeMetadata[a].Name
+}
+
+// LayerType returns LinkTypeMetadata.LayerType.
+func (a LinkType) LayerType() gopacket.LayerType {
+	return LinkTypeMetadata[a].LayerType
+}
+
+type errorDecoderForLinkType int
+
+func (a *errorDecoderForLinkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForLinkType) Error() string {
+	return fmt.Sprintf("Unable to decode LinkType %d", int(*a))
+}
+
+var errorDecodersForLinkType [256]errorDecoderForLinkType
+var LinkTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForLinkType() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForLinkType[i] = errorDecoderForLinkType(i)
+		LinkTypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForLinkType[i],
+			Name:       "UnknownLinkType",
+		}
+	}
+}
+
+// Decoder calls EthernetTypeMetadata.DecodeWith's decoder.
+func (a EthernetType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return EthernetTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns EthernetTypeMetadata.Name.
+func (a EthernetType) String() string {
+	return EthernetTypeMetadata[a].Name
+}
+
+// LayerType returns EthernetTypeMetadata.LayerType.
+func (a EthernetType) LayerType() gopacket.LayerType {
+	return EthernetTypeMetadata[a].LayerType
+}
+
+type errorDecoderForEthernetType int
+
+func (a *errorDecoderForEthernetType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForEthernetType) Error() string {
+	return fmt.Sprintf("Unable to decode EthernetType %d", int(*a))
+}
+
+var errorDecodersForEthernetType [65536]errorDecoderForEthernetType
+var EthernetTypeMetadata [65536]EnumMetadata
+
+func initUnknownTypesForEthernetType() {
+	for i := 0; i < 65536; i++ {
+		errorDecodersForEthernetType[i] = errorDecoderForEthernetType(i)
+		EthernetTypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForEthernetType[i],
+			Name:       "UnknownEthernetType",
+		}
+	}
+}
+
+// Decoder calls PPPTypeMetadata.DecodeWith's decoder.
+func (a PPPType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return PPPTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns PPPTypeMetadata.Name.
+func (a PPPType) String() string {
+	return PPPTypeMetadata[a].Name
+}
+
+// LayerType returns PPPTypeMetadata.LayerType.
+func (a PPPType) LayerType() gopacket.LayerType {
+	return PPPTypeMetadata[a].LayerType
+}
+
+type errorDecoderForPPPType int
+
+func (a *errorDecoderForPPPType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForPPPType) Error() string {
+	return fmt.Sprintf("Unable to decode PPPType %d", int(*a))
+}
+
+var errorDecodersForPPPType [65536]errorDecoderForPPPType
+var PPPTypeMetadata [65536]EnumMetadata
+
+func initUnknownTypesForPPPType() {
+	for i := 0; i < 65536; i++ {
+		errorDecodersForPPPType[i] = errorDecoderForPPPType(i)
+		PPPTypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForPPPType[i],
+			Name:       "UnknownPPPType",
+		}
+	}
+}
+
+// Decoder calls IPProtocolMetadata.DecodeWith's decoder.
+func (a IPProtocol) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return IPProtocolMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns IPProtocolMetadata.Name.
+func (a IPProtocol) String() string {
+	return IPProtocolMetadata[a].Name
+}
+
+// LayerType returns IPProtocolMetadata.LayerType.
+func (a IPProtocol) LayerType() gopacket.LayerType {
+	return IPProtocolMetadata[a].LayerType
+}
+
+type errorDecoderForIPProtocol int
+
+func (a *errorDecoderForIPProtocol) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForIPProtocol) Error() string {
+	return fmt.Sprintf("Unable to decode IPProtocol %d", int(*a))
+}
+
+var errorDecodersForIPProtocol [256]errorDecoderForIPProtocol
+var IPProtocolMetadata [256]EnumMetadata
+
+func initUnknownTypesForIPProtocol() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForIPProtocol[i] = errorDecoderForIPProtocol(i)
+		IPProtocolMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForIPProtocol[i],
+			Name:       "UnknownIPProtocol",
+		}
+	}
+}
+
+// Decoder calls SCTPChunkTypeMetadata.DecodeWith's decoder.
+func (a SCTPChunkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return SCTPChunkTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns SCTPChunkTypeMetadata.Name.
+func (a SCTPChunkType) String() string {
+	return SCTPChunkTypeMetadata[a].Name
+}
+
+// LayerType returns SCTPChunkTypeMetadata.LayerType.
+func (a SCTPChunkType) LayerType() gopacket.LayerType {
+	return SCTPChunkTypeMetadata[a].LayerType
+}
+
+type errorDecoderForSCTPChunkType int
+
+func (a *errorDecoderForSCTPChunkType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForSCTPChunkType) Error() string {
+	return fmt.Sprintf("Unable to decode SCTPChunkType %d", int(*a))
+}
+
+var errorDecodersForSCTPChunkType [256]errorDecoderForSCTPChunkType
+var SCTPChunkTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForSCTPChunkType() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForSCTPChunkType[i] = errorDecoderForSCTPChunkType(i)
+		SCTPChunkTypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForSCTPChunkType[i],
+			Name:       "UnknownSCTPChunkType",
+		}
+	}
+}
+
+// Decoder calls PPPoECodeMetadata.DecodeWith's decoder.
+func (a PPPoECode) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return PPPoECodeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns PPPoECodeMetadata.Name.
+func (a PPPoECode) String() string {
+	return PPPoECodeMetadata[a].Name
+}
+
+// LayerType returns PPPoECodeMetadata.LayerType.
+func (a PPPoECode) LayerType() gopacket.LayerType {
+	return PPPoECodeMetadata[a].LayerType
+}
+
+type errorDecoderForPPPoECode int
+
+func (a *errorDecoderForPPPoECode) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForPPPoECode) Error() string {
+	return fmt.Sprintf("Unable to decode PPPoECode %d", int(*a))
+}
+
+var errorDecodersForPPPoECode [256]errorDecoderForPPPoECode
+var PPPoECodeMetadata [256]EnumMetadata
+
+func initUnknownTypesForPPPoECode() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForPPPoECode[i] = errorDecoderForPPPoECode(i)
+		PPPoECodeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForPPPoECode[i],
+			Name:       "UnknownPPPoECode",
+		}
+	}
+}
+
+// Decoder calls FDDIFrameControlMetadata.DecodeWith's decoder.
+func (a FDDIFrameControl) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return FDDIFrameControlMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns FDDIFrameControlMetadata.Name.
+func (a FDDIFrameControl) String() string {
+	return FDDIFrameControlMetadata[a].Name
+}
+
+// LayerType returns FDDIFrameControlMetadata.LayerType.
+func (a FDDIFrameControl) LayerType() gopacket.LayerType {
+	return FDDIFrameControlMetadata[a].LayerType
+}
+
+type errorDecoderForFDDIFrameControl int
+
+func (a *errorDecoderForFDDIFrameControl) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForFDDIFrameControl) Error() string {
+	return fmt.Sprintf("Unable to decode FDDIFrameControl %d", int(*a))
+}
+
+var errorDecodersForFDDIFrameControl [256]errorDecoderForFDDIFrameControl
+var FDDIFrameControlMetadata [256]EnumMetadata
+
+func initUnknownTypesForFDDIFrameControl() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForFDDIFrameControl[i] = errorDecoderForFDDIFrameControl(i)
+		FDDIFrameControlMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForFDDIFrameControl[i],
+			Name:       "UnknownFDDIFrameControl",
+		}
+	}
+}
+
+// Decoder calls EAPOLTypeMetadata.DecodeWith's decoder.
+func (a EAPOLType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return EAPOLTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns EAPOLTypeMetadata.Name.
+func (a EAPOLType) String() string {
+	return EAPOLTypeMetadata[a].Name
+}
+
+// LayerType returns EAPOLTypeMetadata.LayerType.
+func (a EAPOLType) LayerType() gopacket.LayerType {
+	return EAPOLTypeMetadata[a].LayerType
+}
+
+type errorDecoderForEAPOLType int
+
+func (a *errorDecoderForEAPOLType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForEAPOLType) Error() string {
+	return fmt.Sprintf("Unable to decode EAPOLType %d", int(*a))
+}
+
+var errorDecodersForEAPOLType [256]errorDecoderForEAPOLType
+var EAPOLTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForEAPOLType() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForEAPOLType[i] = errorDecoderForEAPOLType(i)
+		EAPOLTypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForEAPOLType[i],
+			Name:       "UnknownEAPOLType",
+		}
+	}
+}
+
+// Decoder calls ProtocolFamilyMetadata.DecodeWith's decoder.
+func (a ProtocolFamily) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return ProtocolFamilyMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns ProtocolFamilyMetadata.Name.
+func (a ProtocolFamily) String() string {
+	return ProtocolFamilyMetadata[a].Name
+}
+
+// LayerType returns ProtocolFamilyMetadata.LayerType.
+func (a ProtocolFamily) LayerType() gopacket.LayerType {
+	return ProtocolFamilyMetadata[a].LayerType
+}
+
+type errorDecoderForProtocolFamily int
+
+func (a *errorDecoderForProtocolFamily) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForProtocolFamily) Error() string {
+	return fmt.Sprintf("Unable to decode ProtocolFamily %d", int(*a))
+}
+
+var errorDecodersForProtocolFamily [256]errorDecoderForProtocolFamily
+var ProtocolFamilyMetadata [256]EnumMetadata
+
+func initUnknownTypesForProtocolFamily() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForProtocolFamily[i] = errorDecoderForProtocolFamily(i)
+		ProtocolFamilyMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForProtocolFamily[i],
+			Name:       "UnknownProtocolFamily",
+		}
+	}
+}
+
+// Decoder calls Dot11TypeMetadata.DecodeWith's decoder.
+func (a Dot11Type) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return Dot11TypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns Dot11TypeMetadata.Name.
+func (a Dot11Type) String() string {
+	return Dot11TypeMetadata[a].Name
+}
+
+// LayerType returns Dot11TypeMetadata.LayerType.
+func (a Dot11Type) LayerType() gopacket.LayerType {
+	return Dot11TypeMetadata[a].LayerType
+}
+
+type errorDecoderForDot11Type int
+
+func (a *errorDecoderForDot11Type) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForDot11Type) Error() string {
+	return fmt.Sprintf("Unable to decode Dot11Type %d", int(*a))
+}
+
+var errorDecodersForDot11Type [256]errorDecoderForDot11Type
+var Dot11TypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForDot11Type() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForDot11Type[i] = errorDecoderForDot11Type(i)
+		Dot11TypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForDot11Type[i],
+			Name:       "UnknownDot11Type",
+		}
+	}
+}
+
+// Decoder calls USBTransportTypeMetadata.DecodeWith's decoder.
+func (a USBTransportType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return USBTransportTypeMetadata[a].DecodeWith.Decode(data, p)
+}
+
+// String returns USBTransportTypeMetadata.Name.
+func (a USBTransportType) String() string {
+	return USBTransportTypeMetadata[a].Name
+}
+
+// LayerType returns USBTransportTypeMetadata.LayerType.
+func (a USBTransportType) LayerType() gopacket.LayerType {
+	return USBTransportTypeMetadata[a].LayerType
+}
+
+type errorDecoderForUSBTransportType int
+
+func (a *errorDecoderForUSBTransportType) Decode(data []byte, p gopacket.PacketBuilder) error {
+	return a
+}
+func (a *errorDecoderForUSBTransportType) Error() string {
+	return fmt.Sprintf("Unable to decode USBTransportType %d", int(*a))
+}
+
+var errorDecodersForUSBTransportType [256]errorDecoderForUSBTransportType
+var USBTransportTypeMetadata [256]EnumMetadata
+
+func initUnknownTypesForUSBTransportType() {
+	for i := 0; i < 256; i++ {
+		errorDecodersForUSBTransportType[i] = errorDecoderForUSBTransportType(i)
+		USBTransportTypeMetadata[i] = EnumMetadata{
+			DecodeWith: &errorDecodersForUSBTransportType[i],
+			Name:       "UnknownUSBTransportType",
+		}
+	}
+}
diff --git a/vendor/github.com/google/gopacket/layers/erspan2.go b/vendor/github.com/google/gopacket/layers/erspan2.go
new file mode 100644
index 0000000..1544362
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/erspan2.go
@@ -0,0 +1,86 @@
+// Copyright 2018 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	// ERSPANIIVersionObsolete - The obsolete value for the version field
+	ERSPANIIVersionObsolete = 0x0
+	// ERSPANIIVersion - The current value for the version field
+	ERSPANIIVersion = 0x1
+)
+
+// ERSPANII contains all of the fields found in an ERSPAN Type II header
+// https://tools.ietf.org/html/draft-foschiano-erspan-03
+type ERSPANII struct {
+	BaseLayer
+	IsTruncated                         bool
+	Version, CoS, TrunkEncap            uint8
+	VLANIdentifier, SessionID, Reserved uint16
+	Index                               uint32
+}
+
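+// LayerType returns LayerTypeERSPANII.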
+func (erspan2 *ERSPANII) LayerType() gopacket.LayerType { return LayerTypeERSPANII }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (erspan2 *ERSPANII) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	erspan2Length := 8
+	erspan2.Version = data[0] & 0xF0 >> 4
+	erspan2.VLANIdentifier = binary.BigEndian.Uint16(data[:2]) & 0x0FFF
+	erspan2.CoS = data[2] & 0xE0 >> 5
+	erspan2.TrunkEncap = data[2] & 0x18 >> 3
+	erspan2.IsTruncated = data[2]&0x4>>2 != 0
+	erspan2.SessionID = binary.BigEndian.Uint16(data[2:4]) & 0x03FF
+	erspan2.Reserved = binary.BigEndian.Uint16(data[4:6]) & 0xFFF0 >> 4
+	erspan2.Index = binary.BigEndian.Uint32(data[4:8]) & 0x000FFFFF
+	erspan2.Contents = data[:erspan2Length]
+	erspan2.Payload = data[erspan2Length:]
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (erspan2 *ERSPANII) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+
+	twoByteInt := uint16(erspan2.Version&0xF)<<12 | erspan2.VLANIdentifier&0x0FFF
+	binary.BigEndian.PutUint16(bytes, twoByteInt)
+
+	twoByteInt = uint16(erspan2.CoS&0x7)<<13 | uint16(erspan2.TrunkEncap&0x3)<<11 | erspan2.SessionID&0x03FF
+	if erspan2.IsTruncated {
+		twoByteInt |= 0x400
+	}
+	binary.BigEndian.PutUint16(bytes[2:], twoByteInt)
+
+	fourByteInt := uint32(erspan2.Reserved&0x0FFF)<<20 | erspan2.Index&0x000FFFFF
+	binary.BigEndian.PutUint32(bytes[4:], fourByteInt)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (erspan2 *ERSPANII) CanDecode() gopacket.LayerClass {
+	return LayerTypeERSPANII
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (erspan2 *ERSPANII) NextLayerType() gopacket.LayerType {
+	return LayerTypeEthernet
+}
+
+func decodeERSPANII(data []byte, p gopacket.PacketBuilder) error {
+	erspan2 := &ERSPANII{}
+	return decodingLayerDecoder(erspan2, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/etherip.go b/vendor/github.com/google/gopacket/layers/etherip.go
new file mode 100644
index 0000000..5b7b722
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/etherip.go
@@ -0,0 +1,45 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"github.com/google/gopacket"
+)
+
+// EtherIP is the struct for storing RFC 3378 EtherIP packet headers.
+type EtherIP struct {
+	BaseLayer
+	Version  uint8
+	Reserved uint16
+}
+
+// LayerType returns gopacket.LayerTypeEtherIP.
+func (e *EtherIP) LayerType() gopacket.LayerType { return LayerTypeEtherIP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (e *EtherIP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	e.Version = data[0] >> 4
+	e.Reserved = binary.BigEndian.Uint16(data[:2]) & 0x0fff
+	e.BaseLayer = BaseLayer{data[:2], data[2:]}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (e *EtherIP) CanDecode() gopacket.LayerClass {
+	return LayerTypeEtherIP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (e *EtherIP) NextLayerType() gopacket.LayerType {
+	return LayerTypeEthernet
+}
+
+func decodeEtherIP(data []byte, p gopacket.PacketBuilder) error {
+	e := &EtherIP{}
+	return decodingLayerDecoder(e, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/ethernet.go b/vendor/github.com/google/gopacket/layers/ethernet.go
new file mode 100644
index 0000000..b73748f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ethernet.go
@@ -0,0 +1,123 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"github.com/google/gopacket"
+	"net"
+)
+
+// EthernetBroadcast is the broadcast MAC address used by Ethernet.
+var EthernetBroadcast = net.HardwareAddr{0xff, 0xff, 0xff, 0xff, 0xff, 0xff}
+
+// Ethernet is the layer for Ethernet frame headers.
+type Ethernet struct {
+	BaseLayer
+	SrcMAC, DstMAC net.HardwareAddr
+	EthernetType   EthernetType
+	// Length is only set if a length field exists within this header.  Ethernet
+	// headers follow two different standards: one uses an EthernetType, the
+	// other defines a length that is followed by an LLC header (802.3).  In the
+	// former case we set EthernetType and Length stays 0; in the latter case we
+	// set Length and EthernetType = EthernetTypeLLC (see the usage sketch after
+	// this struct).
+	Length uint16
+}
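+
+// Usage sketch (illustrative only): it shows how a caller can tell the two
+// header variants apart after decoding. The variable frame is an assumed byte
+// slice holding a raw Ethernet frame.
+//
+//	var eth Ethernet
+//	if err := eth.DecodeFromBytes(frame, gopacket.NilDecodeFeedback); err == nil {
+//		if eth.EthernetType == EthernetTypeLLC {
+//			// 802.3 frame: eth.Length holds the payload length.
+//		} else {
+//			// Ethernet II frame: eth.EthernetType identifies the next layer.
+//		}
+//	}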
+
+// LayerType returns LayerTypeEthernet
+func (e *Ethernet) LayerType() gopacket.LayerType { return LayerTypeEthernet }
+
+// LinkFlow returns a new flow of type EndpointMAC.
+func (e *Ethernet) LinkFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointMAC, e.SrcMAC, e.DstMAC)
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (eth *Ethernet) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 14 {
+		return errors.New("Ethernet packet too small")
+	}
+	eth.DstMAC = net.HardwareAddr(data[0:6])
+	eth.SrcMAC = net.HardwareAddr(data[6:12])
+	eth.EthernetType = EthernetType(binary.BigEndian.Uint16(data[12:14]))
+	eth.BaseLayer = BaseLayer{data[:14], data[14:]}
+	eth.Length = 0
+	if eth.EthernetType < 0x0600 {
+		eth.Length = uint16(eth.EthernetType)
+		eth.EthernetType = EthernetTypeLLC
+		if cmp := len(eth.Payload) - int(eth.Length); cmp < 0 {
+			df.SetTruncated()
+		} else if cmp > 0 {
+			// Strip off bytes at the end, since we have too many bytes
+			eth.Payload = eth.Payload[:len(eth.Payload)-cmp]
+		}
+	}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (eth *Ethernet) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if len(eth.DstMAC) != 6 {
+		return fmt.Errorf("invalid dst MAC: %v", eth.DstMAC)
+	}
+	if len(eth.SrcMAC) != 6 {
+		return fmt.Errorf("invalid src MAC: %v", eth.SrcMAC)
+	}
+	payload := b.Bytes()
+	bytes, err := b.PrependBytes(14)
+	if err != nil {
+		return err
+	}
+	copy(bytes, eth.DstMAC)
+	copy(bytes[6:], eth.SrcMAC)
+	if eth.Length != 0 || eth.EthernetType == EthernetTypeLLC {
+		if opts.FixLengths {
+			eth.Length = uint16(len(payload))
+		}
+		if eth.EthernetType != EthernetTypeLLC {
+			return fmt.Errorf("ethernet type %v not compatible with length value %v", eth.EthernetType, eth.Length)
+		} else if eth.Length > 0x0600 {
+			return fmt.Errorf("invalid ethernet length %v", eth.Length)
+		}
+		binary.BigEndian.PutUint16(bytes[12:], eth.Length)
+	} else {
+		binary.BigEndian.PutUint16(bytes[12:], uint16(eth.EthernetType))
+	}
+	length := len(b.Bytes())
+	if length < 60 {
+		// Pad out to 60 bytes.
+		padding, err := b.AppendBytes(60 - length)
+		if err != nil {
+			return err
+		}
+		copy(padding, lotsOfZeros[:])
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (eth *Ethernet) CanDecode() gopacket.LayerClass {
+	return LayerTypeEthernet
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (eth *Ethernet) NextLayerType() gopacket.LayerType {
+	return eth.EthernetType.LayerType()
+}
+
+func decodeEthernet(data []byte, p gopacket.PacketBuilder) error {
+	eth := &Ethernet{}
+	err := eth.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(eth)
+	p.SetLinkLayer(eth)
+	return p.NextDecoder(eth.EthernetType)
+}
diff --git a/vendor/github.com/google/gopacket/layers/fddi.go b/vendor/github.com/google/gopacket/layers/fddi.go
new file mode 100644
index 0000000..ed9e195
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/fddi.go
@@ -0,0 +1,41 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"github.com/google/gopacket"
+	"net"
+)
+
+// FDDI contains the header for FDDI frames.
+type FDDI struct {
+	BaseLayer
+	FrameControl   FDDIFrameControl
+	Priority       uint8
+	SrcMAC, DstMAC net.HardwareAddr
+}
+
+// LayerType returns LayerTypeFDDI.
+func (f *FDDI) LayerType() gopacket.LayerType { return LayerTypeFDDI }
+
+// LinkFlow returns a new flow of type EndpointMAC.
+func (f *FDDI) LinkFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointMAC, f.SrcMAC, f.DstMAC)
+}
+
+func decodeFDDI(data []byte, p gopacket.PacketBuilder) error {
+	f := &FDDI{
+		FrameControl: FDDIFrameControl(data[0] & 0xF8),
+		Priority:     data[0] & 0x07,
+		SrcMAC:       net.HardwareAddr(data[1:7]),
+		DstMAC:       net.HardwareAddr(data[7:13]),
+		BaseLayer:    BaseLayer{data[:13], data[13:]},
+	}
+	p.SetLinkLayer(f)
+	p.AddLayer(f)
+	return p.NextDecoder(f.FrameControl)
+}
diff --git a/vendor/github.com/google/gopacket/layers/fuzz_layer.go b/vendor/github.com/google/gopacket/layers/fuzz_layer.go
new file mode 100644
index 0000000..606e45d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/fuzz_layer.go
@@ -0,0 +1,39 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file in the root of the source tree.
+
+package layers
+
+import (
+	"encoding/binary"
+
+	"github.com/google/gopacket"
+)
+
+// FuzzLayer is a fuzz target for the layers package of gopacket.
+// A fuzz target is a function that processes a binary blob (byte slice).
+// The process here is to interpret this data as a packet and print the layers' contents.
+// The decoding options and the starting layer are encoded in the first bytes.
+// The function returns 1 if this is a valid packet (no error layer) and 0 otherwise.
+func FuzzLayer(data []byte) int {
+	if len(data) < 3 {
+		return 0
+	}
+	// use the first two bytes to choose the top level layer
+	startLayer := binary.BigEndian.Uint16(data[:2])
+	var fuzzOpts = gopacket.DecodeOptions{
+		Lazy:                     data[2]&0x1 != 0,
+		NoCopy:                   data[2]&0x2 != 0,
+		SkipDecodeRecovery:       data[2]&0x4 != 0,
+		DecodeStreamsAsDatagrams: data[2]&0x8 != 0,
+	}
+	p := gopacket.NewPacket(data[3:], gopacket.LayerType(startLayer), fuzzOpts)
+	for _, l := range p.Layers() {
+		gopacket.LayerString(l)
+	}
+	if p.ErrorLayer() != nil {
+		return 0
+	}
+	return 1
+}
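+
+// Seed-construction sketch (illustrative only): the first two bytes select the
+// starting layer and the third byte carries the decode options, as parsed
+// above. rawFrame is an assumed byte slice holding raw packet bytes.
+//
+//	seed := make([]byte, 3)
+//	binary.BigEndian.PutUint16(seed, uint16(LayerTypeEthernet))
+//	seed[2] = 0x01 // decode options: Lazy
+//	seed = append(seed, rawFrame...)
+//	_ = FuzzLayer(seed)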
diff --git a/vendor/github.com/google/gopacket/layers/gen_linted.sh b/vendor/github.com/google/gopacket/layers/gen_linted.sh
new file mode 100644
index 0000000..75c701f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gen_linted.sh
@@ -0,0 +1,3 @@
+#!/bin/bash
+
+for i in *.go; do golint $i | grep -q . || echo $i; done > .linted
diff --git a/vendor/github.com/google/gopacket/layers/geneve.go b/vendor/github.com/google/gopacket/layers/geneve.go
new file mode 100644
index 0000000..e9a1428
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/geneve.go
@@ -0,0 +1,121 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// Geneve is specified here: https://tools.ietf.org/html/draft-ietf-nvo3-geneve-03
+// Geneve Header:
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |Ver|  Opt Len  |O|C|    Rsvd.  |          Protocol Type        |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |        Virtual Network Identifier (VNI)       |    Reserved   |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                    Variable Length Options                    |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+type Geneve struct {
+	BaseLayer
+	Version        uint8        // 2 bits
+	OptionsLength  uint8        // 6 bits
+	OAMPacket      bool         // 1 bit
+	CriticalOption bool         // 1 bit
+	Protocol       EthernetType // 16 bits
+	VNI            uint32       // 24 bits
+	Options        []*GeneveOption
+}
+
+// Geneve Tunnel Options
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |          Option Class         |      Type     |R|R|R| Length  |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                      Variable Option Data                     |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+type GeneveOption struct {
+	Class  uint16 // 16 bits
+	Type   uint8  // 8 bits
+	Flags  uint8  // 3 bits
+	Length uint8  // 5 bits
+	Data   []byte
+}
+
+// LayerType returns LayerTypeGeneve
+func (gn *Geneve) LayerType() gopacket.LayerType { return LayerTypeGeneve }
+
+func decodeGeneveOption(data []byte, gn *Geneve, df gopacket.DecodeFeedback) (*GeneveOption, uint8, error) {
+	if len(data) < 3 {
+		df.SetTruncated()
+		return nil, 0, errors.New("geneve option too small")
+	}
+	opt := &GeneveOption{}
+
+	opt.Class = binary.BigEndian.Uint16(data[0:2])
+	opt.Type = data[2]
+	opt.Flags = data[3] >> 4
+	opt.Length = (data[3]&0xf)*4 + 4
+
+	if len(data) < int(opt.Length) {
+		df.SetTruncated()
+		return nil, 0, errors.New("geneve option too small")
+	}
+	opt.Data = make([]byte, opt.Length-4)
+	copy(opt.Data, data[4:opt.Length])
+
+	return opt, opt.Length, nil
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (gn *Geneve) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 7 {
+		df.SetTruncated()
+		return errors.New("geneve packet too short")
+	}
+
+	gn.Version = data[0] >> 7
+	gn.OptionsLength = (data[0] & 0x3f) * 4
+
+	gn.OAMPacket = data[1]&0x80 > 0
+	gn.CriticalOption = data[1]&0x40 > 0
+	gn.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+
+	var buf [4]byte
+	copy(buf[1:], data[4:7])
+	gn.VNI = binary.BigEndian.Uint32(buf[:])
+
+	offset, length := uint8(8), int32(gn.OptionsLength)
+	if len(data) < int(length+7) {
+		df.SetTruncated()
+		return errors.New("geneve packet too short")
+	}
+
+	for length > 0 {
+		opt, len, err := decodeGeneveOption(data[offset:], gn, df)
+		if err != nil {
+			return err
+		}
+		gn.Options = append(gn.Options, opt)
+
+		length -= int32(len)
+		offset += len
+	}
+
+	gn.BaseLayer = BaseLayer{data[:offset], data[offset:]}
+
+	return nil
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (gn *Geneve) NextLayerType() gopacket.LayerType {
+	return gn.Protocol.LayerType()
+}
+
+func decodeGeneve(data []byte, p gopacket.PacketBuilder) error {
+	gn := &Geneve{}
+	return decodingLayerDecoder(gn, data, p)
+}
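+
+// Decoding sketch (illustrative only): a minimal 8-byte header matching the
+// diagram above -- version 0, no options, protocol 0x6558 (Transparent
+// Ethernet Bridging), VNI 1 -- decodes directly with DecodeFromBytes.
+//
+//	hdr := []byte{0x00, 0x00, 0x65, 0x58, 0x00, 0x00, 0x01, 0x00}
+//	var gn Geneve
+//	if err := gn.DecodeFromBytes(hdr, gopacket.NilDecodeFeedback); err == nil {
+//		_ = gn.VNI // 1
+//	}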
diff --git a/vendor/github.com/google/gopacket/layers/gre.go b/vendor/github.com/google/gopacket/layers/gre.go
new file mode 100644
index 0000000..9c5e7d2
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gre.go
@@ -0,0 +1,200 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+
+	"github.com/google/gopacket"
+)
+
+// GRE is a Generic Routing Encapsulation header.
+type GRE struct {
+	BaseLayer
+	ChecksumPresent, RoutingPresent, KeyPresent, SeqPresent, StrictSourceRoute, AckPresent bool
+	RecursionControl, Flags, Version                                                       uint8
+	Protocol                                                                               EthernetType
+	Checksum, Offset                                                                       uint16
+	Key, Seq, Ack                                                                          uint32
+	*GRERouting
+}
+
+// GRERouting is GRE routing information, present if the RoutingPresent flag is
+// set.
+type GRERouting struct {
+	AddressFamily        uint16
+	SREOffset, SRELength uint8
+	RoutingInformation   []byte
+	Next                 *GRERouting
+}
+
+// LayerType returns gopacket.LayerTypeGRE.
+func (g *GRE) LayerType() gopacket.LayerType { return LayerTypeGRE }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (g *GRE) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	g.ChecksumPresent = data[0]&0x80 != 0
+	g.RoutingPresent = data[0]&0x40 != 0
+	g.KeyPresent = data[0]&0x20 != 0
+	g.SeqPresent = data[0]&0x10 != 0
+	g.StrictSourceRoute = data[0]&0x08 != 0
+	g.AckPresent = data[1]&0x80 != 0
+	g.RecursionControl = data[0] & 0x7
+	g.Flags = data[1] >> 3
+	g.Version = data[1] & 0x7
+	g.Protocol = EthernetType(binary.BigEndian.Uint16(data[2:4]))
+	offset := 4
+	if g.ChecksumPresent || g.RoutingPresent {
+		g.Checksum = binary.BigEndian.Uint16(data[offset : offset+2])
+		g.Offset = binary.BigEndian.Uint16(data[offset+2 : offset+4])
+		offset += 4
+	}
+	if g.KeyPresent {
+		g.Key = binary.BigEndian.Uint32(data[offset : offset+4])
+		offset += 4
+	}
+	if g.SeqPresent {
+		g.Seq = binary.BigEndian.Uint32(data[offset : offset+4])
+		offset += 4
+	}
+	if g.RoutingPresent {
+		tail := &g.GRERouting
+		for {
+			sre := &GRERouting{
+				AddressFamily: binary.BigEndian.Uint16(data[offset : offset+2]),
+				SREOffset:     data[offset+2],
+				SRELength:     data[offset+3],
+			}
+			sre.RoutingInformation = data[offset+4 : offset+4+int(sre.SRELength)]
+			offset += 4 + int(sre.SRELength)
+			if sre.AddressFamily == 0 && sre.SRELength == 0 {
+				break
+			}
+			(*tail) = sre
+			tail = &sre.Next
+		}
+	}
+	if g.AckPresent {
+		g.Ack = binary.BigEndian.Uint32(data[offset : offset+4])
+		offset += 4
+	}
+	g.BaseLayer = BaseLayer{data[:offset], data[offset:]}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the SerializationBuffer,
+// implementing gopacket.SerializableLayer. See the docs for gopacket.SerializableLayer for more info.
+func (g *GRE) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	size := 4
+	if g.ChecksumPresent || g.RoutingPresent {
+		size += 4
+	}
+	if g.KeyPresent {
+		size += 4
+	}
+	if g.SeqPresent {
+		size += 4
+	}
+	if g.RoutingPresent {
+		r := g.GRERouting
+		for r != nil {
+			size += 4 + int(r.SRELength)
+			r = r.Next
+		}
+		size += 4
+	}
+	if g.AckPresent {
+		size += 4
+	}
+	buf, err := b.PrependBytes(size)
+	if err != nil {
+		return err
+	}
+	// Reset any potentially dirty memory in the first 2 bytes, as these use OR to set flags.
+	buf[0] = 0
+	buf[1] = 0
+	if g.ChecksumPresent {
+		buf[0] |= 0x80
+	}
+	if g.RoutingPresent {
+		buf[0] |= 0x40
+	}
+	if g.KeyPresent {
+		buf[0] |= 0x20
+	}
+	if g.SeqPresent {
+		buf[0] |= 0x10
+	}
+	if g.StrictSourceRoute {
+		buf[0] |= 0x08
+	}
+	if g.AckPresent {
+		buf[1] |= 0x80
+	}
+	buf[0] |= g.RecursionControl
+	buf[1] |= g.Flags << 3
+	buf[1] |= g.Version
+	binary.BigEndian.PutUint16(buf[2:4], uint16(g.Protocol))
+	offset := 4
+	if g.ChecksumPresent || g.RoutingPresent {
+		// Don't write the checksum value yet, as we may need to compute it,
+		// which requires the entire header be complete.
+		// Instead we zeroize the memory in case it is dirty.
+		buf[offset] = 0
+		buf[offset+1] = 0
+		binary.BigEndian.PutUint16(buf[offset+2:offset+4], g.Offset)
+		offset += 4
+	}
+	if g.KeyPresent {
+		binary.BigEndian.PutUint32(buf[offset:offset+4], g.Key)
+		offset += 4
+	}
+	if g.SeqPresent {
+		binary.BigEndian.PutUint32(buf[offset:offset+4], g.Seq)
+		offset += 4
+	}
+	if g.RoutingPresent {
+		sre := g.GRERouting
+		for sre != nil {
+			binary.BigEndian.PutUint16(buf[offset:offset+2], sre.AddressFamily)
+			buf[offset+2] = sre.SREOffset
+			buf[offset+3] = sre.SRELength
+			copy(buf[offset+4:offset+4+int(sre.SRELength)], sre.RoutingInformation)
+			offset += 4 + int(sre.SRELength)
+			sre = sre.Next
+		}
+		// Terminate routing field with a "NULL" SRE.
+		binary.BigEndian.PutUint32(buf[offset:offset+4], 0)
+	}
+	if g.AckPresent {
+		binary.BigEndian.PutUint32(buf[offset:offset+4], g.Ack)
+		offset += 4
+	}
+	if g.ChecksumPresent {
+		if opts.ComputeChecksums {
+			g.Checksum = tcpipChecksum(b.Bytes(), 0)
+		}
+
+		binary.BigEndian.PutUint16(buf[4:6], g.Checksum)
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (g *GRE) CanDecode() gopacket.LayerClass {
+	return LayerTypeGRE
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (g *GRE) NextLayerType() gopacket.LayerType {
+	return g.Protocol.LayerType()
+}
+
+func decodeGRE(data []byte, p gopacket.PacketBuilder) error {
+	g := &GRE{}
+	return decodingLayerDecoder(g, data, p)
+}
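+
+// Serialization sketch (illustrative only): a checksummed GRE header can be
+// built with the standard gopacket serialization helpers. The payload variable
+// is an assumed byte slice.
+//
+//	gre := &GRE{ChecksumPresent: true, Protocol: EthernetTypeIPv4}
+//	buf := gopacket.NewSerializeBuffer()
+//	opts := gopacket.SerializeOptions{ComputeChecksums: true}
+//	err := gopacket.SerializeLayers(buf, opts, gre, gopacket.Payload(payload))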
diff --git a/vendor/github.com/google/gopacket/layers/gtp.go b/vendor/github.com/google/gopacket/layers/gtp.go
new file mode 100644
index 0000000..7e74268
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/gtp.go
@@ -0,0 +1,184 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+const gtpMinimumSizeInBytes int = 8
+
+// GTPExtensionHeader is used to carry extra data and enable future extensions of GTP without the need to use another version number.
+type GTPExtensionHeader struct {
+	Type    uint8
+	Content []byte
+}
+
+// GTPv1U protocol is used to exchange user data over GTP tunnels across the Sx interfaces.
+// Defined in https://portal.3gpp.org/desktopmodules/Specifications/SpecificationDetails.aspx?specificationId=1595
+type GTPv1U struct {
+	BaseLayer
+	Version             uint8
+	ProtocolType        uint8
+	Reserved            uint8
+	ExtensionHeaderFlag bool
+	SequenceNumberFlag  bool
+	NPDUFlag            bool
+	MessageType         uint8
+	MessageLength       uint16
+	TEID                uint32
+	SequenceNumber      uint16
+	NPDU                uint8
+	GTPExtensionHeaders []GTPExtensionHeader
+}
+
+// LayerType returns LayerTypeGTPv1U
+func (g *GTPv1U) LayerType() gopacket.LayerType { return LayerTypeGTPv1U }
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as a GTPv1U packet
+func (g *GTPv1U) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	hLen := gtpMinimumSizeInBytes
+	dLen := len(data)
+	if dLen < hLen {
+		return fmt.Errorf("GTP packet too small: %d bytes", dLen)
+	}
+	g.Version = (data[0] >> 5) & 0x07
+	g.ProtocolType = (data[0] >> 4) & 0x01
+	g.Reserved = (data[0] >> 3) & 0x01
+	g.SequenceNumberFlag = ((data[0] >> 1) & 0x01) == 1
+	g.NPDUFlag = (data[0] & 0x01) == 1
+	g.ExtensionHeaderFlag = ((data[0] >> 2) & 0x01) == 1
+	g.MessageType = data[1]
+	g.MessageLength = binary.BigEndian.Uint16(data[2:4])
+	pLen := 8 + g.MessageLength
+	if uint16(dLen) < pLen {
+		return fmt.Errorf("GTP packet too small: %d bytes", dLen)
+	}
+	//  Field used to multiplex different connections in the same GTP tunnel.
+	g.TEID = binary.BigEndian.Uint32(data[4:8])
+	cIndex := uint16(hLen)
+	if g.SequenceNumberFlag || g.NPDUFlag || g.ExtensionHeaderFlag {
+		hLen += 4
+		cIndex += 4
+		if dLen < hLen {
+			return fmt.Errorf("GTP packet too small: %d bytes", dLen)
+		}
+		if g.SequenceNumberFlag {
+			g.SequenceNumber = binary.BigEndian.Uint16(data[8:10])
+		}
+		if g.NPDUFlag {
+			g.NPDU = data[10]
+		}
+		if g.ExtensionHeaderFlag {
+			extensionFlag := true
+			for extensionFlag {
+				extensionType := uint8(data[cIndex-1])
+				extensionLength := uint(data[cIndex])
+				if extensionLength == 0 {
+					return fmt.Errorf("GTP packet with invalid extension header")
+				}
+				// extensionLength is in 4-octet units
+				lIndex := cIndex + (uint16(extensionLength) * 4)
+				if uint16(dLen) < lIndex {
+					return fmt.Errorf("GTP packet with small extension header: %d bytes", dLen)
+				}
+				content := data[cIndex+1 : lIndex-1]
+				eh := GTPExtensionHeader{Type: extensionType, Content: content}
+				g.GTPExtensionHeaders = append(g.GTPExtensionHeaders, eh)
+				cIndex = lIndex
+				// Check if coming bytes are from an extension header
+				extensionFlag = data[cIndex-1] != 0
+
+			}
+		}
+	}
+	g.BaseLayer = BaseLayer{Contents: data[:cIndex], Payload: data[cIndex:]}
+	return nil
+
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (g *GTPv1U) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	data, err := b.PrependBytes(gtpMinimumSizeInBytes)
+	if err != nil {
+		return err
+	}
+	data[0] |= (g.Version << 5)
+	data[0] |= (1 << 4)
+	if len(g.GTPExtensionHeaders) > 0 {
+		data[0] |= 0x04
+		g.ExtensionHeaderFlag = true
+	}
+	if g.SequenceNumberFlag {
+		data[0] |= 0x02
+	}
+	if g.NPDUFlag {
+		data[0] |= 0x01
+	}
+	data[1] = g.MessageType
+	binary.BigEndian.PutUint16(data[2:4], g.MessageLength)
+	binary.BigEndian.PutUint32(data[4:8], g.TEID)
+	if g.ExtensionHeaderFlag || g.SequenceNumberFlag || g.NPDUFlag {
+		data, err := b.AppendBytes(4)
+		if err != nil {
+			return err
+		}
+		binary.BigEndian.PutUint16(data[:2], g.SequenceNumber)
+		data[2] = g.NPDU
+		for _, eh := range g.GTPExtensionHeaders {
+			data[len(data)-1] = eh.Type
+			lContent := len(eh.Content)
+			// extensionLength is in 4-octet units
+			extensionLength := (lContent + 2) / 4
+			// Get two extra bytes for the next extension header type and length
+			data, err = b.AppendBytes(lContent + 2)
+			if err != nil {
+				return err
+			}
+			data[0] = byte(extensionLength)
+			copy(data[1:lContent+1], eh.Content)
+		}
+	}
+	return nil
+
+}
+
+// CanDecode returns a set of layers that GTP objects can decode.
+func (g *GTPv1U) CanDecode() gopacket.LayerClass {
+	return LayerTypeGTPv1U
+}
+
+// NextLayerType specifies the next layer that GoPacket should attempt to decode.
+func (g *GTPv1U) NextLayerType() gopacket.LayerType {
+	if len(g.LayerPayload()) == 0 {
+		return gopacket.LayerTypeZero
+	}
+	version := uint8(g.LayerPayload()[0]) >> 4
+	if version == 4 {
+		return LayerTypeIPv4
+	} else if version == 6 {
+		return LayerTypeIPv6
+	} else {
+		return LayerTypePPP
+	}
+}
+
+func decodeGTPv1u(data []byte, p gopacket.PacketBuilder) error {
+	gtp := &GTPv1U{}
+	err := gtp.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(gtp)
+	return p.NextDecoder(gtp.NextLayerType())
+}
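+
+// Decoding sketch (illustrative only): after the GTP-U header is decoded, the
+// next layer is picked from the first payload nibble as implemented in
+// NextLayerType above. udpPayload is an assumed byte slice holding the bytes
+// carried by the UDP layer.
+//
+//	var gtp GTPv1U
+//	if err := gtp.DecodeFromBytes(udpPayload, gopacket.NilDecodeFeedback); err == nil {
+//		_ = gtp.NextLayerType() // LayerTypeIPv4, LayerTypeIPv6 or LayerTypePPP
+//	}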
diff --git a/vendor/github.com/google/gopacket/layers/iana_ports.go b/vendor/github.com/google/gopacket/layers/iana_ports.go
new file mode 100644
index 0000000..ddcf3ec
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/iana_ports.go
@@ -0,0 +1,11351 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+
+package layers
+
+// Created by gen.go, don't edit manually
+// Generated at 2017-10-23 09:57:28.214859163 -0600 MDT m=+1.011679290
+// Fetched from "http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xml"
+
+// TCPPortNames contains the port names for all TCP ports.
+var TCPPortNames = tcpPortNames
+
+// UDPPortNames contains the port names for all UDP ports.
+var UDPPortNames = udpPortNames
+
+// SCTPPortNames contains the port names for all SCTP ports.
+var SCTPPortNames = sctpPortNames
+
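+// Lookup sketch (illustrative only): the exported maps above translate a
+// numeric port into its IANA service name, for example:
+//
+//	name := TCPPortNames[TCPPort(443)] // "https"
+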
+var tcpPortNames = map[TCPPort]string{
+	1:     "tcpmux",
+	2:     "compressnet",
+	3:     "compressnet",
+	5:     "rje",
+	7:     "echo",
+	9:     "discard",
+	11:    "systat",
+	13:    "daytime",
+	17:    "qotd",
+	18:    "msp",
+	19:    "chargen",
+	20:    "ftp-data",
+	21:    "ftp",
+	22:    "ssh",
+	23:    "telnet",
+	25:    "smtp",
+	27:    "nsw-fe",
+	29:    "msg-icp",
+	31:    "msg-auth",
+	33:    "dsp",
+	37:    "time",
+	38:    "rap",
+	39:    "rlp",
+	41:    "graphics",
+	42:    "name",
+	43:    "nicname",
+	44:    "mpm-flags",
+	45:    "mpm",
+	46:    "mpm-snd",
+	48:    "auditd",
+	49:    "tacacs",
+	50:    "re-mail-ck",
+	52:    "xns-time",
+	53:    "domain",
+	54:    "xns-ch",
+	55:    "isi-gl",
+	56:    "xns-auth",
+	58:    "xns-mail",
+	62:    "acas",
+	63:    "whoispp",
+	64:    "covia",
+	65:    "tacacs-ds",
+	66:    "sql-net",
+	67:    "bootps",
+	68:    "bootpc",
+	69:    "tftp",
+	70:    "gopher",
+	71:    "netrjs-1",
+	72:    "netrjs-2",
+	73:    "netrjs-3",
+	74:    "netrjs-4",
+	76:    "deos",
+	78:    "vettcp",
+	79:    "finger",
+	80:    "http",
+	82:    "xfer",
+	83:    "mit-ml-dev",
+	84:    "ctf",
+	85:    "mit-ml-dev",
+	86:    "mfcobol",
+	88:    "kerberos",
+	89:    "su-mit-tg",
+	90:    "dnsix",
+	91:    "mit-dov",
+	92:    "npp",
+	93:    "dcp",
+	94:    "objcall",
+	95:    "supdup",
+	96:    "dixie",
+	97:    "swift-rvf",
+	98:    "tacnews",
+	99:    "metagram",
+	101:   "hostname",
+	102:   "iso-tsap",
+	103:   "gppitnp",
+	104:   "acr-nema",
+	105:   "cso",
+	106:   "3com-tsmux",
+	107:   "rtelnet",
+	108:   "snagas",
+	109:   "pop2",
+	110:   "pop3",
+	111:   "sunrpc",
+	112:   "mcidas",
+	113:   "ident",
+	115:   "sftp",
+	116:   "ansanotify",
+	117:   "uucp-path",
+	118:   "sqlserv",
+	119:   "nntp",
+	120:   "cfdptkt",
+	121:   "erpc",
+	122:   "smakynet",
+	123:   "ntp",
+	124:   "ansatrader",
+	125:   "locus-map",
+	126:   "nxedit",
+	127:   "locus-con",
+	128:   "gss-xlicen",
+	129:   "pwdgen",
+	130:   "cisco-fna",
+	131:   "cisco-tna",
+	132:   "cisco-sys",
+	133:   "statsrv",
+	134:   "ingres-net",
+	135:   "epmap",
+	136:   "profile",
+	137:   "netbios-ns",
+	138:   "netbios-dgm",
+	139:   "netbios-ssn",
+	140:   "emfis-data",
+	141:   "emfis-cntl",
+	142:   "bl-idm",
+	143:   "imap",
+	144:   "uma",
+	145:   "uaac",
+	146:   "iso-tp0",
+	147:   "iso-ip",
+	148:   "jargon",
+	149:   "aed-512",
+	150:   "sql-net",
+	151:   "hems",
+	152:   "bftp",
+	153:   "sgmp",
+	154:   "netsc-prod",
+	155:   "netsc-dev",
+	156:   "sqlsrv",
+	157:   "knet-cmp",
+	158:   "pcmail-srv",
+	159:   "nss-routing",
+	160:   "sgmp-traps",
+	161:   "snmp",
+	162:   "snmptrap",
+	163:   "cmip-man",
+	164:   "cmip-agent",
+	165:   "xns-courier",
+	166:   "s-net",
+	167:   "namp",
+	168:   "rsvd",
+	169:   "send",
+	170:   "print-srv",
+	171:   "multiplex",
+	172:   "cl-1",
+	173:   "xyplex-mux",
+	174:   "mailq",
+	175:   "vmnet",
+	176:   "genrad-mux",
+	177:   "xdmcp",
+	178:   "nextstep",
+	179:   "bgp",
+	180:   "ris",
+	181:   "unify",
+	182:   "audit",
+	183:   "ocbinder",
+	184:   "ocserver",
+	185:   "remote-kis",
+	186:   "kis",
+	187:   "aci",
+	188:   "mumps",
+	189:   "qft",
+	190:   "gacp",
+	191:   "prospero",
+	192:   "osu-nms",
+	193:   "srmp",
+	194:   "irc",
+	195:   "dn6-nlm-aud",
+	196:   "dn6-smm-red",
+	197:   "dls",
+	198:   "dls-mon",
+	199:   "smux",
+	200:   "src",
+	201:   "at-rtmp",
+	202:   "at-nbp",
+	203:   "at-3",
+	204:   "at-echo",
+	205:   "at-5",
+	206:   "at-zis",
+	207:   "at-7",
+	208:   "at-8",
+	209:   "qmtp",
+	210:   "z39-50",
+	211:   "914c-g",
+	212:   "anet",
+	213:   "ipx",
+	214:   "vmpwscs",
+	215:   "softpc",
+	216:   "CAIlic",
+	217:   "dbase",
+	218:   "mpp",
+	219:   "uarps",
+	220:   "imap3",
+	221:   "fln-spx",
+	222:   "rsh-spx",
+	223:   "cdc",
+	224:   "masqdialer",
+	242:   "direct",
+	243:   "sur-meas",
+	244:   "inbusiness",
+	245:   "link",
+	246:   "dsp3270",
+	247:   "subntbcst-tftp",
+	248:   "bhfhs",
+	256:   "rap",
+	257:   "set",
+	259:   "esro-gen",
+	260:   "openport",
+	261:   "nsiiops",
+	262:   "arcisdms",
+	263:   "hdap",
+	264:   "bgmp",
+	265:   "x-bone-ctl",
+	266:   "sst",
+	267:   "td-service",
+	268:   "td-replica",
+	269:   "manet",
+	271:   "pt-tls",
+	280:   "http-mgmt",
+	281:   "personal-link",
+	282:   "cableport-ax",
+	283:   "rescap",
+	284:   "corerjd",
+	286:   "fxp",
+	287:   "k-block",
+	308:   "novastorbakcup",
+	309:   "entrusttime",
+	310:   "bhmds",
+	311:   "asip-webadmin",
+	312:   "vslmp",
+	313:   "magenta-logic",
+	314:   "opalis-robot",
+	315:   "dpsi",
+	316:   "decauth",
+	317:   "zannet",
+	318:   "pkix-timestamp",
+	319:   "ptp-event",
+	320:   "ptp-general",
+	321:   "pip",
+	322:   "rtsps",
+	323:   "rpki-rtr",
+	324:   "rpki-rtr-tls",
+	333:   "texar",
+	344:   "pdap",
+	345:   "pawserv",
+	346:   "zserv",
+	347:   "fatserv",
+	348:   "csi-sgwp",
+	349:   "mftp",
+	350:   "matip-type-a",
+	351:   "matip-type-b",
+	352:   "dtag-ste-sb",
+	353:   "ndsauth",
+	354:   "bh611",
+	355:   "datex-asn",
+	356:   "cloanto-net-1",
+	357:   "bhevent",
+	358:   "shrinkwrap",
+	359:   "nsrmp",
+	360:   "scoi2odialog",
+	361:   "semantix",
+	362:   "srssend",
+	363:   "rsvp-tunnel",
+	364:   "aurora-cmgr",
+	365:   "dtk",
+	366:   "odmr",
+	367:   "mortgageware",
+	368:   "qbikgdp",
+	369:   "rpc2portmap",
+	370:   "codaauth2",
+	371:   "clearcase",
+	372:   "ulistproc",
+	373:   "legent-1",
+	374:   "legent-2",
+	375:   "hassle",
+	376:   "nip",
+	377:   "tnETOS",
+	378:   "dsETOS",
+	379:   "is99c",
+	380:   "is99s",
+	381:   "hp-collector",
+	382:   "hp-managed-node",
+	383:   "hp-alarm-mgr",
+	384:   "arns",
+	385:   "ibm-app",
+	386:   "asa",
+	387:   "aurp",
+	388:   "unidata-ldm",
+	389:   "ldap",
+	390:   "uis",
+	391:   "synotics-relay",
+	392:   "synotics-broker",
+	393:   "meta5",
+	394:   "embl-ndt",
+	395:   "netcp",
+	396:   "netware-ip",
+	397:   "mptn",
+	398:   "kryptolan",
+	399:   "iso-tsap-c2",
+	400:   "osb-sd",
+	401:   "ups",
+	402:   "genie",
+	403:   "decap",
+	404:   "nced",
+	405:   "ncld",
+	406:   "imsp",
+	407:   "timbuktu",
+	408:   "prm-sm",
+	409:   "prm-nm",
+	410:   "decladebug",
+	411:   "rmt",
+	412:   "synoptics-trap",
+	413:   "smsp",
+	414:   "infoseek",
+	415:   "bnet",
+	416:   "silverplatter",
+	417:   "onmux",
+	418:   "hyper-g",
+	419:   "ariel1",
+	420:   "smpte",
+	421:   "ariel2",
+	422:   "ariel3",
+	423:   "opc-job-start",
+	424:   "opc-job-track",
+	425:   "icad-el",
+	426:   "smartsdp",
+	427:   "svrloc",
+	428:   "ocs-cmu",
+	429:   "ocs-amu",
+	430:   "utmpsd",
+	431:   "utmpcd",
+	432:   "iasd",
+	433:   "nnsp",
+	434:   "mobileip-agent",
+	435:   "mobilip-mn",
+	436:   "dna-cml",
+	437:   "comscm",
+	438:   "dsfgw",
+	439:   "dasp",
+	440:   "sgcp",
+	441:   "decvms-sysmgt",
+	442:   "cvc-hostd",
+	443:   "https",
+	444:   "snpp",
+	445:   "microsoft-ds",
+	446:   "ddm-rdb",
+	447:   "ddm-dfm",
+	448:   "ddm-ssl",
+	449:   "as-servermap",
+	450:   "tserver",
+	451:   "sfs-smp-net",
+	452:   "sfs-config",
+	453:   "creativeserver",
+	454:   "contentserver",
+	455:   "creativepartnr",
+	456:   "macon-tcp",
+	457:   "scohelp",
+	458:   "appleqtc",
+	459:   "ampr-rcmd",
+	460:   "skronk",
+	461:   "datasurfsrv",
+	462:   "datasurfsrvsec",
+	463:   "alpes",
+	464:   "kpasswd",
+	465:   "urd",
+	466:   "digital-vrc",
+	467:   "mylex-mapd",
+	468:   "photuris",
+	469:   "rcp",
+	470:   "scx-proxy",
+	471:   "mondex",
+	472:   "ljk-login",
+	473:   "hybrid-pop",
+	474:   "tn-tl-w1",
+	475:   "tcpnethaspsrv",
+	476:   "tn-tl-fd1",
+	477:   "ss7ns",
+	478:   "spsc",
+	479:   "iafserver",
+	480:   "iafdbase",
+	481:   "ph",
+	482:   "bgs-nsi",
+	483:   "ulpnet",
+	484:   "integra-sme",
+	485:   "powerburst",
+	486:   "avian",
+	487:   "saft",
+	488:   "gss-http",
+	489:   "nest-protocol",
+	490:   "micom-pfs",
+	491:   "go-login",
+	492:   "ticf-1",
+	493:   "ticf-2",
+	494:   "pov-ray",
+	495:   "intecourier",
+	496:   "pim-rp-disc",
+	497:   "retrospect",
+	498:   "siam",
+	499:   "iso-ill",
+	500:   "isakmp",
+	501:   "stmf",
+	502:   "mbap",
+	503:   "intrinsa",
+	504:   "citadel",
+	505:   "mailbox-lm",
+	506:   "ohimsrv",
+	507:   "crs",
+	508:   "xvttp",
+	509:   "snare",
+	510:   "fcp",
+	511:   "passgo",
+	512:   "exec",
+	513:   "login",
+	514:   "shell",
+	515:   "printer",
+	516:   "videotex",
+	517:   "talk",
+	518:   "ntalk",
+	519:   "utime",
+	520:   "efs",
+	521:   "ripng",
+	522:   "ulp",
+	523:   "ibm-db2",
+	524:   "ncp",
+	525:   "timed",
+	526:   "tempo",
+	527:   "stx",
+	528:   "custix",
+	529:   "irc-serv",
+	530:   "courier",
+	531:   "conference",
+	532:   "netnews",
+	533:   "netwall",
+	534:   "windream",
+	535:   "iiop",
+	536:   "opalis-rdv",
+	537:   "nmsp",
+	538:   "gdomap",
+	539:   "apertus-ldp",
+	540:   "uucp",
+	541:   "uucp-rlogin",
+	542:   "commerce",
+	543:   "klogin",
+	544:   "kshell",
+	545:   "appleqtcsrvr",
+	546:   "dhcpv6-client",
+	547:   "dhcpv6-server",
+	548:   "afpovertcp",
+	549:   "idfp",
+	550:   "new-rwho",
+	551:   "cybercash",
+	552:   "devshr-nts",
+	553:   "pirp",
+	554:   "rtsp",
+	555:   "dsf",
+	556:   "remotefs",
+	557:   "openvms-sysipc",
+	558:   "sdnskmp",
+	559:   "teedtap",
+	560:   "rmonitor",
+	561:   "monitor",
+	562:   "chshell",
+	563:   "nntps",
+	564:   "9pfs",
+	565:   "whoami",
+	566:   "streettalk",
+	567:   "banyan-rpc",
+	568:   "ms-shuttle",
+	569:   "ms-rome",
+	570:   "meter",
+	571:   "meter",
+	572:   "sonar",
+	573:   "banyan-vip",
+	574:   "ftp-agent",
+	575:   "vemmi",
+	576:   "ipcd",
+	577:   "vnas",
+	578:   "ipdd",
+	579:   "decbsrv",
+	580:   "sntp-heartbeat",
+	581:   "bdp",
+	582:   "scc-security",
+	583:   "philips-vc",
+	584:   "keyserver",
+	586:   "password-chg",
+	587:   "submission",
+	588:   "cal",
+	589:   "eyelink",
+	590:   "tns-cml",
+	591:   "http-alt",
+	592:   "eudora-set",
+	593:   "http-rpc-epmap",
+	594:   "tpip",
+	595:   "cab-protocol",
+	596:   "smsd",
+	597:   "ptcnameservice",
+	598:   "sco-websrvrmg3",
+	599:   "acp",
+	600:   "ipcserver",
+	601:   "syslog-conn",
+	602:   "xmlrpc-beep",
+	603:   "idxp",
+	604:   "tunnel",
+	605:   "soap-beep",
+	606:   "urm",
+	607:   "nqs",
+	608:   "sift-uft",
+	609:   "npmp-trap",
+	610:   "npmp-local",
+	611:   "npmp-gui",
+	612:   "hmmp-ind",
+	613:   "hmmp-op",
+	614:   "sshell",
+	615:   "sco-inetmgr",
+	616:   "sco-sysmgr",
+	617:   "sco-dtmgr",
+	618:   "dei-icda",
+	619:   "compaq-evm",
+	620:   "sco-websrvrmgr",
+	621:   "escp-ip",
+	622:   "collaborator",
+	623:   "oob-ws-http",
+	624:   "cryptoadmin",
+	625:   "dec-dlm",
+	626:   "asia",
+	627:   "passgo-tivoli",
+	628:   "qmqp",
+	629:   "3com-amp3",
+	630:   "rda",
+	631:   "ipp",
+	632:   "bmpp",
+	633:   "servstat",
+	634:   "ginad",
+	635:   "rlzdbase",
+	636:   "ldaps",
+	637:   "lanserver",
+	638:   "mcns-sec",
+	639:   "msdp",
+	640:   "entrust-sps",
+	641:   "repcmd",
+	642:   "esro-emsdp",
+	643:   "sanity",
+	644:   "dwr",
+	645:   "pssc",
+	646:   "ldp",
+	647:   "dhcp-failover",
+	648:   "rrp",
+	649:   "cadview-3d",
+	650:   "obex",
+	651:   "ieee-mms",
+	652:   "hello-port",
+	653:   "repscmd",
+	654:   "aodv",
+	655:   "tinc",
+	656:   "spmp",
+	657:   "rmc",
+	658:   "tenfold",
+	660:   "mac-srvr-admin",
+	661:   "hap",
+	662:   "pftp",
+	663:   "purenoise",
+	664:   "oob-ws-https",
+	665:   "sun-dr",
+	666:   "mdqs",
+	667:   "disclose",
+	668:   "mecomm",
+	669:   "meregister",
+	670:   "vacdsm-sws",
+	671:   "vacdsm-app",
+	672:   "vpps-qua",
+	673:   "cimplex",
+	674:   "acap",
+	675:   "dctp",
+	676:   "vpps-via",
+	677:   "vpp",
+	678:   "ggf-ncp",
+	679:   "mrm",
+	680:   "entrust-aaas",
+	681:   "entrust-aams",
+	682:   "xfr",
+	683:   "corba-iiop",
+	684:   "corba-iiop-ssl",
+	685:   "mdc-portmapper",
+	686:   "hcp-wismar",
+	687:   "asipregistry",
+	688:   "realm-rusd",
+	689:   "nmap",
+	690:   "vatp",
+	691:   "msexch-routing",
+	692:   "hyperwave-isp",
+	693:   "connendp",
+	694:   "ha-cluster",
+	695:   "ieee-mms-ssl",
+	696:   "rushd",
+	697:   "uuidgen",
+	698:   "olsr",
+	699:   "accessnetwork",
+	700:   "epp",
+	701:   "lmp",
+	702:   "iris-beep",
+	704:   "elcsd",
+	705:   "agentx",
+	706:   "silc",
+	707:   "borland-dsj",
+	709:   "entrust-kmsh",
+	710:   "entrust-ash",
+	711:   "cisco-tdp",
+	712:   "tbrpf",
+	713:   "iris-xpc",
+	714:   "iris-xpcs",
+	715:   "iris-lwz",
+	729:   "netviewdm1",
+	730:   "netviewdm2",
+	731:   "netviewdm3",
+	741:   "netgw",
+	742:   "netrcs",
+	744:   "flexlm",
+	747:   "fujitsu-dev",
+	748:   "ris-cm",
+	749:   "kerberos-adm",
+	750:   "rfile",
+	751:   "pump",
+	752:   "qrh",
+	753:   "rrh",
+	754:   "tell",
+	758:   "nlogin",
+	759:   "con",
+	760:   "ns",
+	761:   "rxe",
+	762:   "quotad",
+	763:   "cycleserv",
+	764:   "omserv",
+	765:   "webster",
+	767:   "phonebook",
+	769:   "vid",
+	770:   "cadlock",
+	771:   "rtip",
+	772:   "cycleserv2",
+	773:   "submit",
+	774:   "rpasswd",
+	775:   "entomb",
+	776:   "wpages",
+	777:   "multiling-http",
+	780:   "wpgs",
+	800:   "mdbs-daemon",
+	801:   "device",
+	802:   "mbap-s",
+	810:   "fcp-udp",
+	828:   "itm-mcell-s",
+	829:   "pkix-3-ca-ra",
+	830:   "netconf-ssh",
+	831:   "netconf-beep",
+	832:   "netconfsoaphttp",
+	833:   "netconfsoapbeep",
+	847:   "dhcp-failover2",
+	848:   "gdoi",
+	853:   "domain-s",
+	854:   "dlep",
+	860:   "iscsi",
+	861:   "owamp-control",
+	862:   "twamp-control",
+	873:   "rsync",
+	886:   "iclcnet-locate",
+	887:   "iclcnet-svinfo",
+	888:   "accessbuilder",
+	900:   "omginitialrefs",
+	901:   "smpnameres",
+	902:   "ideafarm-door",
+	903:   "ideafarm-panic",
+	910:   "kink",
+	911:   "xact-backup",
+	912:   "apex-mesh",
+	913:   "apex-edge",
+	953:   "rndc",
+	989:   "ftps-data",
+	990:   "ftps",
+	991:   "nas",
+	992:   "telnets",
+	993:   "imaps",
+	995:   "pop3s",
+	996:   "vsinet",
+	997:   "maitrd",
+	998:   "busboy",
+	999:   "garcon",
+	1000:  "cadlock2",
+	1001:  "webpush",
+	1010:  "surf",
+	1021:  "exp1",
+	1022:  "exp2",
+	1025:  "blackjack",
+	1026:  "cap",
+	1029:  "solid-mux",
+	1033:  "netinfo-local",
+	1034:  "activesync",
+	1035:  "mxxrlogin",
+	1036:  "nsstp",
+	1037:  "ams",
+	1038:  "mtqp",
+	1039:  "sbl",
+	1040:  "netarx",
+	1041:  "danf-ak2",
+	1042:  "afrog",
+	1043:  "boinc-client",
+	1044:  "dcutility",
+	1045:  "fpitp",
+	1046:  "wfremotertm",
+	1047:  "neod1",
+	1048:  "neod2",
+	1049:  "td-postman",
+	1050:  "cma",
+	1051:  "optima-vnet",
+	1052:  "ddt",
+	1053:  "remote-as",
+	1054:  "brvread",
+	1055:  "ansyslmd",
+	1056:  "vfo",
+	1057:  "startron",
+	1058:  "nim",
+	1059:  "nimreg",
+	1060:  "polestar",
+	1061:  "kiosk",
+	1062:  "veracity",
+	1063:  "kyoceranetdev",
+	1064:  "jstel",
+	1065:  "syscomlan",
+	1066:  "fpo-fns",
+	1067:  "instl-boots",
+	1068:  "instl-bootc",
+	1069:  "cognex-insight",
+	1070:  "gmrupdateserv",
+	1071:  "bsquare-voip",
+	1072:  "cardax",
+	1073:  "bridgecontrol",
+	1074:  "warmspotMgmt",
+	1075:  "rdrmshc",
+	1076:  "dab-sti-c",
+	1077:  "imgames",
+	1078:  "avocent-proxy",
+	1079:  "asprovatalk",
+	1080:  "socks",
+	1081:  "pvuniwien",
+	1082:  "amt-esd-prot",
+	1083:  "ansoft-lm-1",
+	1084:  "ansoft-lm-2",
+	1085:  "webobjects",
+	1086:  "cplscrambler-lg",
+	1087:  "cplscrambler-in",
+	1088:  "cplscrambler-al",
+	1089:  "ff-annunc",
+	1090:  "ff-fms",
+	1091:  "ff-sm",
+	1092:  "obrpd",
+	1093:  "proofd",
+	1094:  "rootd",
+	1095:  "nicelink",
+	1096:  "cnrprotocol",
+	1097:  "sunclustermgr",
+	1098:  "rmiactivation",
+	1099:  "rmiregistry",
+	1100:  "mctp",
+	1101:  "pt2-discover",
+	1102:  "adobeserver-1",
+	1103:  "adobeserver-2",
+	1104:  "xrl",
+	1105:  "ftranhc",
+	1106:  "isoipsigport-1",
+	1107:  "isoipsigport-2",
+	1108:  "ratio-adp",
+	1110:  "webadmstart",
+	1111:  "lmsocialserver",
+	1112:  "icp",
+	1113:  "ltp-deepspace",
+	1114:  "mini-sql",
+	1115:  "ardus-trns",
+	1116:  "ardus-cntl",
+	1117:  "ardus-mtrns",
+	1118:  "sacred",
+	1119:  "bnetgame",
+	1120:  "bnetfile",
+	1121:  "rmpp",
+	1122:  "availant-mgr",
+	1123:  "murray",
+	1124:  "hpvmmcontrol",
+	1125:  "hpvmmagent",
+	1126:  "hpvmmdata",
+	1127:  "kwdb-commn",
+	1128:  "saphostctrl",
+	1129:  "saphostctrls",
+	1130:  "casp",
+	1131:  "caspssl",
+	1132:  "kvm-via-ip",
+	1133:  "dfn",
+	1134:  "aplx",
+	1135:  "omnivision",
+	1136:  "hhb-gateway",
+	1137:  "trim",
+	1138:  "encrypted-admin",
+	1139:  "evm",
+	1140:  "autonoc",
+	1141:  "mxomss",
+	1142:  "edtools",
+	1143:  "imyx",
+	1144:  "fuscript",
+	1145:  "x9-icue",
+	1146:  "audit-transfer",
+	1147:  "capioverlan",
+	1148:  "elfiq-repl",
+	1149:  "bvtsonar",
+	1150:  "blaze",
+	1151:  "unizensus",
+	1152:  "winpoplanmess",
+	1153:  "c1222-acse",
+	1154:  "resacommunity",
+	1155:  "nfa",
+	1156:  "iascontrol-oms",
+	1157:  "iascontrol",
+	1158:  "dbcontrol-oms",
+	1159:  "oracle-oms",
+	1160:  "olsv",
+	1161:  "health-polling",
+	1162:  "health-trap",
+	1163:  "sddp",
+	1164:  "qsm-proxy",
+	1165:  "qsm-gui",
+	1166:  "qsm-remote",
+	1167:  "cisco-ipsla",
+	1168:  "vchat",
+	1169:  "tripwire",
+	1170:  "atc-lm",
+	1171:  "atc-appserver",
+	1172:  "dnap",
+	1173:  "d-cinema-rrp",
+	1174:  "fnet-remote-ui",
+	1175:  "dossier",
+	1176:  "indigo-server",
+	1177:  "dkmessenger",
+	1178:  "sgi-storman",
+	1179:  "b2n",
+	1180:  "mc-client",
+	1181:  "3comnetman",
+	1182:  "accelenet",
+	1183:  "llsurfup-http",
+	1184:  "llsurfup-https",
+	1185:  "catchpole",
+	1186:  "mysql-cluster",
+	1187:  "alias",
+	1188:  "hp-webadmin",
+	1189:  "unet",
+	1190:  "commlinx-avl",
+	1191:  "gpfs",
+	1192:  "caids-sensor",
+	1193:  "fiveacross",
+	1194:  "openvpn",
+	1195:  "rsf-1",
+	1196:  "netmagic",
+	1197:  "carrius-rshell",
+	1198:  "cajo-discovery",
+	1199:  "dmidi",
+	1200:  "scol",
+	1201:  "nucleus-sand",
+	1202:  "caiccipc",
+	1203:  "ssslic-mgr",
+	1204:  "ssslog-mgr",
+	1205:  "accord-mgc",
+	1206:  "anthony-data",
+	1207:  "metasage",
+	1208:  "seagull-ais",
+	1209:  "ipcd3",
+	1210:  "eoss",
+	1211:  "groove-dpp",
+	1212:  "lupa",
+	1213:  "mpc-lifenet",
+	1214:  "kazaa",
+	1215:  "scanstat-1",
+	1216:  "etebac5",
+	1217:  "hpss-ndapi",
+	1218:  "aeroflight-ads",
+	1219:  "aeroflight-ret",
+	1220:  "qt-serveradmin",
+	1221:  "sweetware-apps",
+	1222:  "nerv",
+	1223:  "tgp",
+	1224:  "vpnz",
+	1225:  "slinkysearch",
+	1226:  "stgxfws",
+	1227:  "dns2go",
+	1228:  "florence",
+	1229:  "zented",
+	1230:  "periscope",
+	1231:  "menandmice-lpm",
+	1232:  "first-defense",
+	1233:  "univ-appserver",
+	1234:  "search-agent",
+	1235:  "mosaicsyssvc1",
+	1236:  "bvcontrol",
+	1237:  "tsdos390",
+	1238:  "hacl-qs",
+	1239:  "nmsd",
+	1240:  "instantia",
+	1241:  "nessus",
+	1242:  "nmasoverip",
+	1243:  "serialgateway",
+	1244:  "isbconference1",
+	1245:  "isbconference2",
+	1246:  "payrouter",
+	1247:  "visionpyramid",
+	1248:  "hermes",
+	1249:  "mesavistaco",
+	1250:  "swldy-sias",
+	1251:  "servergraph",
+	1252:  "bspne-pcc",
+	1253:  "q55-pcc",
+	1254:  "de-noc",
+	1255:  "de-cache-query",
+	1256:  "de-server",
+	1257:  "shockwave2",
+	1258:  "opennl",
+	1259:  "opennl-voice",
+	1260:  "ibm-ssd",
+	1261:  "mpshrsv",
+	1262:  "qnts-orb",
+	1263:  "dka",
+	1264:  "prat",
+	1265:  "dssiapi",
+	1266:  "dellpwrappks",
+	1267:  "epc",
+	1268:  "propel-msgsys",
+	1269:  "watilapp",
+	1270:  "opsmgr",
+	1271:  "excw",
+	1272:  "cspmlockmgr",
+	1273:  "emc-gateway",
+	1274:  "t1distproc",
+	1275:  "ivcollector",
+	1277:  "miva-mqs",
+	1278:  "dellwebadmin-1",
+	1279:  "dellwebadmin-2",
+	1280:  "pictrography",
+	1281:  "healthd",
+	1282:  "emperion",
+	1283:  "productinfo",
+	1284:  "iee-qfx",
+	1285:  "neoiface",
+	1286:  "netuitive",
+	1287:  "routematch",
+	1288:  "navbuddy",
+	1289:  "jwalkserver",
+	1290:  "winjaserver",
+	1291:  "seagulllms",
+	1292:  "dsdn",
+	1293:  "pkt-krb-ipsec",
+	1294:  "cmmdriver",
+	1295:  "ehtp",
+	1296:  "dproxy",
+	1297:  "sdproxy",
+	1298:  "lpcp",
+	1299:  "hp-sci",
+	1300:  "h323hostcallsc",
+	1301:  "ci3-software-1",
+	1302:  "ci3-software-2",
+	1303:  "sftsrv",
+	1304:  "boomerang",
+	1305:  "pe-mike",
+	1306:  "re-conn-proto",
+	1307:  "pacmand",
+	1308:  "odsi",
+	1309:  "jtag-server",
+	1310:  "husky",
+	1311:  "rxmon",
+	1312:  "sti-envision",
+	1313:  "bmc-patroldb",
+	1314:  "pdps",
+	1315:  "els",
+	1316:  "exbit-escp",
+	1317:  "vrts-ipcserver",
+	1318:  "krb5gatekeeper",
+	1319:  "amx-icsp",
+	1320:  "amx-axbnet",
+	1321:  "pip",
+	1322:  "novation",
+	1323:  "brcd",
+	1324:  "delta-mcp",
+	1325:  "dx-instrument",
+	1326:  "wimsic",
+	1327:  "ultrex",
+	1328:  "ewall",
+	1329:  "netdb-export",
+	1330:  "streetperfect",
+	1331:  "intersan",
+	1332:  "pcia-rxp-b",
+	1333:  "passwrd-policy",
+	1334:  "writesrv",
+	1335:  "digital-notary",
+	1336:  "ischat",
+	1337:  "menandmice-dns",
+	1338:  "wmc-log-svc",
+	1339:  "kjtsiteserver",
+	1340:  "naap",
+	1341:  "qubes",
+	1342:  "esbroker",
+	1343:  "re101",
+	1344:  "icap",
+	1345:  "vpjp",
+	1346:  "alta-ana-lm",
+	1347:  "bbn-mmc",
+	1348:  "bbn-mmx",
+	1349:  "sbook",
+	1350:  "editbench",
+	1351:  "equationbuilder",
+	1352:  "lotusnote",
+	1353:  "relief",
+	1354:  "XSIP-network",
+	1355:  "intuitive-edge",
+	1356:  "cuillamartin",
+	1357:  "pegboard",
+	1358:  "connlcli",
+	1359:  "ftsrv",
+	1360:  "mimer",
+	1361:  "linx",
+	1362:  "timeflies",
+	1363:  "ndm-requester",
+	1364:  "ndm-server",
+	1365:  "adapt-sna",
+	1366:  "netware-csp",
+	1367:  "dcs",
+	1368:  "screencast",
+	1369:  "gv-us",
+	1370:  "us-gv",
+	1371:  "fc-cli",
+	1372:  "fc-ser",
+	1373:  "chromagrafx",
+	1374:  "molly",
+	1375:  "bytex",
+	1376:  "ibm-pps",
+	1377:  "cichlid",
+	1378:  "elan",
+	1379:  "dbreporter",
+	1380:  "telesis-licman",
+	1381:  "apple-licman",
+	1382:  "udt-os",
+	1383:  "gwha",
+	1384:  "os-licman",
+	1385:  "atex-elmd",
+	1386:  "checksum",
+	1387:  "cadsi-lm",
+	1388:  "objective-dbc",
+	1389:  "iclpv-dm",
+	1390:  "iclpv-sc",
+	1391:  "iclpv-sas",
+	1392:  "iclpv-pm",
+	1393:  "iclpv-nls",
+	1394:  "iclpv-nlc",
+	1395:  "iclpv-wsm",
+	1396:  "dvl-activemail",
+	1397:  "audio-activmail",
+	1398:  "video-activmail",
+	1399:  "cadkey-licman",
+	1400:  "cadkey-tablet",
+	1401:  "goldleaf-licman",
+	1402:  "prm-sm-np",
+	1403:  "prm-nm-np",
+	1404:  "igi-lm",
+	1405:  "ibm-res",
+	1406:  "netlabs-lm",
+	1407:  "tibet-server",
+	1408:  "sophia-lm",
+	1409:  "here-lm",
+	1410:  "hiq",
+	1411:  "af",
+	1412:  "innosys",
+	1413:  "innosys-acl",
+	1414:  "ibm-mqseries",
+	1415:  "dbstar",
+	1416:  "novell-lu6-2",
+	1417:  "timbuktu-srv1",
+	1418:  "timbuktu-srv2",
+	1419:  "timbuktu-srv3",
+	1420:  "timbuktu-srv4",
+	1421:  "gandalf-lm",
+	1422:  "autodesk-lm",
+	1423:  "essbase",
+	1424:  "hybrid",
+	1425:  "zion-lm",
+	1426:  "sais",
+	1427:  "mloadd",
+	1428:  "informatik-lm",
+	1429:  "nms",
+	1430:  "tpdu",
+	1431:  "rgtp",
+	1432:  "blueberry-lm",
+	1433:  "ms-sql-s",
+	1434:  "ms-sql-m",
+	1435:  "ibm-cics",
+	1436:  "saism",
+	1437:  "tabula",
+	1438:  "eicon-server",
+	1439:  "eicon-x25",
+	1440:  "eicon-slp",
+	1441:  "cadis-1",
+	1442:  "cadis-2",
+	1443:  "ies-lm",
+	1444:  "marcam-lm",
+	1445:  "proxima-lm",
+	1446:  "ora-lm",
+	1447:  "apri-lm",
+	1448:  "oc-lm",
+	1449:  "peport",
+	1450:  "dwf",
+	1451:  "infoman",
+	1452:  "gtegsc-lm",
+	1453:  "genie-lm",
+	1454:  "interhdl-elmd",
+	1455:  "esl-lm",
+	1456:  "dca",
+	1457:  "valisys-lm",
+	1458:  "nrcabq-lm",
+	1459:  "proshare1",
+	1460:  "proshare2",
+	1461:  "ibm-wrless-lan",
+	1462:  "world-lm",
+	1463:  "nucleus",
+	1464:  "msl-lmd",
+	1465:  "pipes",
+	1466:  "oceansoft-lm",
+	1467:  "csdmbase",
+	1468:  "csdm",
+	1469:  "aal-lm",
+	1470:  "uaiact",
+	1471:  "csdmbase",
+	1472:  "csdm",
+	1473:  "openmath",
+	1474:  "telefinder",
+	1475:  "taligent-lm",
+	1476:  "clvm-cfg",
+	1477:  "ms-sna-server",
+	1478:  "ms-sna-base",
+	1479:  "dberegister",
+	1480:  "pacerforum",
+	1481:  "airs",
+	1482:  "miteksys-lm",
+	1483:  "afs",
+	1484:  "confluent",
+	1485:  "lansource",
+	1486:  "nms-topo-serv",
+	1487:  "localinfosrvr",
+	1488:  "docstor",
+	1489:  "dmdocbroker",
+	1490:  "insitu-conf",
+	1492:  "stone-design-1",
+	1493:  "netmap-lm",
+	1494:  "ica",
+	1495:  "cvc",
+	1496:  "liberty-lm",
+	1497:  "rfx-lm",
+	1498:  "sybase-sqlany",
+	1499:  "fhc",
+	1500:  "vlsi-lm",
+	1501:  "saiscm",
+	1502:  "shivadiscovery",
+	1503:  "imtc-mcs",
+	1504:  "evb-elm",
+	1505:  "funkproxy",
+	1506:  "utcd",
+	1507:  "symplex",
+	1508:  "diagmond",
+	1509:  "robcad-lm",
+	1510:  "mvx-lm",
+	1511:  "3l-l1",
+	1512:  "wins",
+	1513:  "fujitsu-dtc",
+	1514:  "fujitsu-dtcns",
+	1515:  "ifor-protocol",
+	1516:  "vpad",
+	1517:  "vpac",
+	1518:  "vpvd",
+	1519:  "vpvc",
+	1520:  "atm-zip-office",
+	1521:  "ncube-lm",
+	1522:  "ricardo-lm",
+	1523:  "cichild-lm",
+	1524:  "ingreslock",
+	1525:  "orasrv",
+	1526:  "pdap-np",
+	1527:  "tlisrv",
+	1529:  "coauthor",
+	1530:  "rap-service",
+	1531:  "rap-listen",
+	1532:  "miroconnect",
+	1533:  "virtual-places",
+	1534:  "micromuse-lm",
+	1535:  "ampr-info",
+	1536:  "ampr-inter",
+	1537:  "sdsc-lm",
+	1538:  "3ds-lm",
+	1539:  "intellistor-lm",
+	1540:  "rds",
+	1541:  "rds2",
+	1542:  "gridgen-elmd",
+	1543:  "simba-cs",
+	1544:  "aspeclmd",
+	1545:  "vistium-share",
+	1546:  "abbaccuray",
+	1547:  "laplink",
+	1548:  "axon-lm",
+	1549:  "shivahose",
+	1550:  "3m-image-lm",
+	1551:  "hecmtl-db",
+	1552:  "pciarray",
+	1553:  "sna-cs",
+	1554:  "caci-lm",
+	1555:  "livelan",
+	1556:  "veritas-pbx",
+	1557:  "arbortext-lm",
+	1558:  "xingmpeg",
+	1559:  "web2host",
+	1560:  "asci-val",
+	1561:  "facilityview",
+	1562:  "pconnectmgr",
+	1563:  "cadabra-lm",
+	1564:  "pay-per-view",
+	1565:  "winddlb",
+	1566:  "corelvideo",
+	1567:  "jlicelmd",
+	1568:  "tsspmap",
+	1569:  "ets",
+	1570:  "orbixd",
+	1571:  "rdb-dbs-disp",
+	1572:  "chip-lm",
+	1573:  "itscomm-ns",
+	1574:  "mvel-lm",
+	1575:  "oraclenames",
+	1576:  "moldflow-lm",
+	1577:  "hypercube-lm",
+	1578:  "jacobus-lm",
+	1579:  "ioc-sea-lm",
+	1580:  "tn-tl-r1",
+	1581:  "mil-2045-47001",
+	1582:  "msims",
+	1583:  "simbaexpress",
+	1584:  "tn-tl-fd2",
+	1585:  "intv",
+	1586:  "ibm-abtact",
+	1587:  "pra-elmd",
+	1588:  "triquest-lm",
+	1589:  "vqp",
+	1590:  "gemini-lm",
+	1591:  "ncpm-pm",
+	1592:  "commonspace",
+	1593:  "mainsoft-lm",
+	1594:  "sixtrak",
+	1595:  "radio",
+	1596:  "radio-sm",
+	1597:  "orbplus-iiop",
+	1598:  "picknfs",
+	1599:  "simbaservices",
+	1600:  "issd",
+	1601:  "aas",
+	1602:  "inspect",
+	1603:  "picodbc",
+	1604:  "icabrowser",
+	1605:  "slp",
+	1606:  "slm-api",
+	1607:  "stt",
+	1608:  "smart-lm",
+	1609:  "isysg-lm",
+	1610:  "taurus-wh",
+	1611:  "ill",
+	1612:  "netbill-trans",
+	1613:  "netbill-keyrep",
+	1614:  "netbill-cred",
+	1615:  "netbill-auth",
+	1616:  "netbill-prod",
+	1617:  "nimrod-agent",
+	1618:  "skytelnet",
+	1619:  "xs-openstorage",
+	1620:  "faxportwinport",
+	1621:  "softdataphone",
+	1622:  "ontime",
+	1623:  "jaleosnd",
+	1624:  "udp-sr-port",
+	1625:  "svs-omagent",
+	1626:  "shockwave",
+	1627:  "t128-gateway",
+	1628:  "lontalk-norm",
+	1629:  "lontalk-urgnt",
+	1630:  "oraclenet8cman",
+	1631:  "visitview",
+	1632:  "pammratc",
+	1633:  "pammrpc",
+	1634:  "loaprobe",
+	1635:  "edb-server1",
+	1636:  "isdc",
+	1637:  "islc",
+	1638:  "ismc",
+	1639:  "cert-initiator",
+	1640:  "cert-responder",
+	1641:  "invision",
+	1642:  "isis-am",
+	1643:  "isis-ambc",
+	1644:  "saiseh",
+	1645:  "sightline",
+	1646:  "sa-msg-port",
+	1647:  "rsap",
+	1648:  "concurrent-lm",
+	1649:  "kermit",
+	1650:  "nkd",
+	1651:  "shiva-confsrvr",
+	1652:  "xnmp",
+	1653:  "alphatech-lm",
+	1654:  "stargatealerts",
+	1655:  "dec-mbadmin",
+	1656:  "dec-mbadmin-h",
+	1657:  "fujitsu-mmpdc",
+	1658:  "sixnetudr",
+	1659:  "sg-lm",
+	1660:  "skip-mc-gikreq",
+	1661:  "netview-aix-1",
+	1662:  "netview-aix-2",
+	1663:  "netview-aix-3",
+	1664:  "netview-aix-4",
+	1665:  "netview-aix-5",
+	1666:  "netview-aix-6",
+	1667:  "netview-aix-7",
+	1668:  "netview-aix-8",
+	1669:  "netview-aix-9",
+	1670:  "netview-aix-10",
+	1671:  "netview-aix-11",
+	1672:  "netview-aix-12",
+	1673:  "proshare-mc-1",
+	1674:  "proshare-mc-2",
+	1675:  "pdp",
+	1676:  "netcomm1",
+	1677:  "groupwise",
+	1678:  "prolink",
+	1679:  "darcorp-lm",
+	1680:  "microcom-sbp",
+	1681:  "sd-elmd",
+	1682:  "lanyon-lantern",
+	1683:  "ncpm-hip",
+	1684:  "snaresecure",
+	1685:  "n2nremote",
+	1686:  "cvmon",
+	1687:  "nsjtp-ctrl",
+	1688:  "nsjtp-data",
+	1689:  "firefox",
+	1690:  "ng-umds",
+	1691:  "empire-empuma",
+	1692:  "sstsys-lm",
+	1693:  "rrirtr",
+	1694:  "rrimwm",
+	1695:  "rrilwm",
+	1696:  "rrifmm",
+	1697:  "rrisat",
+	1698:  "rsvp-encap-1",
+	1699:  "rsvp-encap-2",
+	1700:  "mps-raft",
+	1701:  "l2f",
+	1702:  "deskshare",
+	1703:  "hb-engine",
+	1704:  "bcs-broker",
+	1705:  "slingshot",
+	1706:  "jetform",
+	1707:  "vdmplay",
+	1708:  "gat-lmd",
+	1709:  "centra",
+	1710:  "impera",
+	1711:  "pptconference",
+	1712:  "registrar",
+	1713:  "conferencetalk",
+	1714:  "sesi-lm",
+	1715:  "houdini-lm",
+	1716:  "xmsg",
+	1717:  "fj-hdnet",
+	1718:  "h323gatedisc",
+	1719:  "h323gatestat",
+	1720:  "h323hostcall",
+	1721:  "caicci",
+	1722:  "hks-lm",
+	1723:  "pptp",
+	1724:  "csbphonemaster",
+	1725:  "iden-ralp",
+	1726:  "iberiagames",
+	1727:  "winddx",
+	1728:  "telindus",
+	1729:  "citynl",
+	1730:  "roketz",
+	1731:  "msiccp",
+	1732:  "proxim",
+	1733:  "siipat",
+	1734:  "cambertx-lm",
+	1735:  "privatechat",
+	1736:  "street-stream",
+	1737:  "ultimad",
+	1738:  "gamegen1",
+	1739:  "webaccess",
+	1740:  "encore",
+	1741:  "cisco-net-mgmt",
+	1742:  "3Com-nsd",
+	1743:  "cinegrfx-lm",
+	1744:  "ncpm-ft",
+	1745:  "remote-winsock",
+	1746:  "ftrapid-1",
+	1747:  "ftrapid-2",
+	1748:  "oracle-em1",
+	1749:  "aspen-services",
+	1750:  "sslp",
+	1751:  "swiftnet",
+	1752:  "lofr-lm",
+	1753:  "predatar-comms",
+	1754:  "oracle-em2",
+	1755:  "ms-streaming",
+	1756:  "capfast-lmd",
+	1757:  "cnhrp",
+	1758:  "tftp-mcast",
+	1759:  "spss-lm",
+	1760:  "www-ldap-gw",
+	1761:  "cft-0",
+	1762:  "cft-1",
+	1763:  "cft-2",
+	1764:  "cft-3",
+	1765:  "cft-4",
+	1766:  "cft-5",
+	1767:  "cft-6",
+	1768:  "cft-7",
+	1769:  "bmc-net-adm",
+	1770:  "bmc-net-svc",
+	1771:  "vaultbase",
+	1772:  "essweb-gw",
+	1773:  "kmscontrol",
+	1774:  "global-dtserv",
+	1775:  "vdab",
+	1776:  "femis",
+	1777:  "powerguardian",
+	1778:  "prodigy-intrnet",
+	1779:  "pharmasoft",
+	1780:  "dpkeyserv",
+	1781:  "answersoft-lm",
+	1782:  "hp-hcip",
+	1784:  "finle-lm",
+	1785:  "windlm",
+	1786:  "funk-logger",
+	1787:  "funk-license",
+	1788:  "psmond",
+	1789:  "hello",
+	1790:  "nmsp",
+	1791:  "ea1",
+	1792:  "ibm-dt-2",
+	1793:  "rsc-robot",
+	1794:  "cera-bcm",
+	1795:  "dpi-proxy",
+	1796:  "vocaltec-admin",
+	1797:  "uma",
+	1798:  "etp",
+	1799:  "netrisk",
+	1800:  "ansys-lm",
+	1801:  "msmq",
+	1802:  "concomp1",
+	1803:  "hp-hcip-gwy",
+	1804:  "enl",
+	1805:  "enl-name",
+	1806:  "musiconline",
+	1807:  "fhsp",
+	1808:  "oracle-vp2",
+	1809:  "oracle-vp1",
+	1810:  "jerand-lm",
+	1811:  "scientia-sdb",
+	1812:  "radius",
+	1813:  "radius-acct",
+	1814:  "tdp-suite",
+	1815:  "mmpft",
+	1816:  "harp",
+	1817:  "rkb-oscs",
+	1818:  "etftp",
+	1819:  "plato-lm",
+	1820:  "mcagent",
+	1821:  "donnyworld",
+	1822:  "es-elmd",
+	1823:  "unisys-lm",
+	1824:  "metrics-pas",
+	1825:  "direcpc-video",
+	1826:  "ardt",
+	1827:  "asi",
+	1828:  "itm-mcell-u",
+	1829:  "optika-emedia",
+	1830:  "net8-cman",
+	1831:  "myrtle",
+	1832:  "tht-treasure",
+	1833:  "udpradio",
+	1834:  "ardusuni",
+	1835:  "ardusmul",
+	1836:  "ste-smsc",
+	1837:  "csoft1",
+	1838:  "talnet",
+	1839:  "netopia-vo1",
+	1840:  "netopia-vo2",
+	1841:  "netopia-vo3",
+	1842:  "netopia-vo4",
+	1843:  "netopia-vo5",
+	1844:  "direcpc-dll",
+	1845:  "altalink",
+	1846:  "tunstall-pnc",
+	1847:  "slp-notify",
+	1848:  "fjdocdist",
+	1849:  "alpha-sms",
+	1850:  "gsi",
+	1851:  "ctcd",
+	1852:  "virtual-time",
+	1853:  "vids-avtp",
+	1854:  "buddy-draw",
+	1855:  "fiorano-rtrsvc",
+	1856:  "fiorano-msgsvc",
+	1857:  "datacaptor",
+	1858:  "privateark",
+	1859:  "gammafetchsvr",
+	1860:  "sunscalar-svc",
+	1861:  "lecroy-vicp",
+	1862:  "mysql-cm-agent",
+	1863:  "msnp",
+	1864:  "paradym-31port",
+	1865:  "entp",
+	1866:  "swrmi",
+	1867:  "udrive",
+	1868:  "viziblebrowser",
+	1869:  "transact",
+	1870:  "sunscalar-dns",
+	1871:  "canocentral0",
+	1872:  "canocentral1",
+	1873:  "fjmpjps",
+	1874:  "fjswapsnp",
+	1875:  "westell-stats",
+	1876:  "ewcappsrv",
+	1877:  "hp-webqosdb",
+	1878:  "drmsmc",
+	1879:  "nettgain-nms",
+	1880:  "vsat-control",
+	1881:  "ibm-mqseries2",
+	1882:  "ecsqdmn",
+	1883:  "mqtt",
+	1884:  "idmaps",
+	1885:  "vrtstrapserver",
+	1886:  "leoip",
+	1887:  "filex-lport",
+	1888:  "ncconfig",
+	1889:  "unify-adapter",
+	1890:  "wilkenlistener",
+	1891:  "childkey-notif",
+	1892:  "childkey-ctrl",
+	1893:  "elad",
+	1894:  "o2server-port",
+	1896:  "b-novative-ls",
+	1897:  "metaagent",
+	1898:  "cymtec-port",
+	1899:  "mc2studios",
+	1900:  "ssdp",
+	1901:  "fjicl-tep-a",
+	1902:  "fjicl-tep-b",
+	1903:  "linkname",
+	1904:  "fjicl-tep-c",
+	1905:  "sugp",
+	1906:  "tpmd",
+	1907:  "intrastar",
+	1908:  "dawn",
+	1909:  "global-wlink",
+	1910:  "ultrabac",
+	1911:  "mtp",
+	1912:  "rhp-iibp",
+	1913:  "armadp",
+	1914:  "elm-momentum",
+	1915:  "facelink",
+	1916:  "persona",
+	1917:  "noagent",
+	1918:  "can-nds",
+	1919:  "can-dch",
+	1920:  "can-ferret",
+	1921:  "noadmin",
+	1922:  "tapestry",
+	1923:  "spice",
+	1924:  "xiip",
+	1925:  "discovery-port",
+	1926:  "egs",
+	1927:  "videte-cipc",
+	1928:  "emsd-port",
+	1929:  "bandwiz-system",
+	1930:  "driveappserver",
+	1931:  "amdsched",
+	1932:  "ctt-broker",
+	1933:  "xmapi",
+	1934:  "xaapi",
+	1935:  "macromedia-fcs",
+	1936:  "jetcmeserver",
+	1937:  "jwserver",
+	1938:  "jwclient",
+	1939:  "jvserver",
+	1940:  "jvclient",
+	1941:  "dic-aida",
+	1942:  "res",
+	1943:  "beeyond-media",
+	1944:  "close-combat",
+	1945:  "dialogic-elmd",
+	1946:  "tekpls",
+	1947:  "sentinelsrm",
+	1948:  "eye2eye",
+	1949:  "ismaeasdaqlive",
+	1950:  "ismaeasdaqtest",
+	1951:  "bcs-lmserver",
+	1952:  "mpnjsc",
+	1953:  "rapidbase",
+	1954:  "abr-api",
+	1955:  "abr-secure",
+	1956:  "vrtl-vmf-ds",
+	1957:  "unix-status",
+	1958:  "dxadmind",
+	1959:  "simp-all",
+	1960:  "nasmanager",
+	1961:  "bts-appserver",
+	1962:  "biap-mp",
+	1963:  "webmachine",
+	1964:  "solid-e-engine",
+	1965:  "tivoli-npm",
+	1966:  "slush",
+	1967:  "sns-quote",
+	1968:  "lipsinc",
+	1969:  "lipsinc1",
+	1970:  "netop-rc",
+	1971:  "netop-school",
+	1972:  "intersys-cache",
+	1973:  "dlsrap",
+	1974:  "drp",
+	1975:  "tcoflashagent",
+	1976:  "tcoregagent",
+	1977:  "tcoaddressbook",
+	1978:  "unisql",
+	1979:  "unisql-java",
+	1980:  "pearldoc-xact",
+	1981:  "p2pq",
+	1982:  "estamp",
+	1983:  "lhtp",
+	1984:  "bb",
+	1985:  "hsrp",
+	1986:  "licensedaemon",
+	1987:  "tr-rsrb-p1",
+	1988:  "tr-rsrb-p2",
+	1989:  "tr-rsrb-p3",
+	1990:  "stun-p1",
+	1991:  "stun-p2",
+	1992:  "stun-p3",
+	1993:  "snmp-tcp-port",
+	1994:  "stun-port",
+	1995:  "perf-port",
+	1996:  "tr-rsrb-port",
+	1997:  "gdp-port",
+	1998:  "x25-svc-port",
+	1999:  "tcp-id-port",
+	2000:  "cisco-sccp",
+	2001:  "dc",
+	2002:  "globe",
+	2003:  "brutus",
+	2004:  "mailbox",
+	2005:  "berknet",
+	2006:  "invokator",
+	2007:  "dectalk",
+	2008:  "conf",
+	2009:  "news",
+	2010:  "search",
+	2011:  "raid-cc",
+	2012:  "ttyinfo",
+	2013:  "raid-am",
+	2014:  "troff",
+	2015:  "cypress",
+	2016:  "bootserver",
+	2017:  "cypress-stat",
+	2018:  "terminaldb",
+	2019:  "whosockami",
+	2020:  "xinupageserver",
+	2021:  "servexec",
+	2022:  "down",
+	2023:  "xinuexpansion3",
+	2024:  "xinuexpansion4",
+	2025:  "ellpack",
+	2026:  "scrabble",
+	2027:  "shadowserver",
+	2028:  "submitserver",
+	2029:  "hsrpv6",
+	2030:  "device2",
+	2031:  "mobrien-chat",
+	2032:  "blackboard",
+	2033:  "glogger",
+	2034:  "scoremgr",
+	2035:  "imsldoc",
+	2036:  "e-dpnet",
+	2037:  "applus",
+	2038:  "objectmanager",
+	2039:  "prizma",
+	2040:  "lam",
+	2041:  "interbase",
+	2042:  "isis",
+	2043:  "isis-bcast",
+	2044:  "rimsl",
+	2045:  "cdfunc",
+	2046:  "sdfunc",
+	2047:  "dls",
+	2048:  "dls-monitor",
+	2049:  "shilp",
+	2050:  "av-emb-config",
+	2051:  "epnsdp",
+	2052:  "clearvisn",
+	2053:  "lot105-ds-upd",
+	2054:  "weblogin",
+	2055:  "iop",
+	2056:  "omnisky",
+	2057:  "rich-cp",
+	2058:  "newwavesearch",
+	2059:  "bmc-messaging",
+	2060:  "teleniumdaemon",
+	2061:  "netmount",
+	2062:  "icg-swp",
+	2063:  "icg-bridge",
+	2064:  "icg-iprelay",
+	2065:  "dlsrpn",
+	2066:  "aura",
+	2067:  "dlswpn",
+	2068:  "avauthsrvprtcl",
+	2069:  "event-port",
+	2070:  "ah-esp-encap",
+	2071:  "acp-port",
+	2072:  "msync",
+	2073:  "gxs-data-port",
+	2074:  "vrtl-vmf-sa",
+	2075:  "newlixengine",
+	2076:  "newlixconfig",
+	2077:  "tsrmagt",
+	2078:  "tpcsrvr",
+	2079:  "idware-router",
+	2080:  "autodesk-nlm",
+	2081:  "kme-trap-port",
+	2082:  "infowave",
+	2083:  "radsec",
+	2084:  "sunclustergeo",
+	2085:  "ada-cip",
+	2086:  "gnunet",
+	2087:  "eli",
+	2088:  "ip-blf",
+	2089:  "sep",
+	2090:  "lrp",
+	2091:  "prp",
+	2092:  "descent3",
+	2093:  "nbx-cc",
+	2094:  "nbx-au",
+	2095:  "nbx-ser",
+	2096:  "nbx-dir",
+	2097:  "jetformpreview",
+	2098:  "dialog-port",
+	2099:  "h2250-annex-g",
+	2100:  "amiganetfs",
+	2101:  "rtcm-sc104",
+	2102:  "zephyr-srv",
+	2103:  "zephyr-clt",
+	2104:  "zephyr-hm",
+	2105:  "minipay",
+	2106:  "mzap",
+	2107:  "bintec-admin",
+	2108:  "comcam",
+	2109:  "ergolight",
+	2110:  "umsp",
+	2111:  "dsatp",
+	2112:  "idonix-metanet",
+	2113:  "hsl-storm",
+	2114:  "newheights",
+	2115:  "kdm",
+	2116:  "ccowcmr",
+	2117:  "mentaclient",
+	2118:  "mentaserver",
+	2119:  "gsigatekeeper",
+	2120:  "qencp",
+	2121:  "scientia-ssdb",
+	2122:  "caupc-remote",
+	2123:  "gtp-control",
+	2124:  "elatelink",
+	2125:  "lockstep",
+	2126:  "pktcable-cops",
+	2127:  "index-pc-wb",
+	2128:  "net-steward",
+	2129:  "cs-live",
+	2130:  "xds",
+	2131:  "avantageb2b",
+	2132:  "solera-epmap",
+	2133:  "zymed-zpp",
+	2134:  "avenue",
+	2135:  "gris",
+	2136:  "appworxsrv",
+	2137:  "connect",
+	2138:  "unbind-cluster",
+	2139:  "ias-auth",
+	2140:  "ias-reg",
+	2141:  "ias-admind",
+	2142:  "tdmoip",
+	2143:  "lv-jc",
+	2144:  "lv-ffx",
+	2145:  "lv-pici",
+	2146:  "lv-not",
+	2147:  "lv-auth",
+	2148:  "veritas-ucl",
+	2149:  "acptsys",
+	2150:  "dynamic3d",
+	2151:  "docent",
+	2152:  "gtp-user",
+	2153:  "ctlptc",
+	2154:  "stdptc",
+	2155:  "brdptc",
+	2156:  "trp",
+	2157:  "xnds",
+	2158:  "touchnetplus",
+	2159:  "gdbremote",
+	2160:  "apc-2160",
+	2161:  "apc-2161",
+	2162:  "navisphere",
+	2163:  "navisphere-sec",
+	2164:  "ddns-v3",
+	2165:  "x-bone-api",
+	2166:  "iwserver",
+	2167:  "raw-serial",
+	2168:  "easy-soft-mux",
+	2169:  "brain",
+	2170:  "eyetv",
+	2171:  "msfw-storage",
+	2172:  "msfw-s-storage",
+	2173:  "msfw-replica",
+	2174:  "msfw-array",
+	2175:  "airsync",
+	2176:  "rapi",
+	2177:  "qwave",
+	2178:  "bitspeer",
+	2179:  "vmrdp",
+	2180:  "mc-gt-srv",
+	2181:  "eforward",
+	2182:  "cgn-stat",
+	2183:  "cgn-config",
+	2184:  "nvd",
+	2185:  "onbase-dds",
+	2186:  "gtaua",
+	2187:  "ssmc",
+	2188:  "radware-rpm",
+	2189:  "radware-rpm-s",
+	2190:  "tivoconnect",
+	2191:  "tvbus",
+	2192:  "asdis",
+	2193:  "drwcs",
+	2197:  "mnp-exchange",
+	2198:  "onehome-remote",
+	2199:  "onehome-help",
+	2200:  "ici",
+	2201:  "ats",
+	2202:  "imtc-map",
+	2203:  "b2-runtime",
+	2204:  "b2-license",
+	2205:  "jps",
+	2206:  "hpocbus",
+	2207:  "hpssd",
+	2208:  "hpiod",
+	2209:  "rimf-ps",
+	2210:  "noaaport",
+	2211:  "emwin",
+	2212:  "leecoposserver",
+	2213:  "kali",
+	2214:  "rpi",
+	2215:  "ipcore",
+	2216:  "vtu-comms",
+	2217:  "gotodevice",
+	2218:  "bounzza",
+	2219:  "netiq-ncap",
+	2220:  "netiq",
+	2221:  "ethernet-ip-s",
+	2222:  "EtherNet-IP-1",
+	2223:  "rockwell-csp2",
+	2224:  "efi-mg",
+	2225:  "rcip-itu",
+	2226:  "di-drm",
+	2227:  "di-msg",
+	2228:  "ehome-ms",
+	2229:  "datalens",
+	2230:  "queueadm",
+	2231:  "wimaxasncp",
+	2232:  "ivs-video",
+	2233:  "infocrypt",
+	2234:  "directplay",
+	2235:  "sercomm-wlink",
+	2236:  "nani",
+	2237:  "optech-port1-lm",
+	2238:  "aviva-sna",
+	2239:  "imagequery",
+	2240:  "recipe",
+	2241:  "ivsd",
+	2242:  "foliocorp",
+	2243:  "magicom",
+	2244:  "nmsserver",
+	2245:  "hao",
+	2246:  "pc-mta-addrmap",
+	2247:  "antidotemgrsvr",
+	2248:  "ums",
+	2249:  "rfmp",
+	2250:  "remote-collab",
+	2251:  "dif-port",
+	2252:  "njenet-ssl",
+	2253:  "dtv-chan-req",
+	2254:  "seispoc",
+	2255:  "vrtp",
+	2256:  "pcc-mfp",
+	2257:  "simple-tx-rx",
+	2258:  "rcts",
+	2260:  "apc-2260",
+	2261:  "comotionmaster",
+	2262:  "comotionback",
+	2263:  "ecwcfg",
+	2264:  "apx500api-1",
+	2265:  "apx500api-2",
+	2266:  "mfserver",
+	2267:  "ontobroker",
+	2268:  "amt",
+	2269:  "mikey",
+	2270:  "starschool",
+	2271:  "mmcals",
+	2272:  "mmcal",
+	2273:  "mysql-im",
+	2274:  "pcttunnell",
+	2275:  "ibridge-data",
+	2276:  "ibridge-mgmt",
+	2277:  "bluectrlproxy",
+	2278:  "s3db",
+	2279:  "xmquery",
+	2280:  "lnvpoller",
+	2281:  "lnvconsole",
+	2282:  "lnvalarm",
+	2283:  "lnvstatus",
+	2284:  "lnvmaps",
+	2285:  "lnvmailmon",
+	2286:  "nas-metering",
+	2287:  "dna",
+	2288:  "netml",
+	2289:  "dict-lookup",
+	2290:  "sonus-logging",
+	2291:  "eapsp",
+	2292:  "mib-streaming",
+	2293:  "npdbgmngr",
+	2294:  "konshus-lm",
+	2295:  "advant-lm",
+	2296:  "theta-lm",
+	2297:  "d2k-datamover1",
+	2298:  "d2k-datamover2",
+	2299:  "pc-telecommute",
+	2300:  "cvmmon",
+	2301:  "cpq-wbem",
+	2302:  "binderysupport",
+	2303:  "proxy-gateway",
+	2304:  "attachmate-uts",
+	2305:  "mt-scaleserver",
+	2306:  "tappi-boxnet",
+	2307:  "pehelp",
+	2308:  "sdhelp",
+	2309:  "sdserver",
+	2310:  "sdclient",
+	2311:  "messageservice",
+	2312:  "wanscaler",
+	2313:  "iapp",
+	2314:  "cr-websystems",
+	2315:  "precise-sft",
+	2316:  "sent-lm",
+	2317:  "attachmate-g32",
+	2318:  "cadencecontrol",
+	2319:  "infolibria",
+	2320:  "siebel-ns",
+	2321:  "rdlap",
+	2322:  "ofsd",
+	2323:  "3d-nfsd",
+	2324:  "cosmocall",
+	2325:  "ansysli",
+	2326:  "idcp",
+	2327:  "xingcsm",
+	2328:  "netrix-sftm",
+	2329:  "nvd",
+	2330:  "tscchat",
+	2331:  "agentview",
+	2332:  "rcc-host",
+	2333:  "snapp",
+	2334:  "ace-client",
+	2335:  "ace-proxy",
+	2336:  "appleugcontrol",
+	2337:  "ideesrv",
+	2338:  "norton-lambert",
+	2339:  "3com-webview",
+	2340:  "wrs-registry",
+	2341:  "xiostatus",
+	2342:  "manage-exec",
+	2343:  "nati-logos",
+	2344:  "fcmsys",
+	2345:  "dbm",
+	2346:  "redstorm-join",
+	2347:  "redstorm-find",
+	2348:  "redstorm-info",
+	2349:  "redstorm-diag",
+	2350:  "psbserver",
+	2351:  "psrserver",
+	2352:  "pslserver",
+	2353:  "pspserver",
+	2354:  "psprserver",
+	2355:  "psdbserver",
+	2356:  "gxtelmd",
+	2357:  "unihub-server",
+	2358:  "futrix",
+	2359:  "flukeserver",
+	2360:  "nexstorindltd",
+	2361:  "tl1",
+	2362:  "digiman",
+	2363:  "mediacntrlnfsd",
+	2364:  "oi-2000",
+	2365:  "dbref",
+	2366:  "qip-login",
+	2367:  "service-ctrl",
+	2368:  "opentable",
+	2370:  "l3-hbmon",
+	2371:  "hp-rda",
+	2372:  "lanmessenger",
+	2373:  "remographlm",
+	2374:  "hydra",
+	2375:  "docker",
+	2376:  "docker-s",
+	2377:  "swarm",
+	2379:  "etcd-client",
+	2380:  "etcd-server",
+	2381:  "compaq-https",
+	2382:  "ms-olap3",
+	2383:  "ms-olap4",
+	2384:  "sd-request",
+	2385:  "sd-data",
+	2386:  "virtualtape",
+	2387:  "vsamredirector",
+	2388:  "mynahautostart",
+	2389:  "ovsessionmgr",
+	2390:  "rsmtp",
+	2391:  "3com-net-mgmt",
+	2392:  "tacticalauth",
+	2393:  "ms-olap1",
+	2394:  "ms-olap2",
+	2395:  "lan900-remote",
+	2396:  "wusage",
+	2397:  "ncl",
+	2398:  "orbiter",
+	2399:  "fmpro-fdal",
+	2400:  "opequus-server",
+	2401:  "cvspserver",
+	2402:  "taskmaster2000",
+	2403:  "taskmaster2000",
+	2404:  "iec-104",
+	2405:  "trc-netpoll",
+	2406:  "jediserver",
+	2407:  "orion",
+	2408:  "railgun-webaccl",
+	2409:  "sns-protocol",
+	2410:  "vrts-registry",
+	2411:  "netwave-ap-mgmt",
+	2412:  "cdn",
+	2413:  "orion-rmi-reg",
+	2414:  "beeyond",
+	2415:  "codima-rtp",
+	2416:  "rmtserver",
+	2417:  "composit-server",
+	2418:  "cas",
+	2419:  "attachmate-s2s",
+	2420:  "dslremote-mgmt",
+	2421:  "g-talk",
+	2422:  "crmsbits",
+	2423:  "rnrp",
+	2424:  "kofax-svr",
+	2425:  "fjitsuappmgr",
+	2426:  "vcmp",
+	2427:  "mgcp-gateway",
+	2428:  "ott",
+	2429:  "ft-role",
+	2430:  "venus",
+	2431:  "venus-se",
+	2432:  "codasrv",
+	2433:  "codasrv-se",
+	2434:  "pxc-epmap",
+	2435:  "optilogic",
+	2436:  "topx",
+	2437:  "unicontrol",
+	2438:  "msp",
+	2439:  "sybasedbsynch",
+	2440:  "spearway",
+	2441:  "pvsw-inet",
+	2442:  "netangel",
+	2443:  "powerclientcsf",
+	2444:  "btpp2sectrans",
+	2445:  "dtn1",
+	2446:  "bues-service",
+	2447:  "ovwdb",
+	2448:  "hpppssvr",
+	2449:  "ratl",
+	2450:  "netadmin",
+	2451:  "netchat",
+	2452:  "snifferclient",
+	2453:  "madge-ltd",
+	2454:  "indx-dds",
+	2455:  "wago-io-system",
+	2456:  "altav-remmgt",
+	2457:  "rapido-ip",
+	2458:  "griffin",
+	2459:  "community",
+	2460:  "ms-theater",
+	2461:  "qadmifoper",
+	2462:  "qadmifevent",
+	2463:  "lsi-raid-mgmt",
+	2464:  "direcpc-si",
+	2465:  "lbm",
+	2466:  "lbf",
+	2467:  "high-criteria",
+	2468:  "qip-msgd",
+	2469:  "mti-tcs-comm",
+	2470:  "taskman-port",
+	2471:  "seaodbc",
+	2472:  "c3",
+	2473:  "aker-cdp",
+	2474:  "vitalanalysis",
+	2475:  "ace-server",
+	2476:  "ace-svr-prop",
+	2477:  "ssm-cvs",
+	2478:  "ssm-cssps",
+	2479:  "ssm-els",
+	2480:  "powerexchange",
+	2481:  "giop",
+	2482:  "giop-ssl",
+	2483:  "ttc",
+	2484:  "ttc-ssl",
+	2485:  "netobjects1",
+	2486:  "netobjects2",
+	2487:  "pns",
+	2488:  "moy-corp",
+	2489:  "tsilb",
+	2490:  "qip-qdhcp",
+	2491:  "conclave-cpp",
+	2492:  "groove",
+	2493:  "talarian-mqs",
+	2494:  "bmc-ar",
+	2495:  "fast-rem-serv",
+	2496:  "dirgis",
+	2497:  "quaddb",
+	2498:  "odn-castraq",
+	2499:  "unicontrol",
+	2500:  "rtsserv",
+	2501:  "rtsclient",
+	2502:  "kentrox-prot",
+	2503:  "nms-dpnss",
+	2504:  "wlbs",
+	2505:  "ppcontrol",
+	2506:  "jbroker",
+	2507:  "spock",
+	2508:  "jdatastore",
+	2509:  "fjmpss",
+	2510:  "fjappmgrbulk",
+	2511:  "metastorm",
+	2512:  "citrixima",
+	2513:  "citrixadmin",
+	2514:  "facsys-ntp",
+	2515:  "facsys-router",
+	2516:  "maincontrol",
+	2517:  "call-sig-trans",
+	2518:  "willy",
+	2519:  "globmsgsvc",
+	2520:  "pvsw",
+	2521:  "adaptecmgr",
+	2522:  "windb",
+	2523:  "qke-llc-v3",
+	2524:  "optiwave-lm",
+	2525:  "ms-v-worlds",
+	2526:  "ema-sent-lm",
+	2527:  "iqserver",
+	2528:  "ncr-ccl",
+	2529:  "utsftp",
+	2530:  "vrcommerce",
+	2531:  "ito-e-gui",
+	2532:  "ovtopmd",
+	2533:  "snifferserver",
+	2534:  "combox-web-acc",
+	2535:  "madcap",
+	2536:  "btpp2audctr1",
+	2537:  "upgrade",
+	2538:  "vnwk-prapi",
+	2539:  "vsiadmin",
+	2540:  "lonworks",
+	2541:  "lonworks2",
+	2542:  "udrawgraph",
+	2543:  "reftek",
+	2544:  "novell-zen",
+	2545:  "sis-emt",
+	2546:  "vytalvaultbrtp",
+	2547:  "vytalvaultvsmp",
+	2548:  "vytalvaultpipe",
+	2549:  "ipass",
+	2550:  "ads",
+	2551:  "isg-uda-server",
+	2552:  "call-logging",
+	2553:  "efidiningport",
+	2554:  "vcnet-link-v10",
+	2555:  "compaq-wcp",
+	2556:  "nicetec-nmsvc",
+	2557:  "nicetec-mgmt",
+	2558:  "pclemultimedia",
+	2559:  "lstp",
+	2560:  "labrat",
+	2561:  "mosaixcc",
+	2562:  "delibo",
+	2563:  "cti-redwood",
+	2564:  "hp-3000-telnet",
+	2565:  "coord-svr",
+	2566:  "pcs-pcw",
+	2567:  "clp",
+	2568:  "spamtrap",
+	2569:  "sonuscallsig",
+	2570:  "hs-port",
+	2571:  "cecsvc",
+	2572:  "ibp",
+	2573:  "trustestablish",
+	2574:  "blockade-bpsp",
+	2575:  "hl7",
+	2576:  "tclprodebugger",
+	2577:  "scipticslsrvr",
+	2578:  "rvs-isdn-dcp",
+	2579:  "mpfoncl",
+	2580:  "tributary",
+	2581:  "argis-te",
+	2582:  "argis-ds",
+	2583:  "mon",
+	2584:  "cyaserv",
+	2585:  "netx-server",
+	2586:  "netx-agent",
+	2587:  "masc",
+	2588:  "privilege",
+	2589:  "quartus-tcl",
+	2590:  "idotdist",
+	2591:  "maytagshuffle",
+	2592:  "netrek",
+	2593:  "mns-mail",
+	2594:  "dts",
+	2595:  "worldfusion1",
+	2596:  "worldfusion2",
+	2597:  "homesteadglory",
+	2598:  "citriximaclient",
+	2599:  "snapd",
+	2600:  "hpstgmgr",
+	2601:  "discp-client",
+	2602:  "discp-server",
+	2603:  "servicemeter",
+	2604:  "nsc-ccs",
+	2605:  "nsc-posa",
+	2606:  "netmon",
+	2607:  "connection",
+	2608:  "wag-service",
+	2609:  "system-monitor",
+	2610:  "versa-tek",
+	2611:  "lionhead",
+	2612:  "qpasa-agent",
+	2613:  "smntubootstrap",
+	2614:  "neveroffline",
+	2615:  "firepower",
+	2616:  "appswitch-emp",
+	2617:  "cmadmin",
+	2618:  "priority-e-com",
+	2619:  "bruce",
+	2620:  "lpsrecommender",
+	2621:  "miles-apart",
+	2622:  "metricadbc",
+	2623:  "lmdp",
+	2624:  "aria",
+	2625:  "blwnkl-port",
+	2626:  "gbjd816",
+	2627:  "moshebeeri",
+	2628:  "dict",
+	2629:  "sitaraserver",
+	2630:  "sitaramgmt",
+	2631:  "sitaradir",
+	2632:  "irdg-post",
+	2633:  "interintelli",
+	2634:  "pk-electronics",
+	2635:  "backburner",
+	2636:  "solve",
+	2637:  "imdocsvc",
+	2638:  "sybaseanywhere",
+	2639:  "aminet",
+	2640:  "ami-control",
+	2641:  "hdl-srv",
+	2642:  "tragic",
+	2643:  "gte-samp",
+	2644:  "travsoft-ipx-t",
+	2645:  "novell-ipx-cmd",
+	2646:  "and-lm",
+	2647:  "syncserver",
+	2648:  "upsnotifyprot",
+	2649:  "vpsipport",
+	2650:  "eristwoguns",
+	2651:  "ebinsite",
+	2652:  "interpathpanel",
+	2653:  "sonus",
+	2654:  "corel-vncadmin",
+	2655:  "unglue",
+	2656:  "kana",
+	2657:  "sns-dispatcher",
+	2658:  "sns-admin",
+	2659:  "sns-query",
+	2660:  "gcmonitor",
+	2661:  "olhost",
+	2662:  "bintec-capi",
+	2663:  "bintec-tapi",
+	2664:  "patrol-mq-gm",
+	2665:  "patrol-mq-nm",
+	2666:  "extensis",
+	2667:  "alarm-clock-s",
+	2668:  "alarm-clock-c",
+	2669:  "toad",
+	2670:  "tve-announce",
+	2671:  "newlixreg",
+	2672:  "nhserver",
+	2673:  "firstcall42",
+	2674:  "ewnn",
+	2675:  "ttc-etap",
+	2676:  "simslink",
+	2677:  "gadgetgate1way",
+	2678:  "gadgetgate2way",
+	2679:  "syncserverssl",
+	2680:  "pxc-sapxom",
+	2681:  "mpnjsomb",
+	2683:  "ncdloadbalance",
+	2684:  "mpnjsosv",
+	2685:  "mpnjsocl",
+	2686:  "mpnjsomg",
+	2687:  "pq-lic-mgmt",
+	2688:  "md-cg-http",
+	2689:  "fastlynx",
+	2690:  "hp-nnm-data",
+	2691:  "itinternet",
+	2692:  "admins-lms",
+	2694:  "pwrsevent",
+	2695:  "vspread",
+	2696:  "unifyadmin",
+	2697:  "oce-snmp-trap",
+	2698:  "mck-ivpip",
+	2699:  "csoft-plusclnt",
+	2700:  "tqdata",
+	2701:  "sms-rcinfo",
+	2702:  "sms-xfer",
+	2703:  "sms-chat",
+	2704:  "sms-remctrl",
+	2705:  "sds-admin",
+	2706:  "ncdmirroring",
+	2707:  "emcsymapiport",
+	2708:  "banyan-net",
+	2709:  "supermon",
+	2710:  "sso-service",
+	2711:  "sso-control",
+	2712:  "aocp",
+	2713:  "raventbs",
+	2714:  "raventdm",
+	2715:  "hpstgmgr2",
+	2716:  "inova-ip-disco",
+	2717:  "pn-requester",
+	2718:  "pn-requester2",
+	2719:  "scan-change",
+	2720:  "wkars",
+	2721:  "smart-diagnose",
+	2722:  "proactivesrvr",
+	2723:  "watchdog-nt",
+	2724:  "qotps",
+	2725:  "msolap-ptp2",
+	2726:  "tams",
+	2727:  "mgcp-callagent",
+	2728:  "sqdr",
+	2729:  "tcim-control",
+	2730:  "nec-raidplus",
+	2731:  "fyre-messanger",
+	2732:  "g5m",
+	2733:  "signet-ctf",
+	2734:  "ccs-software",
+	2735:  "netiq-mc",
+	2736:  "radwiz-nms-srv",
+	2737:  "srp-feedback",
+	2738:  "ndl-tcp-ois-gw",
+	2739:  "tn-timing",
+	2740:  "alarm",
+	2741:  "tsb",
+	2742:  "tsb2",
+	2743:  "murx",
+	2744:  "honyaku",
+	2745:  "urbisnet",
+	2746:  "cpudpencap",
+	2747:  "fjippol-swrly",
+	2748:  "fjippol-polsvr",
+	2749:  "fjippol-cnsl",
+	2750:  "fjippol-port1",
+	2751:  "fjippol-port2",
+	2752:  "rsisysaccess",
+	2753:  "de-spot",
+	2754:  "apollo-cc",
+	2755:  "expresspay",
+	2756:  "simplement-tie",
+	2757:  "cnrp",
+	2758:  "apollo-status",
+	2759:  "apollo-gms",
+	2760:  "sabams",
+	2761:  "dicom-iscl",
+	2762:  "dicom-tls",
+	2763:  "desktop-dna",
+	2764:  "data-insurance",
+	2765:  "qip-audup",
+	2766:  "compaq-scp",
+	2767:  "uadtc",
+	2768:  "uacs",
+	2769:  "exce",
+	2770:  "veronica",
+	2771:  "vergencecm",
+	2772:  "auris",
+	2773:  "rbakcup1",
+	2774:  "rbakcup2",
+	2775:  "smpp",
+	2776:  "ridgeway1",
+	2777:  "ridgeway2",
+	2778:  "gwen-sonya",
+	2779:  "lbc-sync",
+	2780:  "lbc-control",
+	2781:  "whosells",
+	2782:  "everydayrc",
+	2783:  "aises",
+	2784:  "www-dev",
+	2785:  "aic-np",
+	2786:  "aic-oncrpc",
+	2787:  "piccolo",
+	2788:  "fryeserv",
+	2789:  "media-agent",
+	2790:  "plgproxy",
+	2791:  "mtport-regist",
+	2792:  "f5-globalsite",
+	2793:  "initlsmsad",
+	2795:  "livestats",
+	2796:  "ac-tech",
+	2797:  "esp-encap",
+	2798:  "tmesis-upshot",
+	2799:  "icon-discover",
+	2800:  "acc-raid",
+	2801:  "igcp",
+	2802:  "veritas-tcp1",
+	2803:  "btprjctrl",
+	2804:  "dvr-esm",
+	2805:  "wta-wsp-s",
+	2806:  "cspuni",
+	2807:  "cspmulti",
+	2808:  "j-lan-p",
+	2809:  "corbaloc",
+	2810:  "netsteward",
+	2811:  "gsiftp",
+	2812:  "atmtcp",
+	2813:  "llm-pass",
+	2814:  "llm-csv",
+	2815:  "lbc-measure",
+	2816:  "lbc-watchdog",
+	2817:  "nmsigport",
+	2818:  "rmlnk",
+	2819:  "fc-faultnotify",
+	2820:  "univision",
+	2821:  "vrts-at-port",
+	2822:  "ka0wuc",
+	2823:  "cqg-netlan",
+	2824:  "cqg-netlan-1",
+	2826:  "slc-systemlog",
+	2827:  "slc-ctrlrloops",
+	2828:  "itm-lm",
+	2829:  "silkp1",
+	2830:  "silkp2",
+	2831:  "silkp3",
+	2832:  "silkp4",
+	2833:  "glishd",
+	2834:  "evtp",
+	2835:  "evtp-data",
+	2836:  "catalyst",
+	2837:  "repliweb",
+	2838:  "starbot",
+	2839:  "nmsigport",
+	2840:  "l3-exprt",
+	2841:  "l3-ranger",
+	2842:  "l3-hawk",
+	2843:  "pdnet",
+	2844:  "bpcp-poll",
+	2845:  "bpcp-trap",
+	2846:  "aimpp-hello",
+	2847:  "aimpp-port-req",
+	2848:  "amt-blc-port",
+	2849:  "fxp",
+	2850:  "metaconsole",
+	2851:  "webemshttp",
+	2852:  "bears-01",
+	2853:  "ispipes",
+	2854:  "infomover",
+	2855:  "msrp",
+	2856:  "cesdinv",
+	2857:  "simctlp",
+	2858:  "ecnp",
+	2859:  "activememory",
+	2860:  "dialpad-voice1",
+	2861:  "dialpad-voice2",
+	2862:  "ttg-protocol",
+	2863:  "sonardata",
+	2864:  "astromed-main",
+	2865:  "pit-vpn",
+	2866:  "iwlistener",
+	2867:  "esps-portal",
+	2868:  "npep-messaging",
+	2869:  "icslap",
+	2870:  "daishi",
+	2871:  "msi-selectplay",
+	2872:  "radix",
+	2874:  "dxmessagebase1",
+	2875:  "dxmessagebase2",
+	2876:  "sps-tunnel",
+	2877:  "bluelance",
+	2878:  "aap",
+	2879:  "ucentric-ds",
+	2880:  "synapse",
+	2881:  "ndsp",
+	2882:  "ndtp",
+	2883:  "ndnp",
+	2884:  "flashmsg",
+	2885:  "topflow",
+	2886:  "responselogic",
+	2887:  "aironetddp",
+	2888:  "spcsdlobby",
+	2889:  "rsom",
+	2890:  "cspclmulti",
+	2891:  "cinegrfx-elmd",
+	2892:  "snifferdata",
+	2893:  "vseconnector",
+	2894:  "abacus-remote",
+	2895:  "natuslink",
+	2896:  "ecovisiong6-1",
+	2897:  "citrix-rtmp",
+	2898:  "appliance-cfg",
+	2899:  "powergemplus",
+	2900:  "quicksuite",
+	2901:  "allstorcns",
+	2902:  "netaspi",
+	2903:  "suitcase",
+	2904:  "m2ua",
+	2905:  "m3ua",
+	2906:  "caller9",
+	2907:  "webmethods-b2b",
+	2908:  "mao",
+	2909:  "funk-dialout",
+	2910:  "tdaccess",
+	2911:  "blockade",
+	2912:  "epicon",
+	2913:  "boosterware",
+	2914:  "gamelobby",
+	2915:  "tksocket",
+	2916:  "elvin-server",
+	2917:  "elvin-client",
+	2918:  "kastenchasepad",
+	2919:  "roboer",
+	2920:  "roboeda",
+	2921:  "cesdcdman",
+	2922:  "cesdcdtrn",
+	2923:  "wta-wsp-wtp-s",
+	2924:  "precise-vip",
+	2926:  "mobile-file-dl",
+	2927:  "unimobilectrl",
+	2928:  "redstone-cpss",
+	2929:  "amx-webadmin",
+	2930:  "amx-weblinx",
+	2931:  "circle-x",
+	2932:  "incp",
+	2933:  "4-tieropmgw",
+	2934:  "4-tieropmcli",
+	2935:  "qtp",
+	2936:  "otpatch",
+	2937:  "pnaconsult-lm",
+	2938:  "sm-pas-1",
+	2939:  "sm-pas-2",
+	2940:  "sm-pas-3",
+	2941:  "sm-pas-4",
+	2942:  "sm-pas-5",
+	2943:  "ttnrepository",
+	2944:  "megaco-h248",
+	2945:  "h248-binary",
+	2946:  "fjsvmpor",
+	2947:  "gpsd",
+	2948:  "wap-push",
+	2949:  "wap-pushsecure",
+	2950:  "esip",
+	2951:  "ottp",
+	2952:  "mpfwsas",
+	2953:  "ovalarmsrv",
+	2954:  "ovalarmsrv-cmd",
+	2955:  "csnotify",
+	2956:  "ovrimosdbman",
+	2957:  "jmact5",
+	2958:  "jmact6",
+	2959:  "rmopagt",
+	2960:  "dfoxserver",
+	2961:  "boldsoft-lm",
+	2962:  "iph-policy-cli",
+	2963:  "iph-policy-adm",
+	2964:  "bullant-srap",
+	2965:  "bullant-rap",
+	2966:  "idp-infotrieve",
+	2967:  "ssc-agent",
+	2968:  "enpp",
+	2969:  "essp",
+	2970:  "index-net",
+	2971:  "netclip",
+	2972:  "pmsm-webrctl",
+	2973:  "svnetworks",
+	2974:  "signal",
+	2975:  "fjmpcm",
+	2976:  "cns-srv-port",
+	2977:  "ttc-etap-ns",
+	2978:  "ttc-etap-ds",
+	2979:  "h263-video",
+	2980:  "wimd",
+	2981:  "mylxamport",
+	2982:  "iwb-whiteboard",
+	2983:  "netplan",
+	2984:  "hpidsadmin",
+	2985:  "hpidsagent",
+	2986:  "stonefalls",
+	2987:  "identify",
+	2988:  "hippad",
+	2989:  "zarkov",
+	2990:  "boscap",
+	2991:  "wkstn-mon",
+	2992:  "avenyo",
+	2993:  "veritas-vis1",
+	2994:  "veritas-vis2",
+	2995:  "idrs",
+	2996:  "vsixml",
+	2997:  "rebol",
+	2998:  "realsecure",
+	2999:  "remoteware-un",
+	3000:  "hbci",
+	3001:  "origo-native",
+	3002:  "exlm-agent",
+	3003:  "cgms",
+	3004:  "csoftragent",
+	3005:  "geniuslm",
+	3006:  "ii-admin",
+	3007:  "lotusmtap",
+	3008:  "midnight-tech",
+	3009:  "pxc-ntfy",
+	3010:  "gw",
+	3011:  "trusted-web",
+	3012:  "twsdss",
+	3013:  "gilatskysurfer",
+	3014:  "broker-service",
+	3015:  "nati-dstp",
+	3016:  "notify-srvr",
+	3017:  "event-listener",
+	3018:  "srvc-registry",
+	3019:  "resource-mgr",
+	3020:  "cifs",
+	3021:  "agriserver",
+	3022:  "csregagent",
+	3023:  "magicnotes",
+	3024:  "nds-sso",
+	3025:  "arepa-raft",
+	3026:  "agri-gateway",
+	3027:  "LiebDevMgmt-C",
+	3028:  "LiebDevMgmt-DM",
+	3029:  "LiebDevMgmt-A",
+	3030:  "arepa-cas",
+	3031:  "eppc",
+	3032:  "redwood-chat",
+	3033:  "pdb",
+	3034:  "osmosis-aeea",
+	3035:  "fjsv-gssagt",
+	3036:  "hagel-dump",
+	3037:  "hp-san-mgmt",
+	3038:  "santak-ups",
+	3039:  "cogitate",
+	3040:  "tomato-springs",
+	3041:  "di-traceware",
+	3042:  "journee",
+	3043:  "brp",
+	3044:  "epp",
+	3045:  "responsenet",
+	3046:  "di-ase",
+	3047:  "hlserver",
+	3048:  "pctrader",
+	3049:  "nsws",
+	3050:  "gds-db",
+	3051:  "galaxy-server",
+	3052:  "apc-3052",
+	3053:  "dsom-server",
+	3054:  "amt-cnf-prot",
+	3055:  "policyserver",
+	3056:  "cdl-server",
+	3057:  "goahead-fldup",
+	3058:  "videobeans",
+	3059:  "qsoft",
+	3060:  "interserver",
+	3061:  "cautcpd",
+	3062:  "ncacn-ip-tcp",
+	3063:  "ncadg-ip-udp",
+	3064:  "rprt",
+	3065:  "slinterbase",
+	3066:  "netattachsdmp",
+	3067:  "fjhpjp",
+	3068:  "ls3bcast",
+	3069:  "ls3",
+	3070:  "mgxswitch",
+	3071:  "xplat-replicate",
+	3072:  "csd-monitor",
+	3073:  "vcrp",
+	3074:  "xbox",
+	3075:  "orbix-locator",
+	3076:  "orbix-config",
+	3077:  "orbix-loc-ssl",
+	3078:  "orbix-cfg-ssl",
+	3079:  "lv-frontpanel",
+	3080:  "stm-pproc",
+	3081:  "tl1-lv",
+	3082:  "tl1-raw",
+	3083:  "tl1-telnet",
+	3084:  "itm-mccs",
+	3085:  "pcihreq",
+	3086:  "jdl-dbkitchen",
+	3087:  "asoki-sma",
+	3088:  "xdtp",
+	3089:  "ptk-alink",
+	3090:  "stss",
+	3091:  "1ci-smcs",
+	3093:  "rapidmq-center",
+	3094:  "rapidmq-reg",
+	3095:  "panasas",
+	3096:  "ndl-aps",
+	3098:  "umm-port",
+	3099:  "chmd",
+	3100:  "opcon-xps",
+	3101:  "hp-pxpib",
+	3102:  "slslavemon",
+	3103:  "autocuesmi",
+	3104:  "autocuelog",
+	3105:  "cardbox",
+	3106:  "cardbox-http",
+	3107:  "business",
+	3108:  "geolocate",
+	3109:  "personnel",
+	3110:  "sim-control",
+	3111:  "wsynch",
+	3112:  "ksysguard",
+	3113:  "cs-auth-svr",
+	3114:  "ccmad",
+	3115:  "mctet-master",
+	3116:  "mctet-gateway",
+	3117:  "mctet-jserv",
+	3118:  "pkagent",
+	3119:  "d2000kernel",
+	3120:  "d2000webserver",
+	3121:  "pcmk-remote",
+	3122:  "vtr-emulator",
+	3123:  "edix",
+	3124:  "beacon-port",
+	3125:  "a13-an",
+	3127:  "ctx-bridge",
+	3128:  "ndl-aas",
+	3129:  "netport-id",
+	3130:  "icpv2",
+	3131:  "netbookmark",
+	3132:  "ms-rule-engine",
+	3133:  "prism-deploy",
+	3134:  "ecp",
+	3135:  "peerbook-port",
+	3136:  "grubd",
+	3137:  "rtnt-1",
+	3138:  "rtnt-2",
+	3139:  "incognitorv",
+	3140:  "ariliamulti",
+	3141:  "vmodem",
+	3142:  "rdc-wh-eos",
+	3143:  "seaview",
+	3144:  "tarantella",
+	3145:  "csi-lfap",
+	3146:  "bears-02",
+	3147:  "rfio",
+	3148:  "nm-game-admin",
+	3149:  "nm-game-server",
+	3150:  "nm-asses-admin",
+	3151:  "nm-assessor",
+	3152:  "feitianrockey",
+	3153:  "s8-client-port",
+	3154:  "ccmrmi",
+	3155:  "jpegmpeg",
+	3156:  "indura",
+	3157:  "e3consultants",
+	3158:  "stvp",
+	3159:  "navegaweb-port",
+	3160:  "tip-app-server",
+	3161:  "doc1lm",
+	3162:  "sflm",
+	3163:  "res-sap",
+	3164:  "imprs",
+	3165:  "newgenpay",
+	3166:  "sossecollector",
+	3167:  "nowcontact",
+	3168:  "poweronnud",
+	3169:  "serverview-as",
+	3170:  "serverview-asn",
+	3171:  "serverview-gf",
+	3172:  "serverview-rm",
+	3173:  "serverview-icc",
+	3174:  "armi-server",
+	3175:  "t1-e1-over-ip",
+	3176:  "ars-master",
+	3177:  "phonex-port",
+	3178:  "radclientport",
+	3179:  "h2gf-w-2m",
+	3180:  "mc-brk-srv",
+	3181:  "bmcpatrolagent",
+	3182:  "bmcpatrolrnvu",
+	3183:  "cops-tls",
+	3184:  "apogeex-port",
+	3185:  "smpppd",
+	3186:  "iiw-port",
+	3187:  "odi-port",
+	3188:  "brcm-comm-port",
+	3189:  "pcle-infex",
+	3190:  "csvr-proxy",
+	3191:  "csvr-sslproxy",
+	3192:  "firemonrcc",
+	3193:  "spandataport",
+	3194:  "magbind",
+	3195:  "ncu-1",
+	3196:  "ncu-2",
+	3197:  "embrace-dp-s",
+	3198:  "embrace-dp-c",
+	3199:  "dmod-workspace",
+	3200:  "tick-port",
+	3201:  "cpq-tasksmart",
+	3202:  "intraintra",
+	3203:  "netwatcher-mon",
+	3204:  "netwatcher-db",
+	3205:  "isns",
+	3206:  "ironmail",
+	3207:  "vx-auth-port",
+	3208:  "pfu-prcallback",
+	3209:  "netwkpathengine",
+	3210:  "flamenco-proxy",
+	3211:  "avsecuremgmt",
+	3212:  "surveyinst",
+	3213:  "neon24x7",
+	3214:  "jmq-daemon-1",
+	3215:  "jmq-daemon-2",
+	3216:  "ferrari-foam",
+	3217:  "unite",
+	3218:  "smartpackets",
+	3219:  "wms-messenger",
+	3220:  "xnm-ssl",
+	3221:  "xnm-clear-text",
+	3222:  "glbp",
+	3223:  "digivote",
+	3224:  "aes-discovery",
+	3225:  "fcip-port",
+	3226:  "isi-irp",
+	3227:  "dwnmshttp",
+	3228:  "dwmsgserver",
+	3229:  "global-cd-port",
+	3230:  "sftdst-port",
+	3231:  "vidigo",
+	3232:  "mdtp",
+	3233:  "whisker",
+	3234:  "alchemy",
+	3235:  "mdap-port",
+	3236:  "apparenet-ts",
+	3237:  "apparenet-tps",
+	3238:  "apparenet-as",
+	3239:  "apparenet-ui",
+	3240:  "triomotion",
+	3241:  "sysorb",
+	3242:  "sdp-id-port",
+	3243:  "timelot",
+	3244:  "onesaf",
+	3245:  "vieo-fe",
+	3246:  "dvt-system",
+	3247:  "dvt-data",
+	3248:  "procos-lm",
+	3249:  "ssp",
+	3250:  "hicp",
+	3251:  "sysscanner",
+	3252:  "dhe",
+	3253:  "pda-data",
+	3254:  "pda-sys",
+	3255:  "semaphore",
+	3256:  "cpqrpm-agent",
+	3257:  "cpqrpm-server",
+	3258:  "ivecon-port",
+	3259:  "epncdp2",
+	3260:  "iscsi-target",
+	3261:  "winshadow",
+	3262:  "necp",
+	3263:  "ecolor-imager",
+	3264:  "ccmail",
+	3265:  "altav-tunnel",
+	3266:  "ns-cfg-server",
+	3267:  "ibm-dial-out",
+	3268:  "msft-gc",
+	3269:  "msft-gc-ssl",
+	3270:  "verismart",
+	3271:  "csoft-prev",
+	3272:  "user-manager",
+	3273:  "sxmp",
+	3274:  "ordinox-server",
+	3275:  "samd",
+	3276:  "maxim-asics",
+	3277:  "awg-proxy",
+	3278:  "lkcmserver",
+	3279:  "admind",
+	3280:  "vs-server",
+	3281:  "sysopt",
+	3282:  "datusorb",
+	3283:  "Apple Remote Desktop (Net Assistant)",
+	3284:  "4talk",
+	3285:  "plato",
+	3286:  "e-net",
+	3287:  "directvdata",
+	3288:  "cops",
+	3289:  "enpc",
+	3290:  "caps-lm",
+	3291:  "sah-lm",
+	3292:  "cart-o-rama",
+	3293:  "fg-fps",
+	3294:  "fg-gip",
+	3295:  "dyniplookup",
+	3296:  "rib-slm",
+	3297:  "cytel-lm",
+	3298:  "deskview",
+	3299:  "pdrncs",
+	3300:  "ceph",
+	3302:  "mcs-fastmail",
+	3303:  "opsession-clnt",
+	3304:  "opsession-srvr",
+	3305:  "odette-ftp",
+	3306:  "mysql",
+	3307:  "opsession-prxy",
+	3308:  "tns-server",
+	3309:  "tns-adv",
+	3310:  "dyna-access",
+	3311:  "mcns-tel-ret",
+	3312:  "appman-server",
+	3313:  "uorb",
+	3314:  "uohost",
+	3315:  "cdid",
+	3316:  "aicc-cmi",
+	3317:  "vsaiport",
+	3318:  "ssrip",
+	3319:  "sdt-lmd",
+	3320:  "officelink2000",
+	3321:  "vnsstr",
+	3326:  "sftu",
+	3327:  "bbars",
+	3328:  "egptlm",
+	3329:  "hp-device-disc",
+	3330:  "mcs-calypsoicf",
+	3331:  "mcs-messaging",
+	3332:  "mcs-mailsvr",
+	3333:  "dec-notes",
+	3334:  "directv-web",
+	3335:  "directv-soft",
+	3336:  "directv-tick",
+	3337:  "directv-catlg",
+	3338:  "anet-b",
+	3339:  "anet-l",
+	3340:  "anet-m",
+	3341:  "anet-h",
+	3342:  "webtie",
+	3343:  "ms-cluster-net",
+	3344:  "bnt-manager",
+	3345:  "influence",
+	3346:  "trnsprntproxy",
+	3347:  "phoenix-rpc",
+	3348:  "pangolin-laser",
+	3349:  "chevinservices",
+	3350:  "findviatv",
+	3351:  "btrieve",
+	3352:  "ssql",
+	3353:  "fatpipe",
+	3354:  "suitjd",
+	3355:  "ordinox-dbase",
+	3356:  "upnotifyps",
+	3357:  "adtech-test",
+	3358:  "mpsysrmsvr",
+	3359:  "wg-netforce",
+	3360:  "kv-server",
+	3361:  "kv-agent",
+	3362:  "dj-ilm",
+	3363:  "nati-vi-server",
+	3364:  "creativeserver",
+	3365:  "contentserver",
+	3366:  "creativepartnr",
+	3372:  "tip2",
+	3373:  "lavenir-lm",
+	3374:  "cluster-disc",
+	3375:  "vsnm-agent",
+	3376:  "cdbroker",
+	3377:  "cogsys-lm",
+	3378:  "wsicopy",
+	3379:  "socorfs",
+	3380:  "sns-channels",
+	3381:  "geneous",
+	3382:  "fujitsu-neat",
+	3383:  "esp-lm",
+	3384:  "hp-clic",
+	3385:  "qnxnetman",
+	3386:  "gprs-data",
+	3387:  "backroomnet",
+	3388:  "cbserver",
+	3389:  "ms-wbt-server",
+	3390:  "dsc",
+	3391:  "savant",
+	3392:  "efi-lm",
+	3393:  "d2k-tapestry1",
+	3394:  "d2k-tapestry2",
+	3395:  "dyna-lm",
+	3396:  "printer-agent",
+	3397:  "cloanto-lm",
+	3398:  "mercantile",
+	3399:  "csms",
+	3400:  "csms2",
+	3401:  "filecast",
+	3402:  "fxaengine-net",
+	3405:  "nokia-ann-ch1",
+	3406:  "nokia-ann-ch2",
+	3407:  "ldap-admin",
+	3408:  "BESApi",
+	3409:  "networklens",
+	3410:  "networklenss",
+	3411:  "biolink-auth",
+	3412:  "xmlblaster",
+	3413:  "svnet",
+	3414:  "wip-port",
+	3415:  "bcinameservice",
+	3416:  "commandport",
+	3417:  "csvr",
+	3418:  "rnmap",
+	3419:  "softaudit",
+	3420:  "ifcp-port",
+	3421:  "bmap",
+	3422:  "rusb-sys-port",
+	3423:  "xtrm",
+	3424:  "xtrms",
+	3425:  "agps-port",
+	3426:  "arkivio",
+	3427:  "websphere-snmp",
+	3428:  "twcss",
+	3429:  "gcsp",
+	3430:  "ssdispatch",
+	3431:  "ndl-als",
+	3432:  "osdcp",
+	3433:  "opnet-smp",
+	3434:  "opencm",
+	3435:  "pacom",
+	3436:  "gc-config",
+	3437:  "autocueds",
+	3438:  "spiral-admin",
+	3439:  "hri-port",
+	3440:  "ans-console",
+	3441:  "connect-client",
+	3442:  "connect-server",
+	3443:  "ov-nnm-websrv",
+	3444:  "denali-server",
+	3445:  "monp",
+	3446:  "3comfaxrpc",
+	3447:  "directnet",
+	3448:  "dnc-port",
+	3449:  "hotu-chat",
+	3450:  "castorproxy",
+	3451:  "asam",
+	3452:  "sabp-signal",
+	3453:  "pscupd",
+	3454:  "mira",
+	3455:  "prsvp",
+	3456:  "vat",
+	3457:  "vat-control",
+	3458:  "d3winosfi",
+	3459:  "integral",
+	3460:  "edm-manager",
+	3461:  "edm-stager",
+	3462:  "edm-std-notify",
+	3463:  "edm-adm-notify",
+	3464:  "edm-mgr-sync",
+	3465:  "edm-mgr-cntrl",
+	3466:  "workflow",
+	3467:  "rcst",
+	3468:  "ttcmremotectrl",
+	3469:  "pluribus",
+	3470:  "jt400",
+	3471:  "jt400-ssl",
+	3472:  "jaugsremotec-1",
+	3473:  "jaugsremotec-2",
+	3474:  "ttntspauto",
+	3475:  "genisar-port",
+	3476:  "nppmp",
+	3477:  "ecomm",
+	3478:  "stun",
+	3479:  "twrpc",
+	3480:  "plethora",
+	3481:  "cleanerliverc",
+	3482:  "vulture",
+	3483:  "slim-devices",
+	3484:  "gbs-stp",
+	3485:  "celatalk",
+	3486:  "ifsf-hb-port",
+	3487:  "ltctcp",
+	3488:  "fs-rh-srv",
+	3489:  "dtp-dia",
+	3490:  "colubris",
+	3491:  "swr-port",
+	3492:  "tvdumtray-port",
+	3493:  "nut",
+	3494:  "ibm3494",
+	3495:  "seclayer-tcp",
+	3496:  "seclayer-tls",
+	3497:  "ipether232port",
+	3498:  "dashpas-port",
+	3499:  "sccip-media",
+	3500:  "rtmp-port",
+	3501:  "isoft-p2p",
+	3502:  "avinstalldisc",
+	3503:  "lsp-ping",
+	3504:  "ironstorm",
+	3505:  "ccmcomm",
+	3506:  "apc-3506",
+	3507:  "nesh-broker",
+	3508:  "interactionweb",
+	3509:  "vt-ssl",
+	3510:  "xss-port",
+	3511:  "webmail-2",
+	3512:  "aztec",
+	3513:  "arcpd",
+	3514:  "must-p2p",
+	3515:  "must-backplane",
+	3516:  "smartcard-port",
+	3517:  "802-11-iapp",
+	3518:  "artifact-msg",
+	3519:  "nvmsgd",
+	3520:  "galileolog",
+	3521:  "mc3ss",
+	3522:  "nssocketport",
+	3523:  "odeumservlink",
+	3524:  "ecmport",
+	3525:  "eisport",
+	3526:  "starquiz-port",
+	3527:  "beserver-msg-q",
+	3528:  "jboss-iiop",
+	3529:  "jboss-iiop-ssl",
+	3530:  "gf",
+	3531:  "joltid",
+	3532:  "raven-rmp",
+	3533:  "raven-rdp",
+	3534:  "urld-port",
+	3535:  "ms-la",
+	3536:  "snac",
+	3537:  "ni-visa-remote",
+	3538:  "ibm-diradm",
+	3539:  "ibm-diradm-ssl",
+	3540:  "pnrp-port",
+	3541:  "voispeed-port",
+	3542:  "hacl-monitor",
+	3543:  "qftest-lookup",
+	3544:  "teredo",
+	3545:  "camac",
+	3547:  "symantec-sim",
+	3548:  "interworld",
+	3549:  "tellumat-nms",
+	3550:  "ssmpp",
+	3551:  "apcupsd",
+	3552:  "taserver",
+	3553:  "rbr-discovery",
+	3554:  "questnotify",
+	3555:  "razor",
+	3556:  "sky-transport",
+	3557:  "personalos-001",
+	3558:  "mcp-port",
+	3559:  "cctv-port",
+	3560:  "iniserve-port",
+	3561:  "bmc-onekey",
+	3562:  "sdbproxy",
+	3563:  "watcomdebug",
+	3564:  "esimport",
+	3565:  "m2pa",
+	3566:  "quest-data-hub",
+	3567:  "dof-eps",
+	3568:  "dof-tunnel-sec",
+	3569:  "mbg-ctrl",
+	3570:  "mccwebsvr-port",
+	3571:  "megardsvr-port",
+	3572:  "megaregsvrport",
+	3573:  "tag-ups-1",
+	3574:  "dmaf-server",
+	3575:  "ccm-port",
+	3576:  "cmc-port",
+	3577:  "config-port",
+	3578:  "data-port",
+	3579:  "ttat3lb",
+	3580:  "nati-svrloc",
+	3581:  "kfxaclicensing",
+	3582:  "press",
+	3583:  "canex-watch",
+	3584:  "u-dbap",
+	3585:  "emprise-lls",
+	3586:  "emprise-lsc",
+	3587:  "p2pgroup",
+	3588:  "sentinel",
+	3589:  "isomair",
+	3590:  "wv-csp-sms",
+	3591:  "gtrack-server",
+	3592:  "gtrack-ne",
+	3593:  "bpmd",
+	3594:  "mediaspace",
+	3595:  "shareapp",
+	3596:  "iw-mmogame",
+	3597:  "a14",
+	3598:  "a15",
+	3599:  "quasar-server",
+	3600:  "trap-daemon",
+	3601:  "visinet-gui",
+	3602:  "infiniswitchcl",
+	3603:  "int-rcv-cntrl",
+	3604:  "bmc-jmx-port",
+	3605:  "comcam-io",
+	3606:  "splitlock",
+	3607:  "precise-i3",
+	3608:  "trendchip-dcp",
+	3609:  "cpdi-pidas-cm",
+	3610:  "echonet",
+	3611:  "six-degrees",
+	3612:  "hp-dataprotect",
+	3613:  "alaris-disc",
+	3614:  "sigma-port",
+	3615:  "start-network",
+	3616:  "cd3o-protocol",
+	3617:  "sharp-server",
+	3618:  "aairnet-1",
+	3619:  "aairnet-2",
+	3620:  "ep-pcp",
+	3621:  "ep-nsp",
+	3622:  "ff-lr-port",
+	3623:  "haipe-discover",
+	3624:  "dist-upgrade",
+	3625:  "volley",
+	3626:  "bvcdaemon-port",
+	3627:  "jamserverport",
+	3628:  "ept-machine",
+	3629:  "escvpnet",
+	3630:  "cs-remote-db",
+	3631:  "cs-services",
+	3632:  "distcc",
+	3633:  "wacp",
+	3634:  "hlibmgr",
+	3635:  "sdo",
+	3636:  "servistaitsm",
+	3637:  "scservp",
+	3638:  "ehp-backup",
+	3639:  "xap-ha",
+	3640:  "netplay-port1",
+	3641:  "netplay-port2",
+	3642:  "juxml-port",
+	3643:  "audiojuggler",
+	3644:  "ssowatch",
+	3645:  "cyc",
+	3646:  "xss-srv-port",
+	3647:  "splitlock-gw",
+	3648:  "fjcp",
+	3649:  "nmmp",
+	3650:  "prismiq-plugin",
+	3651:  "xrpc-registry",
+	3652:  "vxcrnbuport",
+	3653:  "tsp",
+	3654:  "vaprtm",
+	3655:  "abatemgr",
+	3656:  "abatjss",
+	3657:  "immedianet-bcn",
+	3658:  "ps-ams",
+	3659:  "apple-sasl",
+	3660:  "can-nds-ssl",
+	3661:  "can-ferret-ssl",
+	3662:  "pserver",
+	3663:  "dtp",
+	3664:  "ups-engine",
+	3665:  "ent-engine",
+	3666:  "eserver-pap",
+	3667:  "infoexch",
+	3668:  "dell-rm-port",
+	3669:  "casanswmgmt",
+	3670:  "smile",
+	3671:  "efcp",
+	3672:  "lispworks-orb",
+	3673:  "mediavault-gui",
+	3674:  "wininstall-ipc",
+	3675:  "calltrax",
+	3676:  "va-pacbase",
+	3677:  "roverlog",
+	3678:  "ipr-dglt",
+	3679:  "Escale (Newton Dock)",
+	3680:  "npds-tracker",
+	3681:  "bts-x73",
+	3682:  "cas-mapi",
+	3683:  "bmc-ea",
+	3684:  "faxstfx-port",
+	3685:  "dsx-agent",
+	3686:  "tnmpv2",
+	3687:  "simple-push",
+	3688:  "simple-push-s",
+	3689:  "daap",
+	3690:  "svn",
+	3691:  "magaya-network",
+	3692:  "intelsync",
+	3693:  "easl",
+	3695:  "bmc-data-coll",
+	3696:  "telnetcpcd",
+	3697:  "nw-license",
+	3698:  "sagectlpanel",
+	3699:  "kpn-icw",
+	3700:  "lrs-paging",
+	3701:  "netcelera",
+	3702:  "ws-discovery",
+	3703:  "adobeserver-3",
+	3704:  "adobeserver-4",
+	3705:  "adobeserver-5",
+	3706:  "rt-event",
+	3707:  "rt-event-s",
+	3708:  "sun-as-iiops",
+	3709:  "ca-idms",
+	3710:  "portgate-auth",
+	3711:  "edb-server2",
+	3712:  "sentinel-ent",
+	3713:  "tftps",
+	3714:  "delos-dms",
+	3715:  "anoto-rendezv",
+	3716:  "wv-csp-sms-cir",
+	3717:  "wv-csp-udp-cir",
+	3718:  "opus-services",
+	3719:  "itelserverport",
+	3720:  "ufastro-instr",
+	3721:  "xsync",
+	3722:  "xserveraid",
+	3723:  "sychrond",
+	3724:  "blizwow",
+	3725:  "na-er-tip",
+	3726:  "array-manager",
+	3727:  "e-mdu",
+	3728:  "e-woa",
+	3729:  "fksp-audit",
+	3730:  "client-ctrl",
+	3731:  "smap",
+	3732:  "m-wnn",
+	3733:  "multip-msg",
+	3734:  "synel-data",
+	3735:  "pwdis",
+	3736:  "rs-rmi",
+	3737:  "xpanel",
+	3738:  "versatalk",
+	3739:  "launchbird-lm",
+	3740:  "heartbeat",
+	3741:  "wysdma",
+	3742:  "cst-port",
+	3743:  "ipcs-command",
+	3744:  "sasg",
+	3745:  "gw-call-port",
+	3746:  "linktest",
+	3747:  "linktest-s",
+	3748:  "webdata",
+	3749:  "cimtrak",
+	3750:  "cbos-ip-port",
+	3751:  "gprs-cube",
+	3752:  "vipremoteagent",
+	3753:  "nattyserver",
+	3754:  "timestenbroker",
+	3755:  "sas-remote-hlp",
+	3756:  "canon-capt",
+	3757:  "grf-port",
+	3758:  "apw-registry",
+	3759:  "exapt-lmgr",
+	3760:  "adtempusclient",
+	3761:  "gsakmp",
+	3762:  "gbs-smp",
+	3763:  "xo-wave",
+	3764:  "mni-prot-rout",
+	3765:  "rtraceroute",
+	3766:  "sitewatch-s",
+	3767:  "listmgr-port",
+	3768:  "rblcheckd",
+	3769:  "haipe-otnk",
+	3770:  "cindycollab",
+	3771:  "paging-port",
+	3772:  "ctp",
+	3773:  "ctdhercules",
+	3774:  "zicom",
+	3775:  "ispmmgr",
+	3776:  "dvcprov-port",
+	3777:  "jibe-eb",
+	3778:  "c-h-it-port",
+	3779:  "cognima",
+	3780:  "nnp",
+	3781:  "abcvoice-port",
+	3782:  "iso-tp0s",
+	3783:  "bim-pem",
+	3784:  "bfd-control",
+	3785:  "bfd-echo",
+	3786:  "upstriggervsw",
+	3787:  "fintrx",
+	3788:  "isrp-port",
+	3789:  "remotedeploy",
+	3790:  "quickbooksrds",
+	3791:  "tvnetworkvideo",
+	3792:  "sitewatch",
+	3793:  "dcsoftware",
+	3794:  "jaus",
+	3795:  "myblast",
+	3796:  "spw-dialer",
+	3797:  "idps",
+	3798:  "minilock",
+	3799:  "radius-dynauth",
+	3800:  "pwgpsi",
+	3801:  "ibm-mgr",
+	3802:  "vhd",
+	3803:  "soniqsync",
+	3804:  "iqnet-port",
+	3805:  "tcpdataserver",
+	3806:  "wsmlb",
+	3807:  "spugna",
+	3808:  "sun-as-iiops-ca",
+	3809:  "apocd",
+	3810:  "wlanauth",
+	3811:  "amp",
+	3812:  "neto-wol-server",
+	3813:  "rap-ip",
+	3814:  "neto-dcs",
+	3815:  "lansurveyorxml",
+	3816:  "sunlps-http",
+	3817:  "tapeware",
+	3818:  "crinis-hb",
+	3819:  "epl-slp",
+	3820:  "scp",
+	3821:  "pmcp",
+	3822:  "acp-discovery",
+	3823:  "acp-conduit",
+	3824:  "acp-policy",
+	3825:  "ffserver",
+	3826:  "warmux",
+	3827:  "netmpi",
+	3828:  "neteh",
+	3829:  "neteh-ext",
+	3830:  "cernsysmgmtagt",
+	3831:  "dvapps",
+	3832:  "xxnetserver",
+	3833:  "aipn-auth",
+	3834:  "spectardata",
+	3835:  "spectardb",
+	3836:  "markem-dcp",
+	3837:  "mkm-discovery",
+	3838:  "sos",
+	3839:  "amx-rms",
+	3840:  "flirtmitmir",
+	3841:  "shiprush-db-svr",
+	3842:  "nhci",
+	3843:  "quest-agent",
+	3844:  "rnm",
+	3845:  "v-one-spp",
+	3846:  "an-pcp",
+	3847:  "msfw-control",
+	3848:  "item",
+	3849:  "spw-dnspreload",
+	3850:  "qtms-bootstrap",
+	3851:  "spectraport",
+	3852:  "sse-app-config",
+	3853:  "sscan",
+	3854:  "stryker-com",
+	3855:  "opentrac",
+	3856:  "informer",
+	3857:  "trap-port",
+	3858:  "trap-port-mom",
+	3859:  "nav-port",
+	3860:  "sasp",
+	3861:  "winshadow-hd",
+	3862:  "giga-pocket",
+	3863:  "asap-tcp",
+	3864:  "asap-tcp-tls",
+	3865:  "xpl",
+	3866:  "dzdaemon",
+	3867:  "dzoglserver",
+	3868:  "diameter",
+	3869:  "ovsam-mgmt",
+	3870:  "ovsam-d-agent",
+	3871:  "avocent-adsap",
+	3872:  "oem-agent",
+	3873:  "fagordnc",
+	3874:  "sixxsconfig",
+	3875:  "pnbscada",
+	3876:  "dl-agent",
+	3877:  "xmpcr-interface",
+	3878:  "fotogcad",
+	3879:  "appss-lm",
+	3880:  "igrs",
+	3881:  "idac",
+	3882:  "msdts1",
+	3883:  "vrpn",
+	3884:  "softrack-meter",
+	3885:  "topflow-ssl",
+	3886:  "nei-management",
+	3887:  "ciphire-data",
+	3888:  "ciphire-serv",
+	3889:  "dandv-tester",
+	3890:  "ndsconnect",
+	3891:  "rtc-pm-port",
+	3892:  "pcc-image-port",
+	3893:  "cgi-starapi",
+	3894:  "syam-agent",
+	3895:  "syam-smc",
+	3896:  "sdo-tls",
+	3897:  "sdo-ssh",
+	3898:  "senip",
+	3899:  "itv-control",
+	3900:  "udt-os",
+	3901:  "nimsh",
+	3902:  "nimaux",
+	3903:  "charsetmgr",
+	3904:  "omnilink-port",
+	3905:  "mupdate",
+	3906:  "topovista-data",
+	3907:  "imoguia-port",
+	3908:  "hppronetman",
+	3909:  "surfcontrolcpa",
+	3910:  "prnrequest",
+	3911:  "prnstatus",
+	3912:  "gbmt-stars",
+	3913:  "listcrt-port",
+	3914:  "listcrt-port-2",
+	3915:  "agcat",
+	3916:  "wysdmc",
+	3917:  "aftmux",
+	3918:  "pktcablemmcops",
+	3919:  "hyperip",
+	3920:  "exasoftport1",
+	3921:  "herodotus-net",
+	3922:  "sor-update",
+	3923:  "symb-sb-port",
+	3924:  "mpl-gprs-port",
+	3925:  "zmp",
+	3926:  "winport",
+	3927:  "natdataservice",
+	3928:  "netboot-pxe",
+	3929:  "smauth-port",
+	3930:  "syam-webserver",
+	3931:  "msr-plugin-port",
+	3932:  "dyn-site",
+	3933:  "plbserve-port",
+	3934:  "sunfm-port",
+	3935:  "sdp-portmapper",
+	3936:  "mailprox",
+	3937:  "dvbservdsc",
+	3938:  "dbcontrol-agent",
+	3939:  "aamp",
+	3940:  "xecp-node",
+	3941:  "homeportal-web",
+	3942:  "srdp",
+	3943:  "tig",
+	3944:  "sops",
+	3945:  "emcads",
+	3946:  "backupedge",
+	3947:  "ccp",
+	3948:  "apdap",
+	3949:  "drip",
+	3950:  "namemunge",
+	3951:  "pwgippfax",
+	3952:  "i3-sessionmgr",
+	3953:  "xmlink-connect",
+	3954:  "adrep",
+	3955:  "p2pcommunity",
+	3956:  "gvcp",
+	3957:  "mqe-broker",
+	3958:  "mqe-agent",
+	3959:  "treehopper",
+	3960:  "bess",
+	3961:  "proaxess",
+	3962:  "sbi-agent",
+	3963:  "thrp",
+	3964:  "sasggprs",
+	3965:  "ati-ip-to-ncpe",
+	3966:  "bflckmgr",
+	3967:  "ppsms",
+	3968:  "ianywhere-dbns",
+	3969:  "landmarks",
+	3970:  "lanrevagent",
+	3971:  "lanrevserver",
+	3972:  "iconp",
+	3973:  "progistics",
+	3974:  "citysearch",
+	3975:  "airshot",
+	3976:  "opswagent",
+	3977:  "opswmanager",
+	3978:  "secure-cfg-svr",
+	3979:  "smwan",
+	3980:  "acms",
+	3981:  "starfish",
+	3982:  "eis",
+	3983:  "eisp",
+	3984:  "mapper-nodemgr",
+	3985:  "mapper-mapethd",
+	3986:  "mapper-ws-ethd",
+	3987:  "centerline",
+	3988:  "dcs-config",
+	3989:  "bv-queryengine",
+	3990:  "bv-is",
+	3991:  "bv-smcsrv",
+	3992:  "bv-ds",
+	3993:  "bv-agent",
+	3995:  "iss-mgmt-ssl",
+	3996:  "abcsoftware",
+	3997:  "agentsease-db",
+	3998:  "dnx",
+	3999:  "nvcnet",
+	4000:  "terabase",
+	4001:  "newoak",
+	4002:  "pxc-spvr-ft",
+	4003:  "pxc-splr-ft",
+	4004:  "pxc-roid",
+	4005:  "pxc-pin",
+	4006:  "pxc-spvr",
+	4007:  "pxc-splr",
+	4008:  "netcheque",
+	4009:  "chimera-hwm",
+	4010:  "samsung-unidex",
+	4011:  "altserviceboot",
+	4012:  "pda-gate",
+	4013:  "acl-manager",
+	4014:  "taiclock",
+	4015:  "talarian-mcast1",
+	4016:  "talarian-mcast2",
+	4017:  "talarian-mcast3",
+	4018:  "talarian-mcast4",
+	4019:  "talarian-mcast5",
+	4020:  "trap",
+	4021:  "nexus-portal",
+	4022:  "dnox",
+	4023:  "esnm-zoning",
+	4024:  "tnp1-port",
+	4025:  "partimage",
+	4026:  "as-debug",
+	4027:  "bxp",
+	4028:  "dtserver-port",
+	4029:  "ip-qsig",
+	4030:  "jdmn-port",
+	4031:  "suucp",
+	4032:  "vrts-auth-port",
+	4033:  "sanavigator",
+	4034:  "ubxd",
+	4035:  "wap-push-http",
+	4036:  "wap-push-https",
+	4037:  "ravehd",
+	4038:  "fazzt-ptp",
+	4039:  "fazzt-admin",
+	4040:  "yo-main",
+	4041:  "houston",
+	4042:  "ldxp",
+	4043:  "nirp",
+	4044:  "ltp",
+	4045:  "npp",
+	4046:  "acp-proto",
+	4047:  "ctp-state",
+	4049:  "wafs",
+	4050:  "cisco-wafs",
+	4051:  "cppdp",
+	4052:  "interact",
+	4053:  "ccu-comm-1",
+	4054:  "ccu-comm-2",
+	4055:  "ccu-comm-3",
+	4056:  "lms",
+	4057:  "wfm",
+	4058:  "kingfisher",
+	4059:  "dlms-cosem",
+	4060:  "dsmeter-iatc",
+	4061:  "ice-location",
+	4062:  "ice-slocation",
+	4063:  "ice-router",
+	4064:  "ice-srouter",
+	4065:  "avanti-cdp",
+	4066:  "pmas",
+	4067:  "idp",
+	4068:  "ipfltbcst",
+	4069:  "minger",
+	4070:  "tripe",
+	4071:  "aibkup",
+	4072:  "zieto-sock",
+	4073:  "iRAPP",
+	4074:  "cequint-cityid",
+	4075:  "perimlan",
+	4076:  "seraph",
+	4078:  "cssp",
+	4079:  "santools",
+	4080:  "lorica-in",
+	4081:  "lorica-in-sec",
+	4082:  "lorica-out",
+	4083:  "lorica-out-sec",
+	4085:  "ezmessagesrv",
+	4087:  "applusservice",
+	4088:  "npsp",
+	4089:  "opencore",
+	4090:  "omasgport",
+	4091:  "ewinstaller",
+	4092:  "ewdgs",
+	4093:  "pvxpluscs",
+	4094:  "sysrqd",
+	4095:  "xtgui",
+	4096:  "bre",
+	4097:  "patrolview",
+	4098:  "drmsfsd",
+	4099:  "dpcp",
+	4100:  "igo-incognito",
+	4101:  "brlp-0",
+	4102:  "brlp-1",
+	4103:  "brlp-2",
+	4104:  "brlp-3",
+	4105:  "shofar",
+	4106:  "synchronite",
+	4107:  "j-ac",
+	4108:  "accel",
+	4109:  "izm",
+	4110:  "g2tag",
+	4111:  "xgrid",
+	4112:  "apple-vpns-rp",
+	4113:  "aipn-reg",
+	4114:  "jomamqmonitor",
+	4115:  "cds",
+	4116:  "smartcard-tls",
+	4117:  "hillrserv",
+	4118:  "netscript",
+	4119:  "assuria-slm",
+	4120:  "minirem",
+	4121:  "e-builder",
+	4122:  "fprams",
+	4123:  "z-wave",
+	4124:  "tigv2",
+	4125:  "opsview-envoy",
+	4126:  "ddrepl",
+	4127:  "unikeypro",
+	4128:  "nufw",
+	4129:  "nuauth",
+	4130:  "fronet",
+	4131:  "stars",
+	4132:  "nuts-dem",
+	4133:  "nuts-bootp",
+	4134:  "nifty-hmi",
+	4135:  "cl-db-attach",
+	4136:  "cl-db-request",
+	4137:  "cl-db-remote",
+	4138:  "nettest",
+	4139:  "thrtx",
+	4140:  "cedros-fds",
+	4141:  "oirtgsvc",
+	4142:  "oidocsvc",
+	4143:  "oidsr",
+	4145:  "vvr-control",
+	4146:  "tgcconnect",
+	4147:  "vrxpservman",
+	4148:  "hhb-handheld",
+	4149:  "agslb",
+	4150:  "PowerAlert-nsa",
+	4151:  "menandmice-noh",
+	4152:  "idig-mux",
+	4153:  "mbl-battd",
+	4154:  "atlinks",
+	4155:  "bzr",
+	4156:  "stat-results",
+	4157:  "stat-scanner",
+	4158:  "stat-cc",
+	4159:  "nss",
+	4160:  "jini-discovery",
+	4161:  "omscontact",
+	4162:  "omstopology",
+	4163:  "silverpeakpeer",
+	4164:  "silverpeakcomm",
+	4165:  "altcp",
+	4166:  "joost",
+	4167:  "ddgn",
+	4168:  "pslicser",
+	4169:  "iadt",
+	4170:  "d-cinema-csp",
+	4171:  "ml-svnet",
+	4172:  "pcoip",
+	4174:  "smcluster",
+	4175:  "bccp",
+	4176:  "tl-ipcproxy",
+	4177:  "wello",
+	4178:  "storman",
+	4179:  "MaxumSP",
+	4180:  "httpx",
+	4181:  "macbak",
+	4182:  "pcptcpservice",
+	4183:  "cyborgnet",
+	4184:  "universe-suite",
+	4185:  "wcpp",
+	4186:  "boxbackupstore",
+	4187:  "csc-proxy",
+	4188:  "vatata",
+	4189:  "pcep",
+	4190:  "sieve",
+	4192:  "azeti",
+	4193:  "pvxplusio",
+	4197:  "hctl",
+	4199:  "eims-admin",
+	4300:  "corelccam",
+	4301:  "d-data",
+	4302:  "d-data-control",
+	4303:  "srcp",
+	4304:  "owserver",
+	4305:  "batman",
+	4306:  "pinghgl",
+	4307:  "trueconf",
+	4308:  "compx-lockview",
+	4309:  "dserver",
+	4310:  "mirrtex",
+	4311:  "p6ssmc",
+	4312:  "pscl-mgt",
+	4313:  "perrla",
+	4314:  "choiceview-agt",
+	4316:  "choiceview-clt",
+	4320:  "fdt-rcatp",
+	4321:  "rwhois",
+	4322:  "trim-event",
+	4323:  "trim-ice",
+	4325:  "geognosisman",
+	4326:  "geognosis",
+	4327:  "jaxer-web",
+	4328:  "jaxer-manager",
+	4329:  "publiqare-sync",
+	4330:  "dey-sapi",
+	4331:  "ktickets-rest",
+	4333:  "ahsp",
+	4334:  "netconf-ch-ssh",
+	4335:  "netconf-ch-tls",
+	4336:  "restconf-ch-tls",
+	4340:  "gaia",
+	4341:  "lisp-data",
+	4342:  "lisp-cons",
+	4343:  "unicall",
+	4344:  "vinainstall",
+	4345:  "m4-network-as",
+	4346:  "elanlm",
+	4347:  "lansurveyor",
+	4348:  "itose",
+	4349:  "fsportmap",
+	4350:  "net-device",
+	4351:  "plcy-net-svcs",
+	4352:  "pjlink",
+	4353:  "f5-iquery",
+	4354:  "qsnet-trans",
+	4355:  "qsnet-workst",
+	4356:  "qsnet-assist",
+	4357:  "qsnet-cond",
+	4358:  "qsnet-nucl",
+	4359:  "omabcastltkm",
+	4360:  "matrix-vnet",
+	4368:  "wxbrief",
+	4369:  "epmd",
+	4370:  "elpro-tunnel",
+	4371:  "l2c-control",
+	4372:  "l2c-data",
+	4373:  "remctl",
+	4374:  "psi-ptt",
+	4375:  "tolteces",
+	4376:  "bip",
+	4377:  "cp-spxsvr",
+	4378:  "cp-spxdpy",
+	4379:  "ctdb",
+	4389:  "xandros-cms",
+	4390:  "wiegand",
+	4391:  "apwi-imserver",
+	4392:  "apwi-rxserver",
+	4393:  "apwi-rxspooler",
+	4395:  "omnivisionesx",
+	4396:  "fly",
+	4400:  "ds-srv",
+	4401:  "ds-srvr",
+	4402:  "ds-clnt",
+	4403:  "ds-user",
+	4404:  "ds-admin",
+	4405:  "ds-mail",
+	4406:  "ds-slp",
+	4407:  "nacagent",
+	4408:  "slscc",
+	4409:  "netcabinet-com",
+	4410:  "itwo-server",
+	4411:  "found",
+	4413:  "avi-nms",
+	4414:  "updog",
+	4415:  "brcd-vr-req",
+	4416:  "pjj-player",
+	4417:  "workflowdir",
+	4419:  "cbp",
+	4420:  "nvm-express",
+	4421:  "scaleft",
+	4422:  "tsepisp",
+	4423:  "thingkit",
+	4425:  "netrockey6",
+	4426:  "beacon-port-2",
+	4427:  "drizzle",
+	4428:  "omviserver",
+	4429:  "omviagent",
+	4430:  "rsqlserver",
+	4431:  "wspipe",
+	4432:  "l-acoustics",
+	4433:  "vop",
+	4442:  "saris",
+	4443:  "pharos",
+	4444:  "krb524",
+	4445:  "upnotifyp",
+	4446:  "n1-fwp",
+	4447:  "n1-rmgmt",
+	4448:  "asc-slmd",
+	4449:  "privatewire",
+	4450:  "camp",
+	4451:  "ctisystemmsg",
+	4452:  "ctiprogramload",
+	4453:  "nssalertmgr",
+	4454:  "nssagentmgr",
+	4455:  "prchat-user",
+	4456:  "prchat-server",
+	4457:  "prRegister",
+	4458:  "mcp",
+	4484:  "hpssmgmt",
+	4485:  "assyst-dr",
+	4486:  "icms",
+	4487:  "prex-tcp",
+	4488:  "awacs-ice",
+	4500:  "ipsec-nat-t",
+	4535:  "ehs",
+	4536:  "ehs-ssl",
+	4537:  "wssauthsvc",
+	4538:  "swx-gate",
+	4545:  "worldscores",
+	4546:  "sf-lm",
+	4547:  "lanner-lm",
+	4548:  "synchromesh",
+	4549:  "aegate",
+	4550:  "gds-adppiw-db",
+	4551:  "ieee-mih",
+	4552:  "menandmice-mon",
+	4553:  "icshostsvc",
+	4554:  "msfrs",
+	4555:  "rsip",
+	4556:  "dtn-bundle",
+	4559:  "hylafax",
+	4563:  "amahi-anywhere",
+	4566:  "kwtc",
+	4567:  "tram",
+	4568:  "bmc-reporting",
+	4569:  "iax",
+	4570:  "deploymentmap",
+	4573:  "cardifftec-back",
+	4590:  "rid",
+	4591:  "l3t-at-an",
+	4593:  "ipt-anri-anri",
+	4594:  "ias-session",
+	4595:  "ias-paging",
+	4596:  "ias-neighbor",
+	4597:  "a21-an-1xbs",
+	4598:  "a16-an-an",
+	4599:  "a17-an-an",
+	4600:  "piranha1",
+	4601:  "piranha2",
+	4602:  "mtsserver",
+	4603:  "menandmice-upg",
+	4604:  "irp",
+	4605:  "sixchat",
+	4658:  "playsta2-app",
+	4659:  "playsta2-lob",
+	4660:  "smaclmgr",
+	4661:  "kar2ouche",
+	4662:  "oms",
+	4663:  "noteit",
+	4664:  "ems",
+	4665:  "contclientms",
+	4666:  "eportcomm",
+	4667:  "mmacomm",
+	4668:  "mmaeds",
+	4669:  "eportcommdata",
+	4670:  "light",
+	4671:  "acter",
+	4672:  "rfa",
+	4673:  "cxws",
+	4674:  "appiq-mgmt",
+	4675:  "dhct-status",
+	4676:  "dhct-alerts",
+	4677:  "bcs",
+	4678:  "traversal",
+	4679:  "mgesupervision",
+	4680:  "mgemanagement",
+	4681:  "parliant",
+	4682:  "finisar",
+	4683:  "spike",
+	4684:  "rfid-rp1",
+	4685:  "autopac",
+	4686:  "msp-os",
+	4687:  "nst",
+	4688:  "mobile-p2p",
+	4689:  "altovacentral",
+	4690:  "prelude",
+	4691:  "mtn",
+	4692:  "conspiracy",
+	4700:  "netxms-agent",
+	4701:  "netxms-mgmt",
+	4702:  "netxms-sync",
+	4703:  "npqes-test",
+	4704:  "assuria-ins",
+	4711:  "trinity-dist",
+	4725:  "truckstar",
+	4727:  "fcis",
+	4728:  "capmux",
+	4730:  "gearman",
+	4731:  "remcap",
+	4733:  "resorcs",
+	4737:  "ipdr-sp",
+	4738:  "solera-lpn",
+	4739:  "ipfix",
+	4740:  "ipfixs",
+	4741:  "lumimgrd",
+	4742:  "sicct",
+	4743:  "openhpid",
+	4744:  "ifsp",
+	4745:  "fmp",
+	4749:  "profilemac",
+	4750:  "ssad",
+	4751:  "spocp",
+	4752:  "snap",
+	4753:  "simon",
+	4756:  "RDCenter",
+	4774:  "converge",
+	4784:  "bfd-multi-ctl",
+	4786:  "smart-install",
+	4787:  "sia-ctrl-plane",
+	4788:  "xmcp",
+	4800:  "iims",
+	4801:  "iwec",
+	4802:  "ilss",
+	4803:  "notateit",
+	4827:  "htcp",
+	4837:  "varadero-0",
+	4838:  "varadero-1",
+	4839:  "varadero-2",
+	4840:  "opcua-tcp",
+	4841:  "quosa",
+	4842:  "gw-asv",
+	4843:  "opcua-tls",
+	4844:  "gw-log",
+	4845:  "wcr-remlib",
+	4846:  "contamac-icm",
+	4847:  "wfc",
+	4848:  "appserv-http",
+	4849:  "appserv-https",
+	4850:  "sun-as-nodeagt",
+	4851:  "derby-repli",
+	4867:  "unify-debug",
+	4868:  "phrelay",
+	4869:  "phrelaydbg",
+	4870:  "cc-tracking",
+	4871:  "wired",
+	4876:  "tritium-can",
+	4877:  "lmcs",
+	4879:  "wsdl-event",
+	4880:  "hislip",
+	4883:  "wmlserver",
+	4884:  "hivestor",
+	4885:  "abbs",
+	4894:  "lyskom",
+	4899:  "radmin-port",
+	4900:  "hfcs",
+	4901:  "flr-agent",
+	4902:  "magiccontrol",
+	4912:  "lutap",
+	4913:  "lutcp",
+	4914:  "bones",
+	4915:  "frcs",
+	4940:  "eq-office-4940",
+	4941:  "eq-office-4941",
+	4942:  "eq-office-4942",
+	4949:  "munin",
+	4950:  "sybasesrvmon",
+	4951:  "pwgwims",
+	4952:  "sagxtsds",
+	4953:  "dbsyncarbiter",
+	4969:  "ccss-qmm",
+	4970:  "ccss-qsm",
+	4971:  "burp",
+	4984:  "webyast",
+	4985:  "gerhcs",
+	4986:  "mrip",
+	4987:  "smar-se-port1",
+	4988:  "smar-se-port2",
+	4989:  "parallel",
+	4990:  "busycal",
+	4991:  "vrt",
+	4999:  "hfcs-manager",
+	5000:  "commplex-main",
+	5001:  "commplex-link",
+	5002:  "rfe",
+	5003:  "fmpro-internal",
+	5004:  "avt-profile-1",
+	5005:  "avt-profile-2",
+	5006:  "wsm-server",
+	5007:  "wsm-server-ssl",
+	5008:  "synapsis-edge",
+	5009:  "winfs",
+	5010:  "telelpathstart",
+	5011:  "telelpathattack",
+	5012:  "nsp",
+	5013:  "fmpro-v6",
+	5015:  "fmwp",
+	5020:  "zenginkyo-1",
+	5021:  "zenginkyo-2",
+	5022:  "mice",
+	5023:  "htuilsrv",
+	5024:  "scpi-telnet",
+	5025:  "scpi-raw",
+	5026:  "strexec-d",
+	5027:  "strexec-s",
+	5028:  "qvr",
+	5029:  "infobright",
+	5030:  "surfpass",
+	5032:  "signacert-agent",
+	5033:  "jtnetd-server",
+	5034:  "jtnetd-status",
+	5042:  "asnaacceler8db",
+	5043:  "swxadmin",
+	5044:  "lxi-evntsvc",
+	5045:  "osp",
+	5048:  "texai",
+	5049:  "ivocalize",
+	5050:  "mmcc",
+	5051:  "ita-agent",
+	5052:  "ita-manager",
+	5053:  "rlm",
+	5054:  "rlm-admin",
+	5055:  "unot",
+	5056:  "intecom-ps1",
+	5057:  "intecom-ps2",
+	5059:  "sds",
+	5060:  "sip",
+	5061:  "sips",
+	5062:  "na-localise",
+	5063:  "csrpc",
+	5064:  "ca-1",
+	5065:  "ca-2",
+	5066:  "stanag-5066",
+	5067:  "authentx",
+	5068:  "bitforestsrv",
+	5069:  "i-net-2000-npr",
+	5070:  "vtsas",
+	5071:  "powerschool",
+	5072:  "ayiya",
+	5073:  "tag-pm",
+	5074:  "alesquery",
+	5075:  "pvaccess",
+	5080:  "onscreen",
+	5081:  "sdl-ets",
+	5082:  "qcp",
+	5083:  "qfp",
+	5084:  "llrp",
+	5085:  "encrypted-llrp",
+	5086:  "aprigo-cs",
+	5087:  "biotic",
+	5093:  "sentinel-lm",
+	5094:  "hart-ip",
+	5099:  "sentlm-srv2srv",
+	5100:  "socalia",
+	5101:  "talarian-tcp",
+	5102:  "oms-nonsecure",
+	5103:  "actifio-c2c",
+	5106:  "actifioudsagent",
+	5107:  "actifioreplic",
+	5111:  "taep-as-svc",
+	5112:  "pm-cmdsvr",
+	5114:  "ev-services",
+	5115:  "autobuild",
+	5117:  "gradecam",
+	5120:  "barracuda-bbs",
+	5133:  "nbt-pc",
+	5134:  "ppactivation",
+	5135:  "erp-scale",
+	5137:  "ctsd",
+	5145:  "rmonitor-secure",
+	5146:  "social-alarm",
+	5150:  "atmp",
+	5151:  "esri-sde",
+	5152:  "sde-discovery",
+	5153:  "toruxserver",
+	5154:  "bzflag",
+	5155:  "asctrl-agent",
+	5156:  "rugameonline",
+	5157:  "mediat",
+	5161:  "snmpssh",
+	5162:  "snmpssh-trap",
+	5163:  "sbackup",
+	5164:  "vpa",
+	5165:  "ife-icorp",
+	5166:  "winpcs",
+	5167:  "scte104",
+	5168:  "scte30",
+	5172:  "pcoip-mgmt",
+	5190:  "aol",
+	5191:  "aol-1",
+	5192:  "aol-2",
+	5193:  "aol-3",
+	5194:  "cpscomm",
+	5195:  "ampl-lic",
+	5196:  "ampl-tableproxy",
+	5197:  "tunstall-lwp",
+	5200:  "targus-getdata",
+	5201:  "targus-getdata1",
+	5202:  "targus-getdata2",
+	5203:  "targus-getdata3",
+	5209:  "nomad",
+	5215:  "noteza",
+	5221:  "3exmp",
+	5222:  "xmpp-client",
+	5223:  "hpvirtgrp",
+	5224:  "hpvirtctrl",
+	5225:  "hp-server",
+	5226:  "hp-status",
+	5227:  "perfd",
+	5228:  "hpvroom",
+	5229:  "jaxflow",
+	5230:  "jaxflow-data",
+	5231:  "crusecontrol",
+	5232:  "csedaemon",
+	5233:  "enfs",
+	5234:  "eenet",
+	5235:  "galaxy-network",
+	5236:  "padl2sim",
+	5237:  "mnet-discovery",
+	5245:  "downtools",
+	5248:  "caacws",
+	5249:  "caaclang2",
+	5250:  "soagateway",
+	5251:  "caevms",
+	5252:  "movaz-ssc",
+	5253:  "kpdp",
+	5254:  "logcabin",
+	5264:  "3com-njack-1",
+	5265:  "3com-njack-2",
+	5269:  "xmpp-server",
+	5270:  "cartographerxmp",
+	5271:  "cuelink",
+	5272:  "pk",
+	5280:  "xmpp-bosh",
+	5281:  "undo-lm",
+	5282:  "transmit-port",
+	5298:  "presence",
+	5299:  "nlg-data",
+	5300:  "hacl-hb",
+	5301:  "hacl-gs",
+	5302:  "hacl-cfg",
+	5303:  "hacl-probe",
+	5304:  "hacl-local",
+	5305:  "hacl-test",
+	5306:  "sun-mc-grp",
+	5307:  "sco-aip",
+	5308:  "cfengine",
+	5309:  "jprinter",
+	5310:  "outlaws",
+	5312:  "permabit-cs",
+	5313:  "rrdp",
+	5314:  "opalis-rbt-ipc",
+	5315:  "hacl-poll",
+	5316:  "hpbladems",
+	5317:  "hpdevms",
+	5318:  "pkix-cmc",
+	5320:  "bsfserver-zn",
+	5321:  "bsfsvr-zn-ssl",
+	5343:  "kfserver",
+	5344:  "xkotodrcp",
+	5349:  "stuns",
+	5352:  "dns-llq",
+	5353:  "mdns",
+	5354:  "mdnsresponder",
+	5355:  "llmnr",
+	5356:  "ms-smlbiz",
+	5357:  "wsdapi",
+	5358:  "wsdapi-s",
+	5359:  "ms-alerter",
+	5360:  "ms-sideshow",
+	5361:  "ms-s-sideshow",
+	5362:  "serverwsd2",
+	5363:  "net-projection",
+	5397:  "stresstester",
+	5398:  "elektron-admin",
+	5399:  "securitychase",
+	5400:  "excerpt",
+	5401:  "excerpts",
+	5402:  "mftp",
+	5403:  "hpoms-ci-lstn",
+	5404:  "hpoms-dps-lstn",
+	5405:  "netsupport",
+	5406:  "systemics-sox",
+	5407:  "foresyte-clear",
+	5408:  "foresyte-sec",
+	5409:  "salient-dtasrv",
+	5410:  "salient-usrmgr",
+	5411:  "actnet",
+	5412:  "continuus",
+	5413:  "wwiotalk",
+	5414:  "statusd",
+	5415:  "ns-server",
+	5416:  "sns-gateway",
+	5417:  "sns-agent",
+	5418:  "mcntp",
+	5419:  "dj-ice",
+	5420:  "cylink-c",
+	5421:  "netsupport2",
+	5422:  "salient-mux",
+	5423:  "virtualuser",
+	5424:  "beyond-remote",
+	5425:  "br-channel",
+	5426:  "devbasic",
+	5427:  "sco-peer-tta",
+	5428:  "telaconsole",
+	5429:  "base",
+	5430:  "radec-corp",
+	5431:  "park-agent",
+	5432:  "postgresql",
+	5433:  "pyrrho",
+	5434:  "sgi-arrayd",
+	5435:  "sceanics",
+	5443:  "spss",
+	5445:  "smbdirect",
+	5450:  "tiepie",
+	5453:  "surebox",
+	5454:  "apc-5454",
+	5455:  "apc-5455",
+	5456:  "apc-5456",
+	5461:  "silkmeter",
+	5462:  "ttl-publisher",
+	5463:  "ttlpriceproxy",
+	5464:  "quailnet",
+	5465:  "netops-broker",
+	5470:  "apsolab-col",
+	5471:  "apsolab-cols",
+	5472:  "apsolab-tag",
+	5473:  "apsolab-tags",
+	5475:  "apsolab-data",
+	5500:  "fcp-addr-srvr1",
+	5501:  "fcp-addr-srvr2",
+	5502:  "fcp-srvr-inst1",
+	5503:  "fcp-srvr-inst2",
+	5504:  "fcp-cics-gw1",
+	5505:  "checkoutdb",
+	5506:  "amc",
+	5507:  "psl-management",
+	5550:  "cbus",
+	5553:  "sgi-eventmond",
+	5554:  "sgi-esphttp",
+	5555:  "personal-agent",
+	5556:  "freeciv",
+	5557:  "farenet",
+	5565:  "hpe-dp-bura",
+	5566:  "westec-connect",
+	5567:  "dof-dps-mc-sec",
+	5568:  "sdt",
+	5569:  "rdmnet-ctrl",
+	5573:  "sdmmp",
+	5574:  "lsi-bobcat",
+	5575:  "ora-oap",
+	5579:  "fdtracks",
+	5580:  "tmosms0",
+	5581:  "tmosms1",
+	5582:  "fac-restore",
+	5583:  "tmo-icon-sync",
+	5584:  "bis-web",
+	5585:  "bis-sync",
+	5586:  "att-mt-sms",
+	5597:  "ininmessaging",
+	5598:  "mctfeed",
+	5599:  "esinstall",
+	5600:  "esmmanager",
+	5601:  "esmagent",
+	5602:  "a1-msc",
+	5603:  "a1-bs",
+	5604:  "a3-sdunode",
+	5605:  "a4-sdunode",
+	5618:  "efr",
+	5627:  "ninaf",
+	5628:  "htrust",
+	5629:  "symantec-sfdb",
+	5630:  "precise-comm",
+	5631:  "pcanywheredata",
+	5632:  "pcanywherestat",
+	5633:  "beorl",
+	5634:  "xprtld",
+	5635:  "sfmsso",
+	5636:  "sfm-db-server",
+	5637:  "cssc",
+	5638:  "flcrs",
+	5639:  "ics",
+	5646:  "vfmobile",
+	5666:  "nrpe",
+	5670:  "filemq",
+	5671:  "amqps",
+	5672:  "amqp",
+	5673:  "jms",
+	5674:  "hyperscsi-port",
+	5675:  "v5ua",
+	5676:  "raadmin",
+	5677:  "questdb2-lnchr",
+	5678:  "rrac",
+	5679:  "dccm",
+	5680:  "auriga-router",
+	5681:  "ncxcp",
+	5688:  "ggz",
+	5689:  "qmvideo",
+	5693:  "rbsystem",
+	5696:  "kmip",
+	5700:  "supportassist",
+	5705:  "storageos",
+	5713:  "proshareaudio",
+	5714:  "prosharevideo",
+	5715:  "prosharedata",
+	5716:  "prosharerequest",
+	5717:  "prosharenotify",
+	5718:  "dpm",
+	5719:  "dpm-agent",
+	5720:  "ms-licensing",
+	5721:  "dtpt",
+	5722:  "msdfsr",
+	5723:  "omhs",
+	5724:  "omsdk",
+	5725:  "ms-ilm",
+	5726:  "ms-ilm-sts",
+	5727:  "asgenf",
+	5728:  "io-dist-data",
+	5729:  "openmail",
+	5730:  "unieng",
+	5741:  "ida-discover1",
+	5742:  "ida-discover2",
+	5743:  "watchdoc-pod",
+	5744:  "watchdoc",
+	5745:  "fcopy-server",
+	5746:  "fcopys-server",
+	5747:  "tunatic",
+	5748:  "tunalyzer",
+	5750:  "rscd",
+	5755:  "openmailg",
+	5757:  "x500ms",
+	5766:  "openmailns",
+	5767:  "s-openmail",
+	5768:  "openmailpxy",
+	5769:  "spramsca",
+	5770:  "spramsd",
+	5771:  "netagent",
+	5777:  "dali-port",
+	5780:  "vts-rpc",
+	5781:  "3par-evts",
+	5782:  "3par-mgmt",
+	5783:  "3par-mgmt-ssl",
+	5785:  "3par-rcopy",
+	5793:  "xtreamx",
+	5813:  "icmpd",
+	5814:  "spt-automation",
+	5841:  "shiprush-d-ch",
+	5842:  "reversion",
+	5859:  "wherehoo",
+	5863:  "ppsuitemsg",
+	5868:  "diameters",
+	5883:  "jute",
+	5900:  "rfb",
+	5910:  "cm",
+	5911:  "cpdlc",
+	5912:  "fis",
+	5913:  "ads-c",
+	5963:  "indy",
+	5968:  "mppolicy-v5",
+	5969:  "mppolicy-mgr",
+	5984:  "couchdb",
+	5985:  "wsman",
+	5986:  "wsmans",
+	5987:  "wbem-rmi",
+	5988:  "wbem-http",
+	5989:  "wbem-https",
+	5990:  "wbem-exp-https",
+	5991:  "nuxsl",
+	5992:  "consul-insight",
+	5993:  "cim-rs",
+	5999:  "cvsup",
+	6064:  "ndl-ahp-svc",
+	6065:  "winpharaoh",
+	6066:  "ewctsp",
+	6068:  "gsmp-ancp",
+	6069:  "trip",
+	6070:  "messageasap",
+	6071:  "ssdtp",
+	6072:  "diagnose-proc",
+	6073:  "directplay8",
+	6074:  "max",
+	6075:  "dpm-acm",
+	6076:  "msft-dpm-cert",
+	6077:  "iconstructsrv",
+	6084:  "reload-config",
+	6085:  "konspire2b",
+	6086:  "pdtp",
+	6087:  "ldss",
+	6088:  "doglms",
+	6099:  "raxa-mgmt",
+	6100:  "synchronet-db",
+	6101:  "synchronet-rtc",
+	6102:  "synchronet-upd",
+	6103:  "rets",
+	6104:  "dbdb",
+	6105:  "primaserver",
+	6106:  "mpsserver",
+	6107:  "etc-control",
+	6108:  "sercomm-scadmin",
+	6109:  "globecast-id",
+	6110:  "softcm",
+	6111:  "spc",
+	6112:  "dtspcd",
+	6113:  "dayliteserver",
+	6114:  "wrspice",
+	6115:  "xic",
+	6116:  "xtlserv",
+	6117:  "daylitetouch",
+	6121:  "spdy",
+	6122:  "bex-webadmin",
+	6123:  "backup-express",
+	6124:  "pnbs",
+	6130:  "damewaremobgtwy",
+	6133:  "nbt-wol",
+	6140:  "pulsonixnls",
+	6141:  "meta-corp",
+	6142:  "aspentec-lm",
+	6143:  "watershed-lm",
+	6144:  "statsci1-lm",
+	6145:  "statsci2-lm",
+	6146:  "lonewolf-lm",
+	6147:  "montage-lm",
+	6148:  "ricardo-lm",
+	6149:  "tal-pod",
+	6159:  "efb-aci",
+	6160:  "ecmp",
+	6161:  "patrol-ism",
+	6162:  "patrol-coll",
+	6163:  "pscribe",
+	6200:  "lm-x",
+	6209:  "qmtps",
+	6222:  "radmind",
+	6241:  "jeol-nsdtp-1",
+	6242:  "jeol-nsdtp-2",
+	6243:  "jeol-nsdtp-3",
+	6244:  "jeol-nsdtp-4",
+	6251:  "tl1-raw-ssl",
+	6252:  "tl1-ssh",
+	6253:  "crip",
+	6267:  "gld",
+	6268:  "grid",
+	6269:  "grid-alt",
+	6300:  "bmc-grx",
+	6301:  "bmc-ctd-ldap",
+	6306:  "ufmp",
+	6315:  "scup",
+	6316:  "abb-escp",
+	6317:  "nav-data-cmd",
+	6320:  "repsvc",
+	6321:  "emp-server1",
+	6322:  "emp-server2",
+	6324:  "hrd-ncs",
+	6325:  "dt-mgmtsvc",
+	6326:  "dt-vra",
+	6343:  "sflow",
+	6344:  "streletz",
+	6346:  "gnutella-svc",
+	6347:  "gnutella-rtr",
+	6350:  "adap",
+	6355:  "pmcs",
+	6360:  "metaedit-mu",
+	6370:  "metaedit-se",
+	6379:  "redis",
+	6382:  "metatude-mds",
+	6389:  "clariion-evr01",
+	6390:  "metaedit-ws",
+	6417:  "faxcomservice",
+	6418:  "syserverremote",
+	6419:  "svdrp",
+	6420:  "nim-vdrshell",
+	6421:  "nim-wan",
+	6432:  "pgbouncer",
+	6442:  "tarp",
+	6443:  "sun-sr-https",
+	6444:  "sge-qmaster",
+	6445:  "sge-execd",
+	6446:  "mysql-proxy",
+	6455:  "skip-cert-recv",
+	6456:  "skip-cert-send",
+	6464:  "ieee11073-20701",
+	6471:  "lvision-lm",
+	6480:  "sun-sr-http",
+	6481:  "servicetags",
+	6482:  "ldoms-mgmt",
+	6483:  "SunVTS-RMI",
+	6484:  "sun-sr-jms",
+	6485:  "sun-sr-iiop",
+	6486:  "sun-sr-iiops",
+	6487:  "sun-sr-iiop-aut",
+	6488:  "sun-sr-jmx",
+	6489:  "sun-sr-admin",
+	6500:  "boks",
+	6501:  "boks-servc",
+	6502:  "boks-servm",
+	6503:  "boks-clntd",
+	6505:  "badm-priv",
+	6506:  "badm-pub",
+	6507:  "bdir-priv",
+	6508:  "bdir-pub",
+	6509:  "mgcs-mfp-port",
+	6510:  "mcer-port",
+	6513:  "netconf-tls",
+	6514:  "syslog-tls",
+	6515:  "elipse-rec",
+	6543:  "lds-distrib",
+	6544:  "lds-dump",
+	6547:  "apc-6547",
+	6548:  "apc-6548",
+	6549:  "apc-6549",
+	6550:  "fg-sysupdate",
+	6551:  "sum",
+	6558:  "xdsxdm",
+	6566:  "sane-port",
+	6568:  "canit-store",
+	6579:  "affiliate",
+	6580:  "parsec-master",
+	6581:  "parsec-peer",
+	6582:  "parsec-game",
+	6583:  "joaJewelSuite",
+	6600:  "mshvlm",
+	6601:  "mstmg-sstp",
+	6602:  "wsscomfrmwk",
+	6619:  "odette-ftps",
+	6620:  "kftp-data",
+	6621:  "kftp",
+	6622:  "mcftp",
+	6623:  "ktelnet",
+	6624:  "datascaler-db",
+	6625:  "datascaler-ctl",
+	6626:  "wago-service",
+	6627:  "nexgen",
+	6628:  "afesc-mc",
+	6629:  "nexgen-aux",
+	6632:  "mxodbc-connect",
+	6640:  "ovsdb",
+	6653:  "openflow",
+	6655:  "pcs-sf-ui-man",
+	6656:  "emgmsg",
+	6670:  "vocaltec-gold",
+	6671:  "p4p-portal",
+	6672:  "vision-server",
+	6673:  "vision-elmd",
+	6678:  "vfbp",
+	6679:  "osaut",
+	6687:  "clever-ctrace",
+	6688:  "clever-tcpip",
+	6689:  "tsa",
+	6690:  "cleverdetect",
+	6697:  "ircs-u",
+	6701:  "kti-icad-srvr",
+	6702:  "e-design-net",
+	6703:  "e-design-web",
+	6714:  "ibprotocol",
+	6715:  "fibotrader-com",
+	6716:  "princity-agent",
+	6767:  "bmc-perf-agent",
+	6768:  "bmc-perf-mgrd",
+	6769:  "adi-gxp-srvprt",
+	6770:  "plysrv-http",
+	6771:  "plysrv-https",
+	6777:  "ntz-tracker",
+	6778:  "ntz-p2p-storage",
+	6785:  "dgpf-exchg",
+	6786:  "smc-jmx",
+	6787:  "smc-admin",
+	6788:  "smc-http",
+	6789:  "radg",
+	6790:  "hnmp",
+	6791:  "hnm",
+	6801:  "acnet",
+	6817:  "pentbox-sim",
+	6831:  "ambit-lm",
+	6841:  "netmo-default",
+	6842:  "netmo-http",
+	6850:  "iccrushmore",
+	6868:  "acctopus-cc",
+	6888:  "muse",
+	6900:  "rtimeviewer",
+	6901:  "jetstream",
+	6935:  "ethoscan",
+	6936:  "xsmsvc",
+	6946:  "bioserver",
+	6951:  "otlp",
+	6961:  "jmact3",
+	6962:  "jmevt2",
+	6963:  "swismgr1",
+	6964:  "swismgr2",
+	6965:  "swistrap",
+	6966:  "swispol",
+	6969:  "acmsoda",
+	6970:  "conductor",
+	6997:  "MobilitySrv",
+	6998:  "iatp-highpri",
+	6999:  "iatp-normalpri",
+	7000:  "afs3-fileserver",
+	7001:  "afs3-callback",
+	7002:  "afs3-prserver",
+	7003:  "afs3-vlserver",
+	7004:  "afs3-kaserver",
+	7005:  "afs3-volser",
+	7006:  "afs3-errors",
+	7007:  "afs3-bos",
+	7008:  "afs3-update",
+	7009:  "afs3-rmtsys",
+	7010:  "ups-onlinet",
+	7011:  "talon-disc",
+	7012:  "talon-engine",
+	7013:  "microtalon-dis",
+	7014:  "microtalon-com",
+	7015:  "talon-webserver",
+	7016:  "spg",
+	7017:  "grasp",
+	7018:  "fisa-svc",
+	7019:  "doceri-ctl",
+	7020:  "dpserve",
+	7021:  "dpserveadmin",
+	7022:  "ctdp",
+	7023:  "ct2nmcs",
+	7024:  "vmsvc",
+	7025:  "vmsvc-2",
+	7030:  "op-probe",
+	7031:  "iposplanet",
+	7070:  "arcp",
+	7071:  "iwg1",
+	7073:  "martalk",
+	7080:  "empowerid",
+	7099:  "lazy-ptop",
+	7100:  "font-service",
+	7101:  "elcn",
+	7117:  "rothaga",
+	7121:  "virprot-lm",
+	7128:  "scenidm",
+	7129:  "scenccs",
+	7161:  "cabsm-comm",
+	7162:  "caistoragemgr",
+	7163:  "cacsambroker",
+	7164:  "fsr",
+	7165:  "doc-server",
+	7166:  "aruba-server",
+	7167:  "casrmagent",
+	7168:  "cnckadserver",
+	7169:  "ccag-pib",
+	7170:  "nsrp",
+	7171:  "drm-production",
+	7172:  "metalbend",
+	7173:  "zsecure",
+	7174:  "clutild",
+	7200:  "fodms",
+	7201:  "dlip",
+	7202:  "pon-ictp",
+	7215:  "PS-Server",
+	7216:  "PS-Capture-Pro",
+	7227:  "ramp",
+	7228:  "citrixupp",
+	7229:  "citrixuppg",
+	7236:  "display",
+	7237:  "pads",
+	7244:  "frc-hicp",
+	7262:  "cnap",
+	7272:  "watchme-7272",
+	7273:  "oma-rlp",
+	7274:  "oma-rlp-s",
+	7275:  "oma-ulp",
+	7276:  "oma-ilp",
+	7277:  "oma-ilp-s",
+	7278:  "oma-dcdocbs",
+	7279:  "ctxlic",
+	7280:  "itactionserver1",
+	7281:  "itactionserver2",
+	7282:  "mzca-action",
+	7283:  "genstat",
+	7365:  "lcm-server",
+	7391:  "mindfilesys",
+	7392:  "mrssrendezvous",
+	7393:  "nfoldman",
+	7394:  "fse",
+	7395:  "winqedit",
+	7397:  "hexarc",
+	7400:  "rtps-discovery",
+	7401:  "rtps-dd-ut",
+	7402:  "rtps-dd-mt",
+	7410:  "ionixnetmon",
+	7411:  "daqstream",
+	7421:  "mtportmon",
+	7426:  "pmdmgr",
+	7427:  "oveadmgr",
+	7428:  "ovladmgr",
+	7429:  "opi-sock",
+	7430:  "xmpv7",
+	7431:  "pmd",
+	7437:  "faximum",
+	7443:  "oracleas-https",
+	7471:  "sttunnel",
+	7473:  "rise",
+	7474:  "neo4j",
+	7478:  "openit",
+	7491:  "telops-lmd",
+	7500:  "silhouette",
+	7501:  "ovbus",
+	7508:  "adcp",
+	7509:  "acplt",
+	7510:  "ovhpas",
+	7511:  "pafec-lm",
+	7542:  "saratoga",
+	7543:  "atul",
+	7544:  "nta-ds",
+	7545:  "nta-us",
+	7546:  "cfs",
+	7547:  "cwmp",
+	7548:  "tidp",
+	7549:  "nls-tl",
+	7551:  "controlone-con",
+	7560:  "sncp",
+	7563:  "cfw",
+	7566:  "vsi-omega",
+	7569:  "dell-eql-asm",
+	7570:  "aries-kfinder",
+	7574:  "coherence",
+	7588:  "sun-lm",
+	7606:  "mipi-debug",
+	7624:  "indi",
+	7626:  "simco",
+	7627:  "soap-http",
+	7628:  "zen-pawn",
+	7629:  "xdas",
+	7630:  "hawk",
+	7631:  "tesla-sys-msg",
+	7633:  "pmdfmgt",
+	7648:  "cuseeme",
+	7672:  "imqstomp",
+	7673:  "imqstomps",
+	7674:  "imqtunnels",
+	7675:  "imqtunnel",
+	7676:  "imqbrokerd",
+	7677:  "sun-user-https",
+	7680:  "pando-pub",
+	7683:  "dmt",
+	7687:  "bolt",
+	7689:  "collaber",
+	7697:  "klio",
+	7700:  "em7-secom",
+	7707:  "sync-em7",
+	7708:  "scinet",
+	7720:  "medimageportal",
+	7724:  "nsdeepfreezectl",
+	7725:  "nitrogen",
+	7726:  "freezexservice",
+	7727:  "trident-data",
+	7728:  "osvr",
+	7734:  "smip",
+	7738:  "aiagent",
+	7741:  "scriptview",
+	7742:  "msss",
+	7743:  "sstp-1",
+	7744:  "raqmon-pdu",
+	7747:  "prgp",
+	7775:  "inetfs",
+	7777:  "cbt",
+	7778:  "interwise",
+	7779:  "vstat",
+	7781:  "accu-lmgr",
+	7786:  "minivend",
+	7787:  "popup-reminders",
+	7789:  "office-tools",
+	7794:  "q3ade",
+	7797:  "pnet-conn",
+	7798:  "pnet-enc",
+	7799:  "altbsdp",
+	7800:  "asr",
+	7801:  "ssp-client",
+	7810:  "rbt-wanopt",
+	7845:  "apc-7845",
+	7846:  "apc-7846",
+	7847:  "csoauth",
+	7869:  "mobileanalyzer",
+	7870:  "rbt-smc",
+	7871:  "mdm",
+	7878:  "owms",
+	7880:  "pss",
+	7887:  "ubroker",
+	7900:  "mevent",
+	7901:  "tnos-sp",
+	7902:  "tnos-dp",
+	7903:  "tnos-dps",
+	7913:  "qo-secure",
+	7932:  "t2-drm",
+	7933:  "t2-brm",
+	7962:  "generalsync",
+	7967:  "supercell",
+	7979:  "micromuse-ncps",
+	7980:  "quest-vista",
+	7981:  "sossd-collect",
+	7982:  "sossd-agent",
+	7997:  "pushns",
+	7999:  "irdmi2",
+	8000:  "irdmi",
+	8001:  "vcom-tunnel",
+	8002:  "teradataordbms",
+	8003:  "mcreport",
+	8005:  "mxi",
+	8006:  "wpl-analytics",
+	8007:  "warppipe",
+	8008:  "http-alt",
+	8019:  "qbdb",
+	8020:  "intu-ec-svcdisc",
+	8021:  "intu-ec-client",
+	8022:  "oa-system",
+	8025:  "ca-audit-da",
+	8026:  "ca-audit-ds",
+	8032:  "pro-ed",
+	8033:  "mindprint",
+	8034:  "vantronix-mgmt",
+	8040:  "ampify",
+	8041:  "enguity-xccetp",
+	8042:  "fs-agent",
+	8043:  "fs-server",
+	8044:  "fs-mgmt",
+	8051:  "rocrail",
+	8052:  "senomix01",
+	8053:  "senomix02",
+	8054:  "senomix03",
+	8055:  "senomix04",
+	8056:  "senomix05",
+	8057:  "senomix06",
+	8058:  "senomix07",
+	8059:  "senomix08",
+	8066:  "toad-bi-appsrvr",
+	8067:  "infi-async",
+	8070:  "ucs-isc",
+	8074:  "gadugadu",
+	8077:  "mles",
+	8080:  "http-alt",
+	8081:  "sunproxyadmin",
+	8082:  "us-cli",
+	8083:  "us-srv",
+	8086:  "d-s-n",
+	8087:  "simplifymedia",
+	8088:  "radan-http",
+	8090:  "opsmessaging",
+	8091:  "jamlink",
+	8097:  "sac",
+	8100:  "xprint-server",
+	8101:  "ldoms-migr",
+	8102:  "kz-migr",
+	8115:  "mtl8000-matrix",
+	8116:  "cp-cluster",
+	8117:  "purityrpc",
+	8118:  "privoxy",
+	8121:  "apollo-data",
+	8122:  "apollo-admin",
+	8128:  "paycash-online",
+	8129:  "paycash-wbp",
+	8130:  "indigo-vrmi",
+	8131:  "indigo-vbcp",
+	8132:  "dbabble",
+	8140:  "puppet",
+	8148:  "isdd",
+	8153:  "quantastor",
+	8160:  "patrol",
+	8161:  "patrol-snmp",
+	8162:  "lpar2rrd",
+	8181:  "intermapper",
+	8182:  "vmware-fdm",
+	8183:  "proremote",
+	8184:  "itach",
+	8190:  "gcp-rphy",
+	8191:  "limnerpressure",
+	8192:  "spytechphone",
+	8194:  "blp1",
+	8195:  "blp2",
+	8199:  "vvr-data",
+	8200:  "trivnet1",
+	8201:  "trivnet2",
+	8204:  "lm-perfworks",
+	8205:  "lm-instmgr",
+	8206:  "lm-dta",
+	8207:  "lm-sserver",
+	8208:  "lm-webwatcher",
+	8230:  "rexecj",
+	8243:  "synapse-nhttps",
+	8270:  "robot-remote",
+	8276:  "pando-sec",
+	8280:  "synapse-nhttp",
+	8282:  "libelle",
+	8292:  "blp3",
+	8293:  "hiperscan-id",
+	8294:  "blp4",
+	8300:  "tmi",
+	8301:  "amberon",
+	8313:  "hub-open-net",
+	8320:  "tnp-discover",
+	8321:  "tnp",
+	8322:  "garmin-marine",
+	8351:  "server-find",
+	8376:  "cruise-enum",
+	8377:  "cruise-swroute",
+	8378:  "cruise-config",
+	8379:  "cruise-diags",
+	8380:  "cruise-update",
+	8383:  "m2mservices",
+	8400:  "cvd",
+	8401:  "sabarsd",
+	8402:  "abarsd",
+	8403:  "admind",
+	8404:  "svcloud",
+	8405:  "svbackup",
+	8415:  "dlpx-sp",
+	8416:  "espeech",
+	8417:  "espeech-rtp",
+	8423:  "aritts",
+	8442:  "cybro-a-bus",
+	8443:  "pcsync-https",
+	8444:  "pcsync-http",
+	8445:  "copy",
+	8450:  "npmp",
+	8457:  "nexentamv",
+	8470:  "cisco-avp",
+	8471:  "pim-port",
+	8472:  "otv",
+	8473:  "vp2p",
+	8474:  "noteshare",
+	8500:  "fmtp",
+	8501:  "cmtp-mgt",
+	8502:  "ftnmtp",
+	8554:  "rtsp-alt",
+	8555:  "d-fence",
+	8567:  "dof-tunnel",
+	8600:  "asterix",
+	8610:  "canon-mfnp",
+	8611:  "canon-bjnp1",
+	8612:  "canon-bjnp2",
+	8613:  "canon-bjnp3",
+	8614:  "canon-bjnp4",
+	8615:  "imink",
+	8665:  "monetra",
+	8666:  "monetra-admin",
+	8675:  "msi-cps-rm",
+	8686:  "sun-as-jmxrmi",
+	8688:  "openremote-ctrl",
+	8699:  "vnyx",
+	8711:  "nvc",
+	8733:  "ibus",
+	8750:  "dey-keyneg",
+	8763:  "mc-appserver",
+	8764:  "openqueue",
+	8765:  "ultraseek-http",
+	8766:  "amcs",
+	8770:  "dpap",
+	8778:  "uec",
+	8786:  "msgclnt",
+	8787:  "msgsrvr",
+	8793:  "acd-pm",
+	8800:  "sunwebadmin",
+	8804:  "truecm",
+	8873:  "dxspider",
+	8880:  "cddbp-alt",
+	8881:  "galaxy4d",
+	8883:  "secure-mqtt",
+	8888:  "ddi-tcp-1",
+	8889:  "ddi-tcp-2",
+	8890:  "ddi-tcp-3",
+	8891:  "ddi-tcp-4",
+	8892:  "ddi-tcp-5",
+	8893:  "ddi-tcp-6",
+	8894:  "ddi-tcp-7",
+	8899:  "ospf-lite",
+	8900:  "jmb-cds1",
+	8901:  "jmb-cds2",
+	8910:  "manyone-http",
+	8911:  "manyone-xml",
+	8912:  "wcbackup",
+	8913:  "dragonfly",
+	8937:  "twds",
+	8953:  "ub-dns-control",
+	8954:  "cumulus-admin",
+	8980:  "nod-provider",
+	8989:  "sunwebadmins",
+	8990:  "http-wmap",
+	8991:  "https-wmap",
+	8997:  "oracle-ms-ens",
+	8998:  "canto-roboflow",
+	8999:  "bctp",
+	9000:  "cslistener",
+	9001:  "etlservicemgr",
+	9002:  "dynamid",
+	9005:  "golem",
+	9008:  "ogs-server",
+	9009:  "pichat",
+	9010:  "sdr",
+	9020:  "tambora",
+	9021:  "panagolin-ident",
+	9022:  "paragent",
+	9023:  "swa-1",
+	9024:  "swa-2",
+	9025:  "swa-3",
+	9026:  "swa-4",
+	9050:  "versiera",
+	9051:  "fio-cmgmt",
+	9060:  "CardWeb-IO",
+	9080:  "glrpc",
+	9083:  "emc-pp-mgmtsvc",
+	9084:  "aurora",
+	9085:  "ibm-rsyscon",
+	9086:  "net2display",
+	9087:  "classic",
+	9088:  "sqlexec",
+	9089:  "sqlexec-ssl",
+	9090:  "websm",
+	9091:  "xmltec-xmlmail",
+	9092:  "XmlIpcRegSvc",
+	9093:  "copycat",
+	9100:  "hp-pdl-datastr",
+	9101:  "bacula-dir",
+	9102:  "bacula-fd",
+	9103:  "bacula-sd",
+	9104:  "peerwire",
+	9105:  "xadmin",
+	9106:  "astergate",
+	9107:  "astergatefax",
+	9119:  "mxit",
+	9122:  "grcmp",
+	9123:  "grcp",
+	9131:  "dddp",
+	9160:  "apani1",
+	9161:  "apani2",
+	9162:  "apani3",
+	9163:  "apani4",
+	9164:  "apani5",
+	9191:  "sun-as-jpda",
+	9200:  "wap-wsp",
+	9201:  "wap-wsp-wtp",
+	9202:  "wap-wsp-s",
+	9203:  "wap-wsp-wtp-s",
+	9204:  "wap-vcard",
+	9205:  "wap-vcal",
+	9206:  "wap-vcard-s",
+	9207:  "wap-vcal-s",
+	9208:  "rjcdb-vcards",
+	9209:  "almobile-system",
+	9210:  "oma-mlp",
+	9211:  "oma-mlp-s",
+	9212:  "serverviewdbms",
+	9213:  "serverstart",
+	9214:  "ipdcesgbs",
+	9215:  "insis",
+	9216:  "acme",
+	9217:  "fsc-port",
+	9222:  "teamcoherence",
+	9255:  "mon",
+	9278:  "pegasus",
+	9279:  "pegasus-ctl",
+	9280:  "pgps",
+	9281:  "swtp-port1",
+	9282:  "swtp-port2",
+	9283:  "callwaveiam",
+	9284:  "visd",
+	9285:  "n2h2server",
+	9287:  "cumulus",
+	9292:  "armtechdaemon",
+	9293:  "storview",
+	9294:  "armcenterhttp",
+	9295:  "armcenterhttps",
+	9300:  "vrace",
+	9306:  "sphinxql",
+	9312:  "sphinxapi",
+	9318:  "secure-ts",
+	9321:  "guibase",
+	9343:  "mpidcmgr",
+	9344:  "mphlpdmc",
+	9345:  "rancher",
+	9346:  "ctechlicensing",
+	9374:  "fjdmimgr",
+	9380:  "boxp",
+	9387:  "d2dconfig",
+	9388:  "d2ddatatrans",
+	9389:  "adws",
+	9390:  "otp",
+	9396:  "fjinvmgr",
+	9397:  "mpidcagt",
+	9400:  "sec-t4net-srv",
+	9401:  "sec-t4net-clt",
+	9402:  "sec-pc2fax-srv",
+	9418:  "git",
+	9443:  "tungsten-https",
+	9444:  "wso2esb-console",
+	9445:  "mindarray-ca",
+	9450:  "sntlkeyssrvr",
+	9500:  "ismserver",
+	9535:  "mngsuite",
+	9536:  "laes-bf",
+	9555:  "trispen-sra",
+	9592:  "ldgateway",
+	9593:  "cba8",
+	9594:  "msgsys",
+	9595:  "pds",
+	9596:  "mercury-disc",
+	9597:  "pd-admin",
+	9598:  "vscp",
+	9599:  "robix",
+	9600:  "micromuse-ncpw",
+	9612:  "streamcomm-ds",
+	9614:  "iadt-tls",
+	9616:  "erunbook-agent",
+	9617:  "erunbook-server",
+	9618:  "condor",
+	9628:  "odbcpathway",
+	9629:  "uniport",
+	9630:  "peoctlr",
+	9631:  "peocoll",
+	9640:  "pqsflows",
+	9666:  "zoomcp",
+	9667:  "xmms2",
+	9668:  "tec5-sdctp",
+	9694:  "client-wakeup",
+	9695:  "ccnx",
+	9700:  "board-roar",
+	9747:  "l5nas-parchan",
+	9750:  "board-voip",
+	9753:  "rasadv",
+	9762:  "tungsten-http",
+	9800:  "davsrc",
+	9801:  "sstp-2",
+	9802:  "davsrcs",
+	9875:  "sapv1",
+	9876:  "sd",
+	9888:  "cyborg-systems",
+	9889:  "gt-proxy",
+	9898:  "monkeycom",
+	9900:  "iua",
+	9909:  "domaintime",
+	9911:  "sype-transport",
+	9925:  "xybrid-cloud",
+	9950:  "apc-9950",
+	9951:  "apc-9951",
+	9952:  "apc-9952",
+	9953:  "acis",
+	9954:  "hinp",
+	9955:  "alljoyn-stm",
+	9966:  "odnsp",
+	9978:  "xybrid-rt",
+	9979:  "visweather",
+	9981:  "pumpkindb",
+	9987:  "dsm-scm-target",
+	9988:  "nsesrvr",
+	9990:  "osm-appsrvr",
+	9991:  "osm-oev",
+	9992:  "palace-1",
+	9993:  "palace-2",
+	9994:  "palace-3",
+	9995:  "palace-4",
+	9996:  "palace-5",
+	9997:  "palace-6",
+	9998:  "distinct32",
+	9999:  "distinct",
+	10000: "ndmp",
+	10001: "scp-config",
+	10002: "documentum",
+	10003: "documentum-s",
+	10004: "emcrmirccd",
+	10005: "emcrmird",
+	10006: "netapp-sync",
+	10007: "mvs-capacity",
+	10008: "octopus",
+	10009: "swdtp-sv",
+	10010: "rxapi",
+	10020: "abb-hw",
+	10050: "zabbix-agent",
+	10051: "zabbix-trapper",
+	10055: "qptlmd",
+	10080: "amanda",
+	10081: "famdc",
+	10100: "itap-ddtp",
+	10101: "ezmeeting-2",
+	10102: "ezproxy-2",
+	10103: "ezrelay",
+	10104: "swdtp",
+	10107: "bctp-server",
+	10110: "nmea-0183",
+	10113: "netiq-endpoint",
+	10114: "netiq-qcheck",
+	10115: "netiq-endpt",
+	10116: "netiq-voipa",
+	10117: "iqrm",
+	10125: "cimple",
+	10128: "bmc-perf-sd",
+	10129: "bmc-gms",
+	10160: "qb-db-server",
+	10161: "snmptls",
+	10162: "snmptls-trap",
+	10200: "trisoap",
+	10201: "rsms",
+	10252: "apollo-relay",
+	10260: "axis-wimp-port",
+	10261: "tile-ml",
+	10288: "blocks",
+	10321: "cosir",
+	10540: "MOS-lower",
+	10541: "MOS-upper",
+	10542: "MOS-aux",
+	10543: "MOS-soap",
+	10544: "MOS-soap-opt",
+	10548: "serverdocs",
+	10631: "printopia",
+	10800: "gap",
+	10805: "lpdg",
+	10809: "nbd",
+	10860: "helix",
+	10880: "bveapi",
+	10933: "octopustentacle",
+	10990: "rmiaux",
+	11000: "irisa",
+	11001: "metasys",
+	11095: "weave",
+	11103: "origo-sync",
+	11104: "netapp-icmgmt",
+	11105: "netapp-icdata",
+	11106: "sgi-lk",
+	11109: "sgi-dmfmgr",
+	11110: "sgi-soap",
+	11111: "vce",
+	11112: "dicom",
+	11161: "suncacao-snmp",
+	11162: "suncacao-jmxmp",
+	11163: "suncacao-rmi",
+	11164: "suncacao-csa",
+	11165: "suncacao-websvc",
+	11172: "oemcacao-jmxmp",
+	11173: "t5-straton",
+	11174: "oemcacao-rmi",
+	11175: "oemcacao-websvc",
+	11201: "smsqp",
+	11202: "dcsl-backup",
+	11208: "wifree",
+	11211: "memcache",
+	11319: "imip",
+	11320: "imip-channels",
+	11321: "arena-server",
+	11367: "atm-uhas",
+	11371: "hkp",
+	11489: "asgcypresstcps",
+	11600: "tempest-port",
+	11623: "emc-xsw-dconfig",
+	11720: "h323callsigalt",
+	11723: "emc-xsw-dcache",
+	11751: "intrepid-ssl",
+	11796: "lanschool",
+	11876: "xoraya",
+	11967: "sysinfo-sp",
+	12000: "entextxid",
+	12001: "entextnetwk",
+	12002: "entexthigh",
+	12003: "entextmed",
+	12004: "entextlow",
+	12005: "dbisamserver1",
+	12006: "dbisamserver2",
+	12007: "accuracer",
+	12008: "accuracer-dbms",
+	12010: "edbsrvr",
+	12012: "vipera",
+	12013: "vipera-ssl",
+	12109: "rets-ssl",
+	12121: "nupaper-ss",
+	12168: "cawas",
+	12172: "hivep",
+	12300: "linogridengine",
+	12302: "rads",
+	12321: "warehouse-sss",
+	12322: "warehouse",
+	12345: "italk",
+	12753: "tsaf",
+	12865: "netperf",
+	13160: "i-zipqd",
+	13216: "bcslogc",
+	13217: "rs-pias",
+	13218: "emc-vcas-tcp",
+	13223: "powwow-client",
+	13224: "powwow-server",
+	13400: "doip-data",
+	13720: "bprd",
+	13721: "bpdbm",
+	13722: "bpjava-msvc",
+	13724: "vnetd",
+	13782: "bpcd",
+	13783: "vopied",
+	13785: "nbdb",
+	13786: "nomdb",
+	13818: "dsmcc-config",
+	13819: "dsmcc-session",
+	13820: "dsmcc-passthru",
+	13821: "dsmcc-download",
+	13822: "dsmcc-ccp",
+	13823: "bmdss",
+	13894: "ucontrol",
+	13929: "dta-systems",
+	13930: "medevolve",
+	14000: "scotty-ft",
+	14001: "sua",
+	14033: "sage-best-com1",
+	14034: "sage-best-com2",
+	14141: "vcs-app",
+	14142: "icpp",
+	14143: "icpps",
+	14145: "gcm-app",
+	14149: "vrts-tdd",
+	14150: "vcscmd",
+	14154: "vad",
+	14250: "cps",
+	14414: "ca-web-update",
+	14500: "xpra",
+	14936: "hde-lcesrvr-1",
+	14937: "hde-lcesrvr-2",
+	15000: "hydap",
+	15002: "onep-tls",
+	15345: "xpilot",
+	15363: "3link",
+	15555: "cisco-snat",
+	15660: "bex-xr",
+	15740: "ptp",
+	15999: "programmar",
+	16000: "fmsas",
+	16001: "fmsascon",
+	16002: "gsms",
+	16020: "jwpc",
+	16021: "jwpc-bin",
+	16161: "sun-sea-port",
+	16162: "solaris-audit",
+	16309: "etb4j",
+	16310: "pduncs",
+	16311: "pdefmns",
+	16360: "netserialext1",
+	16361: "netserialext2",
+	16367: "netserialext3",
+	16368: "netserialext4",
+	16384: "connected",
+	16385: "rdgs",
+	16619: "xoms",
+	16665: "axon-tunnel",
+	16789: "cadsisvr",
+	16900: "newbay-snc-mc",
+	16950: "sgcip",
+	16991: "intel-rci-mp",
+	16992: "amt-soap-http",
+	16993: "amt-soap-https",
+	16994: "amt-redir-tcp",
+	16995: "amt-redir-tls",
+	17007: "isode-dua",
+	17184: "vestasdlp",
+	17185: "soundsvirtual",
+	17219: "chipper",
+	17220: "avtp",
+	17221: "avdecc",
+	17223: "isa100-gci",
+	17225: "trdp-md",
+	17234: "integrius-stp",
+	17235: "ssh-mgmt",
+	17500: "db-lsp",
+	17555: "ailith",
+	17729: "ea",
+	17754: "zep",
+	17755: "zigbee-ip",
+	17756: "zigbee-ips",
+	17777: "sw-orion",
+	18000: "biimenu",
+	18104: "radpdf",
+	18136: "racf",
+	18181: "opsec-cvp",
+	18182: "opsec-ufp",
+	18183: "opsec-sam",
+	18184: "opsec-lea",
+	18185: "opsec-omi",
+	18186: "ohsc",
+	18187: "opsec-ela",
+	18241: "checkpoint-rtm",
+	18242: "iclid",
+	18243: "clusterxl",
+	18262: "gv-pf",
+	18463: "ac-cluster",
+	18634: "rds-ib",
+	18635: "rds-ip",
+	18668: "vdmmesh",
+	18769: "ique",
+	18881: "infotos",
+	18888: "apc-necmp",
+	19000: "igrid",
+	19007: "scintilla",
+	19020: "j-link",
+	19191: "opsec-uaa",
+	19194: "ua-secureagent",
+	19220: "cora",
+	19283: "keysrvr",
+	19315: "keyshadow",
+	19398: "mtrgtrans",
+	19410: "hp-sco",
+	19411: "hp-sca",
+	19412: "hp-sessmon",
+	19539: "fxuptp",
+	19540: "sxuptp",
+	19541: "jcp",
+	19998: "iec-104-sec",
+	19999: "dnp-sec",
+	20000: "dnp",
+	20001: "microsan",
+	20002: "commtact-http",
+	20003: "commtact-https",
+	20005: "openwebnet",
+	20013: "ss-idi",
+	20014: "opendeploy",
+	20034: "nburn-id",
+	20046: "tmophl7mts",
+	20048: "mountd",
+	20049: "nfsrdma",
+	20057: "avesterra",
+	20167: "tolfab",
+	20202: "ipdtp-port",
+	20222: "ipulse-ics",
+	20480: "emwavemsg",
+	20670: "track",
+	20999: "athand-mmp",
+	21000: "irtrans",
+	21010: "notezilla-lan",
+	21221: "aigairserver",
+	21553: "rdm-tfs",
+	21554: "dfserver",
+	21590: "vofr-gateway",
+	21800: "tvpm",
+	21845: "webphone",
+	21846: "netspeak-is",
+	21847: "netspeak-cs",
+	21848: "netspeak-acd",
+	21849: "netspeak-cps",
+	22000: "snapenetio",
+	22001: "optocontrol",
+	22002: "optohost002",
+	22003: "optohost003",
+	22004: "optohost004",
+	22005: "optohost004",
+	22125: "dcap",
+	22128: "gsidcap",
+	22222: "easyengine",
+	22273: "wnn6",
+	22305: "cis",
+	22335: "shrewd-control",
+	22343: "cis-secure",
+	22347: "wibukey",
+	22350: "codemeter",
+	22351: "codemeter-cmwan",
+	22537: "caldsoft-backup",
+	22555: "vocaltec-wconf",
+	22763: "talikaserver",
+	22800: "aws-brf",
+	22951: "brf-gw",
+	23000: "inovaport1",
+	23001: "inovaport2",
+	23002: "inovaport3",
+	23003: "inovaport4",
+	23004: "inovaport5",
+	23005: "inovaport6",
+	23053: "gntp",
+	23294: "5afe-dir",
+	23333: "elxmgmt",
+	23400: "novar-dbase",
+	23401: "novar-alarm",
+	23402: "novar-global",
+	23456: "aequus",
+	23457: "aequus-alt",
+	23546: "areaguard-neo",
+	24000: "med-ltp",
+	24001: "med-fsp-rx",
+	24002: "med-fsp-tx",
+	24003: "med-supp",
+	24004: "med-ovw",
+	24005: "med-ci",
+	24006: "med-net-svc",
+	24242: "filesphere",
+	24249: "vista-4gl",
+	24321: "ild",
+	24386: "intel-rci",
+	24465: "tonidods",
+	24554: "binkp",
+	24577: "bilobit",
+	24666: "sdtvwcam",
+	24676: "canditv",
+	24677: "flashfiler",
+	24678: "proactivate",
+	24680: "tcc-http",
+	24754: "cslg",
+	24922: "find",
+	25000: "icl-twobase1",
+	25001: "icl-twobase2",
+	25002: "icl-twobase3",
+	25003: "icl-twobase4",
+	25004: "icl-twobase5",
+	25005: "icl-twobase6",
+	25006: "icl-twobase7",
+	25007: "icl-twobase8",
+	25008: "icl-twobase9",
+	25009: "icl-twobase10",
+	25576: "sauterdongle",
+	25604: "idtp",
+	25793: "vocaltec-hos",
+	25900: "tasp-net",
+	25901: "niobserver",
+	25902: "nilinkanalyst",
+	25903: "niprobe",
+	26000: "quake",
+	26133: "scscp",
+	26208: "wnn6-ds",
+	26257: "cockroach",
+	26260: "ezproxy",
+	26261: "ezmeeting",
+	26262: "k3software-svr",
+	26263: "k3software-cli",
+	26486: "exoline-tcp",
+	26487: "exoconfig",
+	26489: "exonet",
+	27345: "imagepump",
+	27442: "jesmsjc",
+	27504: "kopek-httphead",
+	27782: "ars-vista",
+	27876: "astrolink",
+	27999: "tw-auth-key",
+	28000: "nxlmd",
+	28001: "pqsp",
+	28200: "voxelstorm",
+	28240: "siemensgsm",
+	28589: "bosswave",
+	29167: "otmp",
+	29999: "bingbang",
+	30000: "ndmps",
+	30001: "pago-services1",
+	30002: "pago-services2",
+	30003: "amicon-fpsu-ra",
+	30100: "rwp",
+	30260: "kingdomsonline",
+	30400: "gs-realtime",
+	30999: "ovobs",
+	31016: "ka-sddp",
+	31020: "autotrac-acp",
+	31400: "pace-licensed",
+	31416: "xqosd",
+	31457: "tetrinet",
+	31620: "lm-mon",
+	31685: "dsx-monitor",
+	31765: "gamesmith-port",
+	31948: "iceedcp-tx",
+	31949: "iceedcp-rx",
+	32034: "iracinghelper",
+	32249: "t1distproc60",
+	32400: "plex",
+	32483: "apm-link",
+	32635: "sec-ntb-clnt",
+	32636: "DMExpress",
+	32767: "filenet-powsrm",
+	32768: "filenet-tms",
+	32769: "filenet-rpc",
+	32770: "filenet-nch",
+	32771: "filenet-rmi",
+	32772: "filenet-pa",
+	32773: "filenet-cm",
+	32774: "filenet-re",
+	32775: "filenet-pch",
+	32776: "filenet-peior",
+	32777: "filenet-obrok",
+	32801: "mlsn",
+	32811: "retp",
+	32896: "idmgratm",
+	33060: "mysqlx",
+	33123: "aurora-balaena",
+	33331: "diamondport",
+	33333: "dgi-serv",
+	33334: "speedtrace",
+	33434: "traceroute",
+	33656: "snip-slave",
+	34249: "turbonote-2",
+	34378: "p-net-local",
+	34379: "p-net-remote",
+	34567: "dhanalakshmi",
+	34962: "profinet-rt",
+	34963: "profinet-rtm",
+	34964: "profinet-cm",
+	34980: "ethercat",
+	35000: "heathview",
+	35001: "rt-viewer",
+	35002: "rt-sound",
+	35003: "rt-devicemapper",
+	35004: "rt-classmanager",
+	35005: "rt-labtracker",
+	35006: "rt-helper",
+	35100: "axio-disc",
+	35354: "kitim",
+	35355: "altova-lm",
+	35356: "guttersnex",
+	35357: "openstack-id",
+	36001: "allpeers",
+	36524: "febooti-aw",
+	36602: "observium-agent",
+	36700: "mapx",
+	36865: "kastenxpipe",
+	37475: "neckar",
+	37483: "gdrive-sync",
+	37601: "eftp",
+	37654: "unisys-eportal",
+	38000: "ivs-database",
+	38001: "ivs-insertion",
+	38002: "cresco-control",
+	38201: "galaxy7-data",
+	38202: "fairview",
+	38203: "agpolicy",
+	38800: "sruth",
+	38865: "secrmmsafecopya",
+	39681: "turbonote-1",
+	40000: "safetynetp",
+	40404: "sptx",
+	40841: "cscp",
+	40842: "csccredir",
+	40843: "csccfirewall",
+	41111: "fs-qos",
+	41121: "tentacle",
+	41230: "z-wave-s",
+	41794: "crestron-cip",
+	41795: "crestron-ctp",
+	41796: "crestron-cips",
+	41797: "crestron-ctps",
+	42508: "candp",
+	42509: "candrp",
+	42510: "caerpc",
+	43000: "recvr-rc",
+	43188: "reachout",
+	43189: "ndm-agent-port",
+	43190: "ip-provision",
+	43191: "noit-transport",
+	43210: "shaperai",
+	43439: "eq3-update",
+	43440: "ew-mgmt",
+	43441: "ciscocsdb",
+	44123: "z-wave-tunnel",
+	44321: "pmcd",
+	44322: "pmcdproxy",
+	44323: "pmwebapi",
+	44444: "cognex-dataman",
+	44553: "rbr-debug",
+	44818: "EtherNet-IP-2",
+	44900: "m3da",
+	45000: "asmp",
+	45001: "asmps",
+	45002: "rs-status",
+	45045: "synctest",
+	45054: "invision-ag",
+	45514: "cloudcheck",
+	45678: "eba",
+	45824: "dai-shell",
+	45825: "qdb2service",
+	45966: "ssr-servermgr",
+	46336: "inedo",
+	46998: "spremotetablet",
+	46999: "mediabox",
+	47000: "mbus",
+	47001: "winrm",
+	47557: "dbbrowse",
+	47624: "directplaysrvr",
+	47806: "ap",
+	47808: "bacnet",
+	48000: "nimcontroller",
+	48001: "nimspooler",
+	48002: "nimhub",
+	48003: "nimgtw",
+	48004: "nimbusdb",
+	48005: "nimbusdbctrl",
+	48049: "3gpp-cbsp",
+	48050: "weandsf",
+	48128: "isnetserv",
+	48129: "blp5",
+	48556: "com-bardac-dw",
+	48619: "iqobject",
+	48653: "robotraconteur",
+	49000: "matahari",
+	49001: "nusrp",
+}
+var udpPortNames = map[UDPPort]string{
+	1:     "tcpmux",
+	2:     "compressnet",
+	3:     "compressnet",
+	5:     "rje",
+	7:     "echo",
+	9:     "discard",
+	11:    "systat",
+	13:    "daytime",
+	17:    "qotd",
+	18:    "msp",
+	19:    "chargen",
+	20:    "ftp-data",
+	21:    "ftp",
+	22:    "ssh",
+	23:    "telnet",
+	25:    "smtp",
+	27:    "nsw-fe",
+	29:    "msg-icp",
+	31:    "msg-auth",
+	33:    "dsp",
+	37:    "time",
+	38:    "rap",
+	39:    "rlp",
+	41:    "graphics",
+	42:    "name",
+	43:    "nicname",
+	44:    "mpm-flags",
+	45:    "mpm",
+	46:    "mpm-snd",
+	48:    "auditd",
+	49:    "tacacs",
+	50:    "re-mail-ck",
+	52:    "xns-time",
+	53:    "domain",
+	54:    "xns-ch",
+	55:    "isi-gl",
+	56:    "xns-auth",
+	58:    "xns-mail",
+	62:    "acas",
+	63:    "whoispp",
+	64:    "covia",
+	65:    "tacacs-ds",
+	66:    "sql-net",
+	67:    "bootps",
+	68:    "bootpc",
+	69:    "tftp",
+	70:    "gopher",
+	71:    "netrjs-1",
+	72:    "netrjs-2",
+	73:    "netrjs-3",
+	74:    "netrjs-4",
+	76:    "deos",
+	78:    "vettcp",
+	79:    "finger",
+	80:    "http",
+	82:    "xfer",
+	83:    "mit-ml-dev",
+	84:    "ctf",
+	85:    "mit-ml-dev",
+	86:    "mfcobol",
+	88:    "kerberos",
+	89:    "su-mit-tg",
+	90:    "dnsix",
+	91:    "mit-dov",
+	92:    "npp",
+	93:    "dcp",
+	94:    "objcall",
+	95:    "supdup",
+	96:    "dixie",
+	97:    "swift-rvf",
+	98:    "tacnews",
+	99:    "metagram",
+	101:   "hostname",
+	102:   "iso-tsap",
+	103:   "gppitnp",
+	104:   "acr-nema",
+	105:   "cso",
+	106:   "3com-tsmux",
+	107:   "rtelnet",
+	108:   "snagas",
+	109:   "pop2",
+	110:   "pop3",
+	111:   "sunrpc",
+	112:   "mcidas",
+	113:   "auth",
+	115:   "sftp",
+	116:   "ansanotify",
+	117:   "uucp-path",
+	118:   "sqlserv",
+	119:   "nntp",
+	120:   "cfdptkt",
+	121:   "erpc",
+	122:   "smakynet",
+	123:   "ntp",
+	124:   "ansatrader",
+	125:   "locus-map",
+	126:   "nxedit",
+	127:   "locus-con",
+	128:   "gss-xlicen",
+	129:   "pwdgen",
+	130:   "cisco-fna",
+	131:   "cisco-tna",
+	132:   "cisco-sys",
+	133:   "statsrv",
+	134:   "ingres-net",
+	135:   "epmap",
+	136:   "profile",
+	137:   "netbios-ns",
+	138:   "netbios-dgm",
+	139:   "netbios-ssn",
+	140:   "emfis-data",
+	141:   "emfis-cntl",
+	142:   "bl-idm",
+	143:   "imap",
+	144:   "uma",
+	145:   "uaac",
+	146:   "iso-tp0",
+	147:   "iso-ip",
+	148:   "jargon",
+	149:   "aed-512",
+	150:   "sql-net",
+	151:   "hems",
+	152:   "bftp",
+	153:   "sgmp",
+	154:   "netsc-prod",
+	155:   "netsc-dev",
+	156:   "sqlsrv",
+	157:   "knet-cmp",
+	158:   "pcmail-srv",
+	159:   "nss-routing",
+	160:   "sgmp-traps",
+	161:   "snmp",
+	162:   "snmptrap",
+	163:   "cmip-man",
+	164:   "cmip-agent",
+	165:   "xns-courier",
+	166:   "s-net",
+	167:   "namp",
+	168:   "rsvd",
+	169:   "send",
+	170:   "print-srv",
+	171:   "multiplex",
+	172:   "cl-1",
+	173:   "xyplex-mux",
+	174:   "mailq",
+	175:   "vmnet",
+	176:   "genrad-mux",
+	177:   "xdmcp",
+	178:   "nextstep",
+	179:   "bgp",
+	180:   "ris",
+	181:   "unify",
+	182:   "audit",
+	183:   "ocbinder",
+	184:   "ocserver",
+	185:   "remote-kis",
+	186:   "kis",
+	187:   "aci",
+	188:   "mumps",
+	189:   "qft",
+	190:   "gacp",
+	191:   "prospero",
+	192:   "osu-nms",
+	193:   "srmp",
+	194:   "irc",
+	195:   "dn6-nlm-aud",
+	196:   "dn6-smm-red",
+	197:   "dls",
+	198:   "dls-mon",
+	199:   "smux",
+	200:   "src",
+	201:   "at-rtmp",
+	202:   "at-nbp",
+	203:   "at-3",
+	204:   "at-echo",
+	205:   "at-5",
+	206:   "at-zis",
+	207:   "at-7",
+	208:   "at-8",
+	209:   "qmtp",
+	210:   "z39-50",
+	211:   "914c-g",
+	212:   "anet",
+	213:   "ipx",
+	214:   "vmpwscs",
+	215:   "softpc",
+	216:   "CAIlic",
+	217:   "dbase",
+	218:   "mpp",
+	219:   "uarps",
+	220:   "imap3",
+	221:   "fln-spx",
+	222:   "rsh-spx",
+	223:   "cdc",
+	224:   "masqdialer",
+	242:   "direct",
+	243:   "sur-meas",
+	244:   "inbusiness",
+	245:   "link",
+	246:   "dsp3270",
+	247:   "subntbcst-tftp",
+	248:   "bhfhs",
+	256:   "rap",
+	257:   "set",
+	259:   "esro-gen",
+	260:   "openport",
+	261:   "nsiiops",
+	262:   "arcisdms",
+	263:   "hdap",
+	264:   "bgmp",
+	265:   "x-bone-ctl",
+	266:   "sst",
+	267:   "td-service",
+	268:   "td-replica",
+	269:   "manet",
+	270:   "gist",
+	280:   "http-mgmt",
+	281:   "personal-link",
+	282:   "cableport-ax",
+	283:   "rescap",
+	284:   "corerjd",
+	286:   "fxp",
+	287:   "k-block",
+	308:   "novastorbakcup",
+	309:   "entrusttime",
+	310:   "bhmds",
+	311:   "asip-webadmin",
+	312:   "vslmp",
+	313:   "magenta-logic",
+	314:   "opalis-robot",
+	315:   "dpsi",
+	316:   "decauth",
+	317:   "zannet",
+	318:   "pkix-timestamp",
+	319:   "ptp-event",
+	320:   "ptp-general",
+	321:   "pip",
+	322:   "rtsps",
+	333:   "texar",
+	344:   "pdap",
+	345:   "pawserv",
+	346:   "zserv",
+	347:   "fatserv",
+	348:   "csi-sgwp",
+	349:   "mftp",
+	350:   "matip-type-a",
+	351:   "matip-type-b",
+	352:   "dtag-ste-sb",
+	353:   "ndsauth",
+	354:   "bh611",
+	355:   "datex-asn",
+	356:   "cloanto-net-1",
+	357:   "bhevent",
+	358:   "shrinkwrap",
+	359:   "nsrmp",
+	360:   "scoi2odialog",
+	361:   "semantix",
+	362:   "srssend",
+	363:   "rsvp-tunnel",
+	364:   "aurora-cmgr",
+	365:   "dtk",
+	366:   "odmr",
+	367:   "mortgageware",
+	368:   "qbikgdp",
+	369:   "rpc2portmap",
+	370:   "codaauth2",
+	371:   "clearcase",
+	372:   "ulistproc",
+	373:   "legent-1",
+	374:   "legent-2",
+	375:   "hassle",
+	376:   "nip",
+	377:   "tnETOS",
+	378:   "dsETOS",
+	379:   "is99c",
+	380:   "is99s",
+	381:   "hp-collector",
+	382:   "hp-managed-node",
+	383:   "hp-alarm-mgr",
+	384:   "arns",
+	385:   "ibm-app",
+	386:   "asa",
+	387:   "aurp",
+	388:   "unidata-ldm",
+	389:   "ldap",
+	390:   "uis",
+	391:   "synotics-relay",
+	392:   "synotics-broker",
+	393:   "meta5",
+	394:   "embl-ndt",
+	395:   "netcp",
+	396:   "netware-ip",
+	397:   "mptn",
+	398:   "kryptolan",
+	399:   "iso-tsap-c2",
+	400:   "osb-sd",
+	401:   "ups",
+	402:   "genie",
+	403:   "decap",
+	404:   "nced",
+	405:   "ncld",
+	406:   "imsp",
+	407:   "timbuktu",
+	408:   "prm-sm",
+	409:   "prm-nm",
+	410:   "decladebug",
+	411:   "rmt",
+	412:   "synoptics-trap",
+	413:   "smsp",
+	414:   "infoseek",
+	415:   "bnet",
+	416:   "silverplatter",
+	417:   "onmux",
+	418:   "hyper-g",
+	419:   "ariel1",
+	420:   "smpte",
+	421:   "ariel2",
+	422:   "ariel3",
+	423:   "opc-job-start",
+	424:   "opc-job-track",
+	425:   "icad-el",
+	426:   "smartsdp",
+	427:   "svrloc",
+	428:   "ocs-cmu",
+	429:   "ocs-amu",
+	430:   "utmpsd",
+	431:   "utmpcd",
+	432:   "iasd",
+	433:   "nnsp",
+	434:   "mobileip-agent",
+	435:   "mobilip-mn",
+	436:   "dna-cml",
+	437:   "comscm",
+	438:   "dsfgw",
+	439:   "dasp",
+	440:   "sgcp",
+	441:   "decvms-sysmgt",
+	442:   "cvc-hostd",
+	443:   "https",
+	444:   "snpp",
+	445:   "microsoft-ds",
+	446:   "ddm-rdb",
+	447:   "ddm-dfm",
+	448:   "ddm-ssl",
+	449:   "as-servermap",
+	450:   "tserver",
+	451:   "sfs-smp-net",
+	452:   "sfs-config",
+	453:   "creativeserver",
+	454:   "contentserver",
+	455:   "creativepartnr",
+	456:   "macon-udp",
+	457:   "scohelp",
+	458:   "appleqtc",
+	459:   "ampr-rcmd",
+	460:   "skronk",
+	461:   "datasurfsrv",
+	462:   "datasurfsrvsec",
+	463:   "alpes",
+	464:   "kpasswd",
+	465:   "igmpv3lite",
+	466:   "digital-vrc",
+	467:   "mylex-mapd",
+	468:   "photuris",
+	469:   "rcp",
+	470:   "scx-proxy",
+	471:   "mondex",
+	472:   "ljk-login",
+	473:   "hybrid-pop",
+	474:   "tn-tl-w2",
+	475:   "tcpnethaspsrv",
+	476:   "tn-tl-fd1",
+	477:   "ss7ns",
+	478:   "spsc",
+	479:   "iafserver",
+	480:   "iafdbase",
+	481:   "ph",
+	482:   "bgs-nsi",
+	483:   "ulpnet",
+	484:   "integra-sme",
+	485:   "powerburst",
+	486:   "avian",
+	487:   "saft",
+	488:   "gss-http",
+	489:   "nest-protocol",
+	490:   "micom-pfs",
+	491:   "go-login",
+	492:   "ticf-1",
+	493:   "ticf-2",
+	494:   "pov-ray",
+	495:   "intecourier",
+	496:   "pim-rp-disc",
+	497:   "retrospect",
+	498:   "siam",
+	499:   "iso-ill",
+	500:   "isakmp",
+	501:   "stmf",
+	502:   "mbap",
+	503:   "intrinsa",
+	504:   "citadel",
+	505:   "mailbox-lm",
+	506:   "ohimsrv",
+	507:   "crs",
+	508:   "xvttp",
+	509:   "snare",
+	510:   "fcp",
+	511:   "passgo",
+	512:   "comsat",
+	513:   "who",
+	514:   "syslog",
+	515:   "printer",
+	516:   "videotex",
+	517:   "talk",
+	518:   "ntalk",
+	519:   "utime",
+	520:   "router",
+	521:   "ripng",
+	522:   "ulp",
+	523:   "ibm-db2",
+	524:   "ncp",
+	525:   "timed",
+	526:   "tempo",
+	527:   "stx",
+	528:   "custix",
+	529:   "irc-serv",
+	530:   "courier",
+	531:   "conference",
+	532:   "netnews",
+	533:   "netwall",
+	534:   "windream",
+	535:   "iiop",
+	536:   "opalis-rdv",
+	537:   "nmsp",
+	538:   "gdomap",
+	539:   "apertus-ldp",
+	540:   "uucp",
+	541:   "uucp-rlogin",
+	542:   "commerce",
+	543:   "klogin",
+	544:   "kshell",
+	545:   "appleqtcsrvr",
+	546:   "dhcpv6-client",
+	547:   "dhcpv6-server",
+	548:   "afpovertcp",
+	549:   "idfp",
+	550:   "new-rwho",
+	551:   "cybercash",
+	552:   "devshr-nts",
+	553:   "pirp",
+	554:   "rtsp",
+	555:   "dsf",
+	556:   "remotefs",
+	557:   "openvms-sysipc",
+	558:   "sdnskmp",
+	559:   "teedtap",
+	560:   "rmonitor",
+	561:   "monitor",
+	562:   "chshell",
+	563:   "nntps",
+	564:   "9pfs",
+	565:   "whoami",
+	566:   "streettalk",
+	567:   "banyan-rpc",
+	568:   "ms-shuttle",
+	569:   "ms-rome",
+	570:   "meter",
+	571:   "meter",
+	572:   "sonar",
+	573:   "banyan-vip",
+	574:   "ftp-agent",
+	575:   "vemmi",
+	576:   "ipcd",
+	577:   "vnas",
+	578:   "ipdd",
+	579:   "decbsrv",
+	580:   "sntp-heartbeat",
+	581:   "bdp",
+	582:   "scc-security",
+	583:   "philips-vc",
+	584:   "keyserver",
+	586:   "password-chg",
+	587:   "submission",
+	588:   "cal",
+	589:   "eyelink",
+	590:   "tns-cml",
+	591:   "http-alt",
+	592:   "eudora-set",
+	593:   "http-rpc-epmap",
+	594:   "tpip",
+	595:   "cab-protocol",
+	596:   "smsd",
+	597:   "ptcnameservice",
+	598:   "sco-websrvrmg3",
+	599:   "acp",
+	600:   "ipcserver",
+	601:   "syslog-conn",
+	602:   "xmlrpc-beep",
+	603:   "idxp",
+	604:   "tunnel",
+	605:   "soap-beep",
+	606:   "urm",
+	607:   "nqs",
+	608:   "sift-uft",
+	609:   "npmp-trap",
+	610:   "npmp-local",
+	611:   "npmp-gui",
+	612:   "hmmp-ind",
+	613:   "hmmp-op",
+	614:   "sshell",
+	615:   "sco-inetmgr",
+	616:   "sco-sysmgr",
+	617:   "sco-dtmgr",
+	618:   "dei-icda",
+	619:   "compaq-evm",
+	620:   "sco-websrvrmgr",
+	621:   "escp-ip",
+	622:   "collaborator",
+	623:   "asf-rmcp",
+	624:   "cryptoadmin",
+	625:   "dec-dlm",
+	626:   "asia",
+	627:   "passgo-tivoli",
+	628:   "qmqp",
+	629:   "3com-amp3",
+	630:   "rda",
+	631:   "ipp",
+	632:   "bmpp",
+	633:   "servstat",
+	634:   "ginad",
+	635:   "rlzdbase",
+	636:   "ldaps",
+	637:   "lanserver",
+	638:   "mcns-sec",
+	639:   "msdp",
+	640:   "entrust-sps",
+	641:   "repcmd",
+	642:   "esro-emsdp",
+	643:   "sanity",
+	644:   "dwr",
+	645:   "pssc",
+	646:   "ldp",
+	647:   "dhcp-failover",
+	648:   "rrp",
+	649:   "cadview-3d",
+	650:   "obex",
+	651:   "ieee-mms",
+	652:   "hello-port",
+	653:   "repscmd",
+	654:   "aodv",
+	655:   "tinc",
+	656:   "spmp",
+	657:   "rmc",
+	658:   "tenfold",
+	660:   "mac-srvr-admin",
+	661:   "hap",
+	662:   "pftp",
+	663:   "purenoise",
+	664:   "asf-secure-rmcp",
+	665:   "sun-dr",
+	666:   "mdqs",
+	667:   "disclose",
+	668:   "mecomm",
+	669:   "meregister",
+	670:   "vacdsm-sws",
+	671:   "vacdsm-app",
+	672:   "vpps-qua",
+	673:   "cimplex",
+	674:   "acap",
+	675:   "dctp",
+	676:   "vpps-via",
+	677:   "vpp",
+	678:   "ggf-ncp",
+	679:   "mrm",
+	680:   "entrust-aaas",
+	681:   "entrust-aams",
+	682:   "xfr",
+	683:   "corba-iiop",
+	684:   "corba-iiop-ssl",
+	685:   "mdc-portmapper",
+	686:   "hcp-wismar",
+	687:   "asipregistry",
+	688:   "realm-rusd",
+	689:   "nmap",
+	690:   "vatp",
+	691:   "msexch-routing",
+	692:   "hyperwave-isp",
+	693:   "connendp",
+	694:   "ha-cluster",
+	695:   "ieee-mms-ssl",
+	696:   "rushd",
+	697:   "uuidgen",
+	698:   "olsr",
+	699:   "accessnetwork",
+	700:   "epp",
+	701:   "lmp",
+	702:   "iris-beep",
+	704:   "elcsd",
+	705:   "agentx",
+	706:   "silc",
+	707:   "borland-dsj",
+	709:   "entrust-kmsh",
+	710:   "entrust-ash",
+	711:   "cisco-tdp",
+	712:   "tbrpf",
+	713:   "iris-xpc",
+	714:   "iris-xpcs",
+	715:   "iris-lwz",
+	716:   "pana",
+	729:   "netviewdm1",
+	730:   "netviewdm2",
+	731:   "netviewdm3",
+	741:   "netgw",
+	742:   "netrcs",
+	744:   "flexlm",
+	747:   "fujitsu-dev",
+	748:   "ris-cm",
+	749:   "kerberos-adm",
+	750:   "loadav",
+	751:   "pump",
+	752:   "qrh",
+	753:   "rrh",
+	754:   "tell",
+	758:   "nlogin",
+	759:   "con",
+	760:   "ns",
+	761:   "rxe",
+	762:   "quotad",
+	763:   "cycleserv",
+	764:   "omserv",
+	765:   "webster",
+	767:   "phonebook",
+	769:   "vid",
+	770:   "cadlock",
+	771:   "rtip",
+	772:   "cycleserv2",
+	773:   "notify",
+	774:   "acmaint-dbd",
+	775:   "acmaint-transd",
+	776:   "wpages",
+	777:   "multiling-http",
+	780:   "wpgs",
+	800:   "mdbs-daemon",
+	801:   "device",
+	802:   "mbap-s",
+	810:   "fcp-udp",
+	828:   "itm-mcell-s",
+	829:   "pkix-3-ca-ra",
+	830:   "netconf-ssh",
+	831:   "netconf-beep",
+	832:   "netconfsoaphttp",
+	833:   "netconfsoapbeep",
+	847:   "dhcp-failover2",
+	848:   "gdoi",
+	853:   "domain-s",
+	854:   "dlep",
+	860:   "iscsi",
+	861:   "owamp-control",
+	862:   "twamp-control",
+	873:   "rsync",
+	886:   "iclcnet-locate",
+	887:   "iclcnet-svinfo",
+	888:   "accessbuilder",
+	900:   "omginitialrefs",
+	901:   "smpnameres",
+	902:   "ideafarm-door",
+	903:   "ideafarm-panic",
+	910:   "kink",
+	911:   "xact-backup",
+	912:   "apex-mesh",
+	913:   "apex-edge",
+	989:   "ftps-data",
+	990:   "ftps",
+	991:   "nas",
+	992:   "telnets",
+	993:   "imaps",
+	995:   "pop3s",
+	996:   "vsinet",
+	997:   "maitrd",
+	998:   "puparp",
+	999:   "applix",
+	1000:  "cadlock2",
+	1010:  "surf",
+	1021:  "exp1",
+	1022:  "exp2",
+	1025:  "blackjack",
+	1026:  "cap",
+	1027:  "6a44",
+	1029:  "solid-mux",
+	1033:  "netinfo-local",
+	1034:  "activesync",
+	1035:  "mxxrlogin",
+	1036:  "nsstp",
+	1037:  "ams",
+	1038:  "mtqp",
+	1039:  "sbl",
+	1040:  "netarx",
+	1041:  "danf-ak2",
+	1042:  "afrog",
+	1043:  "boinc-client",
+	1044:  "dcutility",
+	1045:  "fpitp",
+	1046:  "wfremotertm",
+	1047:  "neod1",
+	1048:  "neod2",
+	1049:  "td-postman",
+	1050:  "cma",
+	1051:  "optima-vnet",
+	1052:  "ddt",
+	1053:  "remote-as",
+	1054:  "brvread",
+	1055:  "ansyslmd",
+	1056:  "vfo",
+	1057:  "startron",
+	1058:  "nim",
+	1059:  "nimreg",
+	1060:  "polestar",
+	1061:  "kiosk",
+	1062:  "veracity",
+	1063:  "kyoceranetdev",
+	1064:  "jstel",
+	1065:  "syscomlan",
+	1066:  "fpo-fns",
+	1067:  "instl-boots",
+	1068:  "instl-bootc",
+	1069:  "cognex-insight",
+	1070:  "gmrupdateserv",
+	1071:  "bsquare-voip",
+	1072:  "cardax",
+	1073:  "bridgecontrol",
+	1074:  "warmspotMgmt",
+	1075:  "rdrmshc",
+	1076:  "dab-sti-c",
+	1077:  "imgames",
+	1078:  "avocent-proxy",
+	1079:  "asprovatalk",
+	1080:  "socks",
+	1081:  "pvuniwien",
+	1082:  "amt-esd-prot",
+	1083:  "ansoft-lm-1",
+	1084:  "ansoft-lm-2",
+	1085:  "webobjects",
+	1086:  "cplscrambler-lg",
+	1087:  "cplscrambler-in",
+	1088:  "cplscrambler-al",
+	1089:  "ff-annunc",
+	1090:  "ff-fms",
+	1091:  "ff-sm",
+	1092:  "obrpd",
+	1093:  "proofd",
+	1094:  "rootd",
+	1095:  "nicelink",
+	1096:  "cnrprotocol",
+	1097:  "sunclustermgr",
+	1098:  "rmiactivation",
+	1099:  "rmiregistry",
+	1100:  "mctp",
+	1101:  "pt2-discover",
+	1102:  "adobeserver-1",
+	1103:  "adobeserver-2",
+	1104:  "xrl",
+	1105:  "ftranhc",
+	1106:  "isoipsigport-1",
+	1107:  "isoipsigport-2",
+	1108:  "ratio-adp",
+	1110:  "nfsd-keepalive",
+	1111:  "lmsocialserver",
+	1112:  "icp",
+	1113:  "ltp-deepspace",
+	1114:  "mini-sql",
+	1115:  "ardus-trns",
+	1116:  "ardus-cntl",
+	1117:  "ardus-mtrns",
+	1118:  "sacred",
+	1119:  "bnetgame",
+	1120:  "bnetfile",
+	1121:  "rmpp",
+	1122:  "availant-mgr",
+	1123:  "murray",
+	1124:  "hpvmmcontrol",
+	1125:  "hpvmmagent",
+	1126:  "hpvmmdata",
+	1127:  "kwdb-commn",
+	1128:  "saphostctrl",
+	1129:  "saphostctrls",
+	1130:  "casp",
+	1131:  "caspssl",
+	1132:  "kvm-via-ip",
+	1133:  "dfn",
+	1134:  "aplx",
+	1135:  "omnivision",
+	1136:  "hhb-gateway",
+	1137:  "trim",
+	1138:  "encrypted-admin",
+	1139:  "evm",
+	1140:  "autonoc",
+	1141:  "mxomss",
+	1142:  "edtools",
+	1143:  "imyx",
+	1144:  "fuscript",
+	1145:  "x9-icue",
+	1146:  "audit-transfer",
+	1147:  "capioverlan",
+	1148:  "elfiq-repl",
+	1149:  "bvtsonar",
+	1150:  "blaze",
+	1151:  "unizensus",
+	1152:  "winpoplanmess",
+	1153:  "c1222-acse",
+	1154:  "resacommunity",
+	1155:  "nfa",
+	1156:  "iascontrol-oms",
+	1157:  "iascontrol",
+	1158:  "dbcontrol-oms",
+	1159:  "oracle-oms",
+	1160:  "olsv",
+	1161:  "health-polling",
+	1162:  "health-trap",
+	1163:  "sddp",
+	1164:  "qsm-proxy",
+	1165:  "qsm-gui",
+	1166:  "qsm-remote",
+	1167:  "cisco-ipsla",
+	1168:  "vchat",
+	1169:  "tripwire",
+	1170:  "atc-lm",
+	1171:  "atc-appserver",
+	1172:  "dnap",
+	1173:  "d-cinema-rrp",
+	1174:  "fnet-remote-ui",
+	1175:  "dossier",
+	1176:  "indigo-server",
+	1177:  "dkmessenger",
+	1178:  "sgi-storman",
+	1179:  "b2n",
+	1180:  "mc-client",
+	1181:  "3comnetman",
+	1182:  "accelenet-data",
+	1183:  "llsurfup-http",
+	1184:  "llsurfup-https",
+	1185:  "catchpole",
+	1186:  "mysql-cluster",
+	1187:  "alias",
+	1188:  "hp-webadmin",
+	1189:  "unet",
+	1190:  "commlinx-avl",
+	1191:  "gpfs",
+	1192:  "caids-sensor",
+	1193:  "fiveacross",
+	1194:  "openvpn",
+	1195:  "rsf-1",
+	1196:  "netmagic",
+	1197:  "carrius-rshell",
+	1198:  "cajo-discovery",
+	1199:  "dmidi",
+	1200:  "scol",
+	1201:  "nucleus-sand",
+	1202:  "caiccipc",
+	1203:  "ssslic-mgr",
+	1204:  "ssslog-mgr",
+	1205:  "accord-mgc",
+	1206:  "anthony-data",
+	1207:  "metasage",
+	1208:  "seagull-ais",
+	1209:  "ipcd3",
+	1210:  "eoss",
+	1211:  "groove-dpp",
+	1212:  "lupa",
+	1213:  "mpc-lifenet",
+	1214:  "kazaa",
+	1215:  "scanstat-1",
+	1216:  "etebac5",
+	1217:  "hpss-ndapi",
+	1218:  "aeroflight-ads",
+	1219:  "aeroflight-ret",
+	1220:  "qt-serveradmin",
+	1221:  "sweetware-apps",
+	1222:  "nerv",
+	1223:  "tgp",
+	1224:  "vpnz",
+	1225:  "slinkysearch",
+	1226:  "stgxfws",
+	1227:  "dns2go",
+	1228:  "florence",
+	1229:  "zented",
+	1230:  "periscope",
+	1231:  "menandmice-lpm",
+	1232:  "first-defense",
+	1233:  "univ-appserver",
+	1234:  "search-agent",
+	1235:  "mosaicsyssvc1",
+	1236:  "bvcontrol",
+	1237:  "tsdos390",
+	1238:  "hacl-qs",
+	1239:  "nmsd",
+	1240:  "instantia",
+	1241:  "nessus",
+	1242:  "nmasoverip",
+	1243:  "serialgateway",
+	1244:  "isbconference1",
+	1245:  "isbconference2",
+	1246:  "payrouter",
+	1247:  "visionpyramid",
+	1248:  "hermes",
+	1249:  "mesavistaco",
+	1250:  "swldy-sias",
+	1251:  "servergraph",
+	1252:  "bspne-pcc",
+	1253:  "q55-pcc",
+	1254:  "de-noc",
+	1255:  "de-cache-query",
+	1256:  "de-server",
+	1257:  "shockwave2",
+	1258:  "opennl",
+	1259:  "opennl-voice",
+	1260:  "ibm-ssd",
+	1261:  "mpshrsv",
+	1262:  "qnts-orb",
+	1263:  "dka",
+	1264:  "prat",
+	1265:  "dssiapi",
+	1266:  "dellpwrappks",
+	1267:  "epc",
+	1268:  "propel-msgsys",
+	1269:  "watilapp",
+	1270:  "opsmgr",
+	1271:  "excw",
+	1272:  "cspmlockmgr",
+	1273:  "emc-gateway",
+	1274:  "t1distproc",
+	1275:  "ivcollector",
+	1277:  "miva-mqs",
+	1278:  "dellwebadmin-1",
+	1279:  "dellwebadmin-2",
+	1280:  "pictrography",
+	1281:  "healthd",
+	1282:  "emperion",
+	1283:  "productinfo",
+	1284:  "iee-qfx",
+	1285:  "neoiface",
+	1286:  "netuitive",
+	1287:  "routematch",
+	1288:  "navbuddy",
+	1289:  "jwalkserver",
+	1290:  "winjaserver",
+	1291:  "seagulllms",
+	1292:  "dsdn",
+	1293:  "pkt-krb-ipsec",
+	1294:  "cmmdriver",
+	1295:  "ehtp",
+	1296:  "dproxy",
+	1297:  "sdproxy",
+	1298:  "lpcp",
+	1299:  "hp-sci",
+	1300:  "h323hostcallsc",
+	1301:  "ci3-software-1",
+	1302:  "ci3-software-2",
+	1303:  "sftsrv",
+	1304:  "boomerang",
+	1305:  "pe-mike",
+	1306:  "re-conn-proto",
+	1307:  "pacmand",
+	1308:  "odsi",
+	1309:  "jtag-server",
+	1310:  "husky",
+	1311:  "rxmon",
+	1312:  "sti-envision",
+	1313:  "bmc-patroldb",
+	1314:  "pdps",
+	1315:  "els",
+	1316:  "exbit-escp",
+	1317:  "vrts-ipcserver",
+	1318:  "krb5gatekeeper",
+	1319:  "amx-icsp",
+	1320:  "amx-axbnet",
+	1321:  "pip",
+	1322:  "novation",
+	1323:  "brcd",
+	1324:  "delta-mcp",
+	1325:  "dx-instrument",
+	1326:  "wimsic",
+	1327:  "ultrex",
+	1328:  "ewall",
+	1329:  "netdb-export",
+	1330:  "streetperfect",
+	1331:  "intersan",
+	1332:  "pcia-rxp-b",
+	1333:  "passwrd-policy",
+	1334:  "writesrv",
+	1335:  "digital-notary",
+	1336:  "ischat",
+	1337:  "menandmice-dns",
+	1338:  "wmc-log-svc",
+	1339:  "kjtsiteserver",
+	1340:  "naap",
+	1341:  "qubes",
+	1342:  "esbroker",
+	1343:  "re101",
+	1344:  "icap",
+	1345:  "vpjp",
+	1346:  "alta-ana-lm",
+	1347:  "bbn-mmc",
+	1348:  "bbn-mmx",
+	1349:  "sbook",
+	1350:  "editbench",
+	1351:  "equationbuilder",
+	1352:  "lotusnote",
+	1353:  "relief",
+	1354:  "XSIP-network",
+	1355:  "intuitive-edge",
+	1356:  "cuillamartin",
+	1357:  "pegboard",
+	1358:  "connlcli",
+	1359:  "ftsrv",
+	1360:  "mimer",
+	1361:  "linx",
+	1362:  "timeflies",
+	1363:  "ndm-requester",
+	1364:  "ndm-server",
+	1365:  "adapt-sna",
+	1366:  "netware-csp",
+	1367:  "dcs",
+	1368:  "screencast",
+	1369:  "gv-us",
+	1370:  "us-gv",
+	1371:  "fc-cli",
+	1372:  "fc-ser",
+	1373:  "chromagrafx",
+	1374:  "molly",
+	1375:  "bytex",
+	1376:  "ibm-pps",
+	1377:  "cichlid",
+	1378:  "elan",
+	1379:  "dbreporter",
+	1380:  "telesis-licman",
+	1381:  "apple-licman",
+	1382:  "udt-os",
+	1383:  "gwha",
+	1384:  "os-licman",
+	1385:  "atex-elmd",
+	1386:  "checksum",
+	1387:  "cadsi-lm",
+	1388:  "objective-dbc",
+	1389:  "iclpv-dm",
+	1390:  "iclpv-sc",
+	1391:  "iclpv-sas",
+	1392:  "iclpv-pm",
+	1393:  "iclpv-nls",
+	1394:  "iclpv-nlc",
+	1395:  "iclpv-wsm",
+	1396:  "dvl-activemail",
+	1397:  "audio-activmail",
+	1398:  "video-activmail",
+	1399:  "cadkey-licman",
+	1400:  "cadkey-tablet",
+	1401:  "goldleaf-licman",
+	1402:  "prm-sm-np",
+	1403:  "prm-nm-np",
+	1404:  "igi-lm",
+	1405:  "ibm-res",
+	1406:  "netlabs-lm",
+	1408:  "sophia-lm",
+	1409:  "here-lm",
+	1410:  "hiq",
+	1411:  "af",
+	1412:  "innosys",
+	1413:  "innosys-acl",
+	1414:  "ibm-mqseries",
+	1415:  "dbstar",
+	1416:  "novell-lu6-2",
+	1417:  "timbuktu-srv1",
+	1418:  "timbuktu-srv2",
+	1419:  "timbuktu-srv3",
+	1420:  "timbuktu-srv4",
+	1421:  "gandalf-lm",
+	1422:  "autodesk-lm",
+	1423:  "essbase",
+	1424:  "hybrid",
+	1425:  "zion-lm",
+	1426:  "sais",
+	1427:  "mloadd",
+	1428:  "informatik-lm",
+	1429:  "nms",
+	1430:  "tpdu",
+	1431:  "rgtp",
+	1432:  "blueberry-lm",
+	1433:  "ms-sql-s",
+	1434:  "ms-sql-m",
+	1435:  "ibm-cics",
+	1436:  "saism",
+	1437:  "tabula",
+	1438:  "eicon-server",
+	1439:  "eicon-x25",
+	1440:  "eicon-slp",
+	1441:  "cadis-1",
+	1442:  "cadis-2",
+	1443:  "ies-lm",
+	1444:  "marcam-lm",
+	1445:  "proxima-lm",
+	1446:  "ora-lm",
+	1447:  "apri-lm",
+	1448:  "oc-lm",
+	1449:  "peport",
+	1450:  "dwf",
+	1451:  "infoman",
+	1452:  "gtegsc-lm",
+	1453:  "genie-lm",
+	1454:  "interhdl-elmd",
+	1455:  "esl-lm",
+	1456:  "dca",
+	1457:  "valisys-lm",
+	1458:  "nrcabq-lm",
+	1459:  "proshare1",
+	1460:  "proshare2",
+	1461:  "ibm-wrless-lan",
+	1462:  "world-lm",
+	1463:  "nucleus",
+	1464:  "msl-lmd",
+	1465:  "pipes",
+	1466:  "oceansoft-lm",
+	1467:  "csdmbase",
+	1468:  "csdm",
+	1469:  "aal-lm",
+	1470:  "uaiact",
+	1471:  "csdmbase",
+	1472:  "csdm",
+	1473:  "openmath",
+	1474:  "telefinder",
+	1475:  "taligent-lm",
+	1476:  "clvm-cfg",
+	1477:  "ms-sna-server",
+	1478:  "ms-sna-base",
+	1479:  "dberegister",
+	1480:  "pacerforum",
+	1481:  "airs",
+	1482:  "miteksys-lm",
+	1483:  "afs",
+	1484:  "confluent",
+	1485:  "lansource",
+	1486:  "nms-topo-serv",
+	1487:  "localinfosrvr",
+	1488:  "docstor",
+	1489:  "dmdocbroker",
+	1490:  "insitu-conf",
+	1492:  "stone-design-1",
+	1493:  "netmap-lm",
+	1494:  "ica",
+	1495:  "cvc",
+	1496:  "liberty-lm",
+	1497:  "rfx-lm",
+	1498:  "sybase-sqlany",
+	1499:  "fhc",
+	1500:  "vlsi-lm",
+	1501:  "saiscm",
+	1502:  "shivadiscovery",
+	1503:  "imtc-mcs",
+	1504:  "evb-elm",
+	1505:  "funkproxy",
+	1506:  "utcd",
+	1507:  "symplex",
+	1508:  "diagmond",
+	1509:  "robcad-lm",
+	1510:  "mvx-lm",
+	1511:  "3l-l1",
+	1512:  "wins",
+	1513:  "fujitsu-dtc",
+	1514:  "fujitsu-dtcns",
+	1515:  "ifor-protocol",
+	1516:  "vpad",
+	1517:  "vpac",
+	1518:  "vpvd",
+	1519:  "vpvc",
+	1520:  "atm-zip-office",
+	1521:  "ncube-lm",
+	1522:  "ricardo-lm",
+	1523:  "cichild-lm",
+	1524:  "ingreslock",
+	1525:  "orasrv",
+	1526:  "pdap-np",
+	1527:  "tlisrv",
+	1528:  "ngr-t",
+	1529:  "coauthor",
+	1530:  "rap-service",
+	1531:  "rap-listen",
+	1532:  "miroconnect",
+	1533:  "virtual-places",
+	1534:  "micromuse-lm",
+	1535:  "ampr-info",
+	1536:  "ampr-inter",
+	1537:  "sdsc-lm",
+	1538:  "3ds-lm",
+	1539:  "intellistor-lm",
+	1540:  "rds",
+	1541:  "rds2",
+	1542:  "gridgen-elmd",
+	1543:  "simba-cs",
+	1544:  "aspeclmd",
+	1545:  "vistium-share",
+	1546:  "abbaccuray",
+	1547:  "laplink",
+	1548:  "axon-lm",
+	1549:  "shivasound",
+	1550:  "3m-image-lm",
+	1551:  "hecmtl-db",
+	1552:  "pciarray",
+	1553:  "sna-cs",
+	1554:  "caci-lm",
+	1555:  "livelan",
+	1556:  "veritas-pbx",
+	1557:  "arbortext-lm",
+	1558:  "xingmpeg",
+	1559:  "web2host",
+	1560:  "asci-val",
+	1561:  "facilityview",
+	1562:  "pconnectmgr",
+	1563:  "cadabra-lm",
+	1564:  "pay-per-view",
+	1565:  "winddlb",
+	1566:  "corelvideo",
+	1567:  "jlicelmd",
+	1568:  "tsspmap",
+	1569:  "ets",
+	1570:  "orbixd",
+	1571:  "rdb-dbs-disp",
+	1572:  "chip-lm",
+	1573:  "itscomm-ns",
+	1574:  "mvel-lm",
+	1575:  "oraclenames",
+	1576:  "moldflow-lm",
+	1577:  "hypercube-lm",
+	1578:  "jacobus-lm",
+	1579:  "ioc-sea-lm",
+	1580:  "tn-tl-r2",
+	1581:  "mil-2045-47001",
+	1582:  "msims",
+	1583:  "simbaexpress",
+	1584:  "tn-tl-fd2",
+	1585:  "intv",
+	1586:  "ibm-abtact",
+	1587:  "pra-elmd",
+	1588:  "triquest-lm",
+	1589:  "vqp",
+	1590:  "gemini-lm",
+	1591:  "ncpm-pm",
+	1592:  "commonspace",
+	1593:  "mainsoft-lm",
+	1594:  "sixtrak",
+	1595:  "radio",
+	1596:  "radio-bc",
+	1597:  "orbplus-iiop",
+	1598:  "picknfs",
+	1599:  "simbaservices",
+	1600:  "issd",
+	1601:  "aas",
+	1602:  "inspect",
+	1603:  "picodbc",
+	1604:  "icabrowser",
+	1605:  "slp",
+	1606:  "slm-api",
+	1607:  "stt",
+	1608:  "smart-lm",
+	1609:  "isysg-lm",
+	1610:  "taurus-wh",
+	1611:  "ill",
+	1612:  "netbill-trans",
+	1613:  "netbill-keyrep",
+	1614:  "netbill-cred",
+	1615:  "netbill-auth",
+	1616:  "netbill-prod",
+	1617:  "nimrod-agent",
+	1618:  "skytelnet",
+	1619:  "xs-openstorage",
+	1620:  "faxportwinport",
+	1621:  "softdataphone",
+	1622:  "ontime",
+	1623:  "jaleosnd",
+	1624:  "udp-sr-port",
+	1625:  "svs-omagent",
+	1626:  "shockwave",
+	1627:  "t128-gateway",
+	1628:  "lontalk-norm",
+	1629:  "lontalk-urgnt",
+	1630:  "oraclenet8cman",
+	1631:  "visitview",
+	1632:  "pammratc",
+	1633:  "pammrpc",
+	1634:  "loaprobe",
+	1635:  "edb-server1",
+	1636:  "isdc",
+	1637:  "islc",
+	1638:  "ismc",
+	1639:  "cert-initiator",
+	1640:  "cert-responder",
+	1641:  "invision",
+	1642:  "isis-am",
+	1643:  "isis-ambc",
+	1644:  "saiseh",
+	1645:  "sightline",
+	1646:  "sa-msg-port",
+	1647:  "rsap",
+	1648:  "concurrent-lm",
+	1649:  "kermit",
+	1650:  "nkd",
+	1651:  "shiva-confsrvr",
+	1652:  "xnmp",
+	1653:  "alphatech-lm",
+	1654:  "stargatealerts",
+	1655:  "dec-mbadmin",
+	1656:  "dec-mbadmin-h",
+	1657:  "fujitsu-mmpdc",
+	1658:  "sixnetudr",
+	1659:  "sg-lm",
+	1660:  "skip-mc-gikreq",
+	1661:  "netview-aix-1",
+	1662:  "netview-aix-2",
+	1663:  "netview-aix-3",
+	1664:  "netview-aix-4",
+	1665:  "netview-aix-5",
+	1666:  "netview-aix-6",
+	1667:  "netview-aix-7",
+	1668:  "netview-aix-8",
+	1669:  "netview-aix-9",
+	1670:  "netview-aix-10",
+	1671:  "netview-aix-11",
+	1672:  "netview-aix-12",
+	1673:  "proshare-mc-1",
+	1674:  "proshare-mc-2",
+	1675:  "pdp",
+	1676:  "netcomm2",
+	1677:  "groupwise",
+	1678:  "prolink",
+	1679:  "darcorp-lm",
+	1680:  "microcom-sbp",
+	1681:  "sd-elmd",
+	1682:  "lanyon-lantern",
+	1683:  "ncpm-hip",
+	1684:  "snaresecure",
+	1685:  "n2nremote",
+	1686:  "cvmon",
+	1687:  "nsjtp-ctrl",
+	1688:  "nsjtp-data",
+	1689:  "firefox",
+	1690:  "ng-umds",
+	1691:  "empire-empuma",
+	1692:  "sstsys-lm",
+	1693:  "rrirtr",
+	1694:  "rrimwm",
+	1695:  "rrilwm",
+	1696:  "rrifmm",
+	1697:  "rrisat",
+	1698:  "rsvp-encap-1",
+	1699:  "rsvp-encap-2",
+	1700:  "mps-raft",
+	1701:  "l2f",
+	1702:  "deskshare",
+	1703:  "hb-engine",
+	1704:  "bcs-broker",
+	1705:  "slingshot",
+	1706:  "jetform",
+	1707:  "vdmplay",
+	1708:  "gat-lmd",
+	1709:  "centra",
+	1710:  "impera",
+	1711:  "pptconference",
+	1712:  "registrar",
+	1713:  "conferencetalk",
+	1714:  "sesi-lm",
+	1715:  "houdini-lm",
+	1716:  "xmsg",
+	1717:  "fj-hdnet",
+	1718:  "h323gatedisc",
+	1719:  "h323gatestat",
+	1720:  "h323hostcall",
+	1721:  "caicci",
+	1722:  "hks-lm",
+	1723:  "pptp",
+	1724:  "csbphonemaster",
+	1725:  "iden-ralp",
+	1726:  "iberiagames",
+	1727:  "winddx",
+	1728:  "telindus",
+	1729:  "citynl",
+	1730:  "roketz",
+	1731:  "msiccp",
+	1732:  "proxim",
+	1733:  "siipat",
+	1734:  "cambertx-lm",
+	1735:  "privatechat",
+	1736:  "street-stream",
+	1737:  "ultimad",
+	1738:  "gamegen1",
+	1739:  "webaccess",
+	1740:  "encore",
+	1741:  "cisco-net-mgmt",
+	1742:  "3Com-nsd",
+	1743:  "cinegrfx-lm",
+	1744:  "ncpm-ft",
+	1745:  "remote-winsock",
+	1746:  "ftrapid-1",
+	1747:  "ftrapid-2",
+	1748:  "oracle-em1",
+	1749:  "aspen-services",
+	1750:  "sslp",
+	1751:  "swiftnet",
+	1752:  "lofr-lm",
+	1754:  "oracle-em2",
+	1755:  "ms-streaming",
+	1756:  "capfast-lmd",
+	1757:  "cnhrp",
+	1758:  "tftp-mcast",
+	1759:  "spss-lm",
+	1760:  "www-ldap-gw",
+	1761:  "cft-0",
+	1762:  "cft-1",
+	1763:  "cft-2",
+	1764:  "cft-3",
+	1765:  "cft-4",
+	1766:  "cft-5",
+	1767:  "cft-6",
+	1768:  "cft-7",
+	1769:  "bmc-net-adm",
+	1770:  "bmc-net-svc",
+	1771:  "vaultbase",
+	1772:  "essweb-gw",
+	1773:  "kmscontrol",
+	1774:  "global-dtserv",
+	1776:  "femis",
+	1777:  "powerguardian",
+	1778:  "prodigy-intrnet",
+	1779:  "pharmasoft",
+	1780:  "dpkeyserv",
+	1781:  "answersoft-lm",
+	1782:  "hp-hcip",
+	1784:  "finle-lm",
+	1785:  "windlm",
+	1786:  "funk-logger",
+	1787:  "funk-license",
+	1788:  "psmond",
+	1789:  "hello",
+	1790:  "nmsp",
+	1791:  "ea1",
+	1792:  "ibm-dt-2",
+	1793:  "rsc-robot",
+	1794:  "cera-bcm",
+	1795:  "dpi-proxy",
+	1796:  "vocaltec-admin",
+	1797:  "uma",
+	1798:  "etp",
+	1799:  "netrisk",
+	1800:  "ansys-lm",
+	1801:  "msmq",
+	1802:  "concomp1",
+	1803:  "hp-hcip-gwy",
+	1804:  "enl",
+	1805:  "enl-name",
+	1806:  "musiconline",
+	1807:  "fhsp",
+	1808:  "oracle-vp2",
+	1809:  "oracle-vp1",
+	1810:  "jerand-lm",
+	1811:  "scientia-sdb",
+	1812:  "radius",
+	1813:  "radius-acct",
+	1814:  "tdp-suite",
+	1815:  "mmpft",
+	1816:  "harp",
+	1817:  "rkb-oscs",
+	1818:  "etftp",
+	1819:  "plato-lm",
+	1820:  "mcagent",
+	1821:  "donnyworld",
+	1822:  "es-elmd",
+	1823:  "unisys-lm",
+	1824:  "metrics-pas",
+	1825:  "direcpc-video",
+	1826:  "ardt",
+	1827:  "asi",
+	1828:  "itm-mcell-u",
+	1829:  "optika-emedia",
+	1830:  "net8-cman",
+	1831:  "myrtle",
+	1832:  "tht-treasure",
+	1833:  "udpradio",
+	1834:  "ardusuni",
+	1835:  "ardusmul",
+	1836:  "ste-smsc",
+	1837:  "csoft1",
+	1838:  "talnet",
+	1839:  "netopia-vo1",
+	1840:  "netopia-vo2",
+	1841:  "netopia-vo3",
+	1842:  "netopia-vo4",
+	1843:  "netopia-vo5",
+	1844:  "direcpc-dll",
+	1845:  "altalink",
+	1846:  "tunstall-pnc",
+	1847:  "slp-notify",
+	1848:  "fjdocdist",
+	1849:  "alpha-sms",
+	1850:  "gsi",
+	1851:  "ctcd",
+	1852:  "virtual-time",
+	1853:  "vids-avtp",
+	1854:  "buddy-draw",
+	1855:  "fiorano-rtrsvc",
+	1856:  "fiorano-msgsvc",
+	1857:  "datacaptor",
+	1858:  "privateark",
+	1859:  "gammafetchsvr",
+	1860:  "sunscalar-svc",
+	1861:  "lecroy-vicp",
+	1862:  "mysql-cm-agent",
+	1863:  "msnp",
+	1864:  "paradym-31port",
+	1865:  "entp",
+	1866:  "swrmi",
+	1867:  "udrive",
+	1868:  "viziblebrowser",
+	1869:  "transact",
+	1870:  "sunscalar-dns",
+	1871:  "canocentral0",
+	1872:  "canocentral1",
+	1873:  "fjmpjps",
+	1874:  "fjswapsnp",
+	1875:  "westell-stats",
+	1876:  "ewcappsrv",
+	1877:  "hp-webqosdb",
+	1878:  "drmsmc",
+	1879:  "nettgain-nms",
+	1880:  "vsat-control",
+	1881:  "ibm-mqseries2",
+	1882:  "ecsqdmn",
+	1883:  "mqtt",
+	1884:  "idmaps",
+	1885:  "vrtstrapserver",
+	1886:  "leoip",
+	1887:  "filex-lport",
+	1888:  "ncconfig",
+	1889:  "unify-adapter",
+	1890:  "wilkenlistener",
+	1891:  "childkey-notif",
+	1892:  "childkey-ctrl",
+	1893:  "elad",
+	1894:  "o2server-port",
+	1896:  "b-novative-ls",
+	1897:  "metaagent",
+	1898:  "cymtec-port",
+	1899:  "mc2studios",
+	1900:  "ssdp",
+	1901:  "fjicl-tep-a",
+	1902:  "fjicl-tep-b",
+	1903:  "linkname",
+	1904:  "fjicl-tep-c",
+	1905:  "sugp",
+	1906:  "tpmd",
+	1907:  "intrastar",
+	1908:  "dawn",
+	1909:  "global-wlink",
+	1910:  "ultrabac",
+	1911:  "mtp",
+	1912:  "rhp-iibp",
+	1913:  "armadp",
+	1914:  "elm-momentum",
+	1915:  "facelink",
+	1916:  "persona",
+	1917:  "noagent",
+	1918:  "can-nds",
+	1919:  "can-dch",
+	1920:  "can-ferret",
+	1921:  "noadmin",
+	1922:  "tapestry",
+	1923:  "spice",
+	1924:  "xiip",
+	1925:  "discovery-port",
+	1926:  "egs",
+	1927:  "videte-cipc",
+	1928:  "emsd-port",
+	1929:  "bandwiz-system",
+	1930:  "driveappserver",
+	1931:  "amdsched",
+	1932:  "ctt-broker",
+	1933:  "xmapi",
+	1934:  "xaapi",
+	1935:  "macromedia-fcs",
+	1936:  "jetcmeserver",
+	1937:  "jwserver",
+	1938:  "jwclient",
+	1939:  "jvserver",
+	1940:  "jvclient",
+	1941:  "dic-aida",
+	1942:  "res",
+	1943:  "beeyond-media",
+	1944:  "close-combat",
+	1945:  "dialogic-elmd",
+	1946:  "tekpls",
+	1947:  "sentinelsrm",
+	1948:  "eye2eye",
+	1949:  "ismaeasdaqlive",
+	1950:  "ismaeasdaqtest",
+	1951:  "bcs-lmserver",
+	1952:  "mpnjsc",
+	1953:  "rapidbase",
+	1954:  "abr-api",
+	1955:  "abr-secure",
+	1956:  "vrtl-vmf-ds",
+	1957:  "unix-status",
+	1958:  "dxadmind",
+	1959:  "simp-all",
+	1960:  "nasmanager",
+	1961:  "bts-appserver",
+	1962:  "biap-mp",
+	1963:  "webmachine",
+	1964:  "solid-e-engine",
+	1965:  "tivoli-npm",
+	1966:  "slush",
+	1967:  "sns-quote",
+	1968:  "lipsinc",
+	1969:  "lipsinc1",
+	1970:  "netop-rc",
+	1971:  "netop-school",
+	1972:  "intersys-cache",
+	1973:  "dlsrap",
+	1974:  "drp",
+	1975:  "tcoflashagent",
+	1976:  "tcoregagent",
+	1977:  "tcoaddressbook",
+	1978:  "unisql",
+	1979:  "unisql-java",
+	1980:  "pearldoc-xact",
+	1981:  "p2pq",
+	1982:  "estamp",
+	1983:  "lhtp",
+	1984:  "bb",
+	1985:  "hsrp",
+	1986:  "licensedaemon",
+	1987:  "tr-rsrb-p1",
+	1988:  "tr-rsrb-p2",
+	1989:  "tr-rsrb-p3",
+	1990:  "stun-p1",
+	1991:  "stun-p2",
+	1992:  "stun-p3",
+	1993:  "snmp-tcp-port",
+	1994:  "stun-port",
+	1995:  "perf-port",
+	1996:  "tr-rsrb-port",
+	1997:  "gdp-port",
+	1998:  "x25-svc-port",
+	1999:  "tcp-id-port",
+	2000:  "cisco-sccp",
+	2001:  "wizard",
+	2002:  "globe",
+	2003:  "brutus",
+	2004:  "emce",
+	2005:  "oracle",
+	2006:  "raid-cd",
+	2007:  "raid-am",
+	2008:  "terminaldb",
+	2009:  "whosockami",
+	2010:  "pipe-server",
+	2011:  "servserv",
+	2012:  "raid-ac",
+	2013:  "raid-cd",
+	2014:  "raid-sf",
+	2015:  "raid-cs",
+	2016:  "bootserver",
+	2017:  "bootclient",
+	2018:  "rellpack",
+	2019:  "about",
+	2020:  "xinupageserver",
+	2021:  "xinuexpansion1",
+	2022:  "xinuexpansion2",
+	2023:  "xinuexpansion3",
+	2024:  "xinuexpansion4",
+	2025:  "xribs",
+	2026:  "scrabble",
+	2027:  "shadowserver",
+	2028:  "submitserver",
+	2029:  "hsrpv6",
+	2030:  "device2",
+	2031:  "mobrien-chat",
+	2032:  "blackboard",
+	2033:  "glogger",
+	2034:  "scoremgr",
+	2035:  "imsldoc",
+	2036:  "e-dpnet",
+	2037:  "applus",
+	2038:  "objectmanager",
+	2039:  "prizma",
+	2040:  "lam",
+	2041:  "interbase",
+	2042:  "isis",
+	2043:  "isis-bcast",
+	2044:  "rimsl",
+	2045:  "cdfunc",
+	2046:  "sdfunc",
+	2047:  "dls",
+	2048:  "dls-monitor",
+	2049:  "shilp",
+	2050:  "av-emb-config",
+	2051:  "epnsdp",
+	2052:  "clearvisn",
+	2053:  "lot105-ds-upd",
+	2054:  "weblogin",
+	2055:  "iop",
+	2056:  "omnisky",
+	2057:  "rich-cp",
+	2058:  "newwavesearch",
+	2059:  "bmc-messaging",
+	2060:  "teleniumdaemon",
+	2061:  "netmount",
+	2062:  "icg-swp",
+	2063:  "icg-bridge",
+	2064:  "icg-iprelay",
+	2065:  "dlsrpn",
+	2066:  "aura",
+	2067:  "dlswpn",
+	2068:  "avauthsrvprtcl",
+	2069:  "event-port",
+	2070:  "ah-esp-encap",
+	2071:  "acp-port",
+	2072:  "msync",
+	2073:  "gxs-data-port",
+	2074:  "vrtl-vmf-sa",
+	2075:  "newlixengine",
+	2076:  "newlixconfig",
+	2077:  "tsrmagt",
+	2078:  "tpcsrvr",
+	2079:  "idware-router",
+	2080:  "autodesk-nlm",
+	2081:  "kme-trap-port",
+	2082:  "infowave",
+	2083:  "radsec",
+	2084:  "sunclustergeo",
+	2085:  "ada-cip",
+	2086:  "gnunet",
+	2087:  "eli",
+	2088:  "ip-blf",
+	2089:  "sep",
+	2090:  "lrp",
+	2091:  "prp",
+	2092:  "descent3",
+	2093:  "nbx-cc",
+	2094:  "nbx-au",
+	2095:  "nbx-ser",
+	2096:  "nbx-dir",
+	2097:  "jetformpreview",
+	2098:  "dialog-port",
+	2099:  "h2250-annex-g",
+	2100:  "amiganetfs",
+	2101:  "rtcm-sc104",
+	2102:  "zephyr-srv",
+	2103:  "zephyr-clt",
+	2104:  "zephyr-hm",
+	2105:  "minipay",
+	2106:  "mzap",
+	2107:  "bintec-admin",
+	2108:  "comcam",
+	2109:  "ergolight",
+	2110:  "umsp",
+	2111:  "dsatp",
+	2112:  "idonix-metanet",
+	2113:  "hsl-storm",
+	2114:  "newheights",
+	2115:  "kdm",
+	2116:  "ccowcmr",
+	2117:  "mentaclient",
+	2118:  "mentaserver",
+	2119:  "gsigatekeeper",
+	2120:  "qencp",
+	2121:  "scientia-ssdb",
+	2122:  "caupc-remote",
+	2123:  "gtp-control",
+	2124:  "elatelink",
+	2125:  "lockstep",
+	2126:  "pktcable-cops",
+	2127:  "index-pc-wb",
+	2128:  "net-steward",
+	2129:  "cs-live",
+	2130:  "xds",
+	2131:  "avantageb2b",
+	2132:  "solera-epmap",
+	2133:  "zymed-zpp",
+	2134:  "avenue",
+	2135:  "gris",
+	2136:  "appworxsrv",
+	2137:  "connect",
+	2138:  "unbind-cluster",
+	2139:  "ias-auth",
+	2140:  "ias-reg",
+	2141:  "ias-admind",
+	2142:  "tdmoip",
+	2143:  "lv-jc",
+	2144:  "lv-ffx",
+	2145:  "lv-pici",
+	2146:  "lv-not",
+	2147:  "lv-auth",
+	2148:  "veritas-ucl",
+	2149:  "acptsys",
+	2150:  "dynamic3d",
+	2151:  "docent",
+	2152:  "gtp-user",
+	2153:  "ctlptc",
+	2154:  "stdptc",
+	2155:  "brdptc",
+	2156:  "trp",
+	2157:  "xnds",
+	2158:  "touchnetplus",
+	2159:  "gdbremote",
+	2160:  "apc-2160",
+	2161:  "apc-2161",
+	2162:  "navisphere",
+	2163:  "navisphere-sec",
+	2164:  "ddns-v3",
+	2165:  "x-bone-api",
+	2166:  "iwserver",
+	2167:  "raw-serial",
+	2168:  "easy-soft-mux",
+	2169:  "brain",
+	2170:  "eyetv",
+	2171:  "msfw-storage",
+	2172:  "msfw-s-storage",
+	2173:  "msfw-replica",
+	2174:  "msfw-array",
+	2175:  "airsync",
+	2176:  "rapi",
+	2177:  "qwave",
+	2178:  "bitspeer",
+	2179:  "vmrdp",
+	2180:  "mc-gt-srv",
+	2181:  "eforward",
+	2182:  "cgn-stat",
+	2183:  "cgn-config",
+	2184:  "nvd",
+	2185:  "onbase-dds",
+	2186:  "gtaua",
+	2187:  "ssmd",
+	2190:  "tivoconnect",
+	2191:  "tvbus",
+	2192:  "asdis",
+	2193:  "drwcs",
+	2197:  "mnp-exchange",
+	2198:  "onehome-remote",
+	2199:  "onehome-help",
+	2200:  "ici",
+	2201:  "ats",
+	2202:  "imtc-map",
+	2203:  "b2-runtime",
+	2204:  "b2-license",
+	2205:  "jps",
+	2206:  "hpocbus",
+	2207:  "hpssd",
+	2208:  "hpiod",
+	2209:  "rimf-ps",
+	2210:  "noaaport",
+	2211:  "emwin",
+	2212:  "leecoposserver",
+	2213:  "kali",
+	2214:  "rpi",
+	2215:  "ipcore",
+	2216:  "vtu-comms",
+	2217:  "gotodevice",
+	2218:  "bounzza",
+	2219:  "netiq-ncap",
+	2220:  "netiq",
+	2221:  "ethernet-ip-s",
+	2222:  "EtherNet-IP-1",
+	2223:  "rockwell-csp2",
+	2224:  "efi-mg",
+	2226:  "di-drm",
+	2227:  "di-msg",
+	2228:  "ehome-ms",
+	2229:  "datalens",
+	2230:  "queueadm",
+	2231:  "wimaxasncp",
+	2232:  "ivs-video",
+	2233:  "infocrypt",
+	2234:  "directplay",
+	2235:  "sercomm-wlink",
+	2236:  "nani",
+	2237:  "optech-port1-lm",
+	2238:  "aviva-sna",
+	2239:  "imagequery",
+	2240:  "recipe",
+	2241:  "ivsd",
+	2242:  "foliocorp",
+	2243:  "magicom",
+	2244:  "nmsserver",
+	2245:  "hao",
+	2246:  "pc-mta-addrmap",
+	2247:  "antidotemgrsvr",
+	2248:  "ums",
+	2249:  "rfmp",
+	2250:  "remote-collab",
+	2251:  "dif-port",
+	2252:  "njenet-ssl",
+	2253:  "dtv-chan-req",
+	2254:  "seispoc",
+	2255:  "vrtp",
+	2256:  "pcc-mfp",
+	2257:  "simple-tx-rx",
+	2258:  "rcts",
+	2260:  "apc-2260",
+	2261:  "comotionmaster",
+	2262:  "comotionback",
+	2263:  "ecwcfg",
+	2264:  "apx500api-1",
+	2265:  "apx500api-2",
+	2266:  "mfserver",
+	2267:  "ontobroker",
+	2268:  "amt",
+	2269:  "mikey",
+	2270:  "starschool",
+	2271:  "mmcals",
+	2272:  "mmcal",
+	2273:  "mysql-im",
+	2274:  "pcttunnell",
+	2275:  "ibridge-data",
+	2276:  "ibridge-mgmt",
+	2277:  "bluectrlproxy",
+	2278:  "s3db",
+	2279:  "xmquery",
+	2280:  "lnvpoller",
+	2281:  "lnvconsole",
+	2282:  "lnvalarm",
+	2283:  "lnvstatus",
+	2284:  "lnvmaps",
+	2285:  "lnvmailmon",
+	2286:  "nas-metering",
+	2287:  "dna",
+	2288:  "netml",
+	2289:  "dict-lookup",
+	2290:  "sonus-logging",
+	2291:  "eapsp",
+	2292:  "mib-streaming",
+	2293:  "npdbgmngr",
+	2294:  "konshus-lm",
+	2295:  "advant-lm",
+	2296:  "theta-lm",
+	2297:  "d2k-datamover1",
+	2298:  "d2k-datamover2",
+	2299:  "pc-telecommute",
+	2300:  "cvmmon",
+	2301:  "cpq-wbem",
+	2302:  "binderysupport",
+	2303:  "proxy-gateway",
+	2304:  "attachmate-uts",
+	2305:  "mt-scaleserver",
+	2306:  "tappi-boxnet",
+	2307:  "pehelp",
+	2308:  "sdhelp",
+	2309:  "sdserver",
+	2310:  "sdclient",
+	2311:  "messageservice",
+	2312:  "wanscaler",
+	2313:  "iapp",
+	2314:  "cr-websystems",
+	2315:  "precise-sft",
+	2316:  "sent-lm",
+	2317:  "attachmate-g32",
+	2318:  "cadencecontrol",
+	2319:  "infolibria",
+	2320:  "siebel-ns",
+	2321:  "rdlap",
+	2322:  "ofsd",
+	2323:  "3d-nfsd",
+	2324:  "cosmocall",
+	2325:  "ansysli",
+	2326:  "idcp",
+	2327:  "xingcsm",
+	2328:  "netrix-sftm",
+	2329:  "nvd",
+	2330:  "tscchat",
+	2331:  "agentview",
+	2332:  "rcc-host",
+	2333:  "snapp",
+	2334:  "ace-client",
+	2335:  "ace-proxy",
+	2336:  "appleugcontrol",
+	2337:  "ideesrv",
+	2338:  "norton-lambert",
+	2339:  "3com-webview",
+	2340:  "wrs-registry",
+	2341:  "xiostatus",
+	2342:  "manage-exec",
+	2343:  "nati-logos",
+	2344:  "fcmsys",
+	2345:  "dbm",
+	2346:  "redstorm-join",
+	2347:  "redstorm-find",
+	2348:  "redstorm-info",
+	2349:  "redstorm-diag",
+	2350:  "psbserver",
+	2351:  "psrserver",
+	2352:  "pslserver",
+	2353:  "pspserver",
+	2354:  "psprserver",
+	2355:  "psdbserver",
+	2356:  "gxtelmd",
+	2357:  "unihub-server",
+	2358:  "futrix",
+	2359:  "flukeserver",
+	2360:  "nexstorindltd",
+	2361:  "tl1",
+	2362:  "digiman",
+	2363:  "mediacntrlnfsd",
+	2364:  "oi-2000",
+	2365:  "dbref",
+	2366:  "qip-login",
+	2367:  "service-ctrl",
+	2368:  "opentable",
+	2370:  "l3-hbmon",
+	2372:  "lanmessenger",
+	2381:  "compaq-https",
+	2382:  "ms-olap3",
+	2383:  "ms-olap4",
+	2384:  "sd-capacity",
+	2385:  "sd-data",
+	2386:  "virtualtape",
+	2387:  "vsamredirector",
+	2388:  "mynahautostart",
+	2389:  "ovsessionmgr",
+	2390:  "rsmtp",
+	2391:  "3com-net-mgmt",
+	2392:  "tacticalauth",
+	2393:  "ms-olap1",
+	2394:  "ms-olap2",
+	2395:  "lan900-remote",
+	2396:  "wusage",
+	2397:  "ncl",
+	2398:  "orbiter",
+	2399:  "fmpro-fdal",
+	2400:  "opequus-server",
+	2401:  "cvspserver",
+	2402:  "taskmaster2000",
+	2403:  "taskmaster2000",
+	2404:  "iec-104",
+	2405:  "trc-netpoll",
+	2406:  "jediserver",
+	2407:  "orion",
+	2409:  "sns-protocol",
+	2410:  "vrts-registry",
+	2411:  "netwave-ap-mgmt",
+	2412:  "cdn",
+	2413:  "orion-rmi-reg",
+	2414:  "beeyond",
+	2415:  "codima-rtp",
+	2416:  "rmtserver",
+	2417:  "composit-server",
+	2418:  "cas",
+	2419:  "attachmate-s2s",
+	2420:  "dslremote-mgmt",
+	2421:  "g-talk",
+	2422:  "crmsbits",
+	2423:  "rnrp",
+	2424:  "kofax-svr",
+	2425:  "fjitsuappmgr",
+	2426:  "vcmp",
+	2427:  "mgcp-gateway",
+	2428:  "ott",
+	2429:  "ft-role",
+	2430:  "venus",
+	2431:  "venus-se",
+	2432:  "codasrv",
+	2433:  "codasrv-se",
+	2434:  "pxc-epmap",
+	2435:  "optilogic",
+	2436:  "topx",
+	2437:  "unicontrol",
+	2438:  "msp",
+	2439:  "sybasedbsynch",
+	2440:  "spearway",
+	2441:  "pvsw-inet",
+	2442:  "netangel",
+	2443:  "powerclientcsf",
+	2444:  "btpp2sectrans",
+	2445:  "dtn1",
+	2446:  "bues-service",
+	2447:  "ovwdb",
+	2448:  "hpppssvr",
+	2449:  "ratl",
+	2450:  "netadmin",
+	2451:  "netchat",
+	2452:  "snifferclient",
+	2453:  "madge-ltd",
+	2454:  "indx-dds",
+	2455:  "wago-io-system",
+	2456:  "altav-remmgt",
+	2457:  "rapido-ip",
+	2458:  "griffin",
+	2459:  "community",
+	2460:  "ms-theater",
+	2461:  "qadmifoper",
+	2462:  "qadmifevent",
+	2463:  "lsi-raid-mgmt",
+	2464:  "direcpc-si",
+	2465:  "lbm",
+	2466:  "lbf",
+	2467:  "high-criteria",
+	2468:  "qip-msgd",
+	2469:  "mti-tcs-comm",
+	2470:  "taskman-port",
+	2471:  "seaodbc",
+	2472:  "c3",
+	2473:  "aker-cdp",
+	2474:  "vitalanalysis",
+	2475:  "ace-server",
+	2476:  "ace-svr-prop",
+	2477:  "ssm-cvs",
+	2478:  "ssm-cssps",
+	2479:  "ssm-els",
+	2480:  "powerexchange",
+	2481:  "giop",
+	2482:  "giop-ssl",
+	2483:  "ttc",
+	2484:  "ttc-ssl",
+	2485:  "netobjects1",
+	2486:  "netobjects2",
+	2487:  "pns",
+	2488:  "moy-corp",
+	2489:  "tsilb",
+	2490:  "qip-qdhcp",
+	2491:  "conclave-cpp",
+	2492:  "groove",
+	2493:  "talarian-mqs",
+	2494:  "bmc-ar",
+	2495:  "fast-rem-serv",
+	2496:  "dirgis",
+	2497:  "quaddb",
+	2498:  "odn-castraq",
+	2499:  "unicontrol",
+	2500:  "rtsserv",
+	2501:  "rtsclient",
+	2502:  "kentrox-prot",
+	2503:  "nms-dpnss",
+	2504:  "wlbs",
+	2505:  "ppcontrol",
+	2506:  "jbroker",
+	2507:  "spock",
+	2508:  "jdatastore",
+	2509:  "fjmpss",
+	2510:  "fjappmgrbulk",
+	2511:  "metastorm",
+	2512:  "citrixima",
+	2513:  "citrixadmin",
+	2514:  "facsys-ntp",
+	2515:  "facsys-router",
+	2516:  "maincontrol",
+	2517:  "call-sig-trans",
+	2518:  "willy",
+	2519:  "globmsgsvc",
+	2520:  "pvsw",
+	2521:  "adaptecmgr",
+	2522:  "windb",
+	2523:  "qke-llc-v3",
+	2524:  "optiwave-lm",
+	2525:  "ms-v-worlds",
+	2526:  "ema-sent-lm",
+	2527:  "iqserver",
+	2528:  "ncr-ccl",
+	2529:  "utsftp",
+	2530:  "vrcommerce",
+	2531:  "ito-e-gui",
+	2532:  "ovtopmd",
+	2533:  "snifferserver",
+	2534:  "combox-web-acc",
+	2535:  "madcap",
+	2536:  "btpp2audctr1",
+	2537:  "upgrade",
+	2538:  "vnwk-prapi",
+	2539:  "vsiadmin",
+	2540:  "lonworks",
+	2541:  "lonworks2",
+	2542:  "udrawgraph",
+	2543:  "reftek",
+	2544:  "novell-zen",
+	2545:  "sis-emt",
+	2546:  "vytalvaultbrtp",
+	2547:  "vytalvaultvsmp",
+	2548:  "vytalvaultpipe",
+	2549:  "ipass",
+	2550:  "ads",
+	2551:  "isg-uda-server",
+	2552:  "call-logging",
+	2553:  "efidiningport",
+	2554:  "vcnet-link-v10",
+	2555:  "compaq-wcp",
+	2556:  "nicetec-nmsvc",
+	2557:  "nicetec-mgmt",
+	2558:  "pclemultimedia",
+	2559:  "lstp",
+	2560:  "labrat",
+	2561:  "mosaixcc",
+	2562:  "delibo",
+	2563:  "cti-redwood",
+	2564:  "hp-3000-telnet",
+	2565:  "coord-svr",
+	2566:  "pcs-pcw",
+	2567:  "clp",
+	2568:  "spamtrap",
+	2569:  "sonuscallsig",
+	2570:  "hs-port",
+	2571:  "cecsvc",
+	2572:  "ibp",
+	2573:  "trustestablish",
+	2574:  "blockade-bpsp",
+	2575:  "hl7",
+	2576:  "tclprodebugger",
+	2577:  "scipticslsrvr",
+	2578:  "rvs-isdn-dcp",
+	2579:  "mpfoncl",
+	2580:  "tributary",
+	2581:  "argis-te",
+	2582:  "argis-ds",
+	2583:  "mon",
+	2584:  "cyaserv",
+	2585:  "netx-server",
+	2586:  "netx-agent",
+	2587:  "masc",
+	2588:  "privilege",
+	2589:  "quartus-tcl",
+	2590:  "idotdist",
+	2591:  "maytagshuffle",
+	2592:  "netrek",
+	2593:  "mns-mail",
+	2594:  "dts",
+	2595:  "worldfusion1",
+	2596:  "worldfusion2",
+	2597:  "homesteadglory",
+	2598:  "citriximaclient",
+	2599:  "snapd",
+	2600:  "hpstgmgr",
+	2601:  "discp-client",
+	2602:  "discp-server",
+	2603:  "servicemeter",
+	2604:  "nsc-ccs",
+	2605:  "nsc-posa",
+	2606:  "netmon",
+	2607:  "connection",
+	2608:  "wag-service",
+	2609:  "system-monitor",
+	2610:  "versa-tek",
+	2611:  "lionhead",
+	2612:  "qpasa-agent",
+	2613:  "smntubootstrap",
+	2614:  "neveroffline",
+	2615:  "firepower",
+	2616:  "appswitch-emp",
+	2617:  "cmadmin",
+	2618:  "priority-e-com",
+	2619:  "bruce",
+	2620:  "lpsrecommender",
+	2621:  "miles-apart",
+	2622:  "metricadbc",
+	2623:  "lmdp",
+	2624:  "aria",
+	2625:  "blwnkl-port",
+	2626:  "gbjd816",
+	2627:  "moshebeeri",
+	2628:  "dict",
+	2629:  "sitaraserver",
+	2630:  "sitaramgmt",
+	2631:  "sitaradir",
+	2632:  "irdg-post",
+	2633:  "interintelli",
+	2634:  "pk-electronics",
+	2635:  "backburner",
+	2636:  "solve",
+	2637:  "imdocsvc",
+	2638:  "sybaseanywhere",
+	2639:  "aminet",
+	2640:  "ami-control",
+	2641:  "hdl-srv",
+	2642:  "tragic",
+	2643:  "gte-samp",
+	2644:  "travsoft-ipx-t",
+	2645:  "novell-ipx-cmd",
+	2646:  "and-lm",
+	2647:  "syncserver",
+	2648:  "upsnotifyprot",
+	2649:  "vpsipport",
+	2650:  "eristwoguns",
+	2651:  "ebinsite",
+	2652:  "interpathpanel",
+	2653:  "sonus",
+	2654:  "corel-vncadmin",
+	2655:  "unglue",
+	2656:  "kana",
+	2657:  "sns-dispatcher",
+	2658:  "sns-admin",
+	2659:  "sns-query",
+	2660:  "gcmonitor",
+	2661:  "olhost",
+	2662:  "bintec-capi",
+	2663:  "bintec-tapi",
+	2664:  "patrol-mq-gm",
+	2665:  "patrol-mq-nm",
+	2666:  "extensis",
+	2667:  "alarm-clock-s",
+	2668:  "alarm-clock-c",
+	2669:  "toad",
+	2670:  "tve-announce",
+	2671:  "newlixreg",
+	2672:  "nhserver",
+	2673:  "firstcall42",
+	2674:  "ewnn",
+	2675:  "ttc-etap",
+	2676:  "simslink",
+	2677:  "gadgetgate1way",
+	2678:  "gadgetgate2way",
+	2679:  "syncserverssl",
+	2680:  "pxc-sapxom",
+	2681:  "mpnjsomb",
+	2683:  "ncdloadbalance",
+	2684:  "mpnjsosv",
+	2685:  "mpnjsocl",
+	2686:  "mpnjsomg",
+	2687:  "pq-lic-mgmt",
+	2688:  "md-cg-http",
+	2689:  "fastlynx",
+	2690:  "hp-nnm-data",
+	2691:  "itinternet",
+	2692:  "admins-lms",
+	2694:  "pwrsevent",
+	2695:  "vspread",
+	2696:  "unifyadmin",
+	2697:  "oce-snmp-trap",
+	2698:  "mck-ivpip",
+	2699:  "csoft-plusclnt",
+	2700:  "tqdata",
+	2701:  "sms-rcinfo",
+	2702:  "sms-xfer",
+	2703:  "sms-chat",
+	2704:  "sms-remctrl",
+	2705:  "sds-admin",
+	2706:  "ncdmirroring",
+	2707:  "emcsymapiport",
+	2708:  "banyan-net",
+	2709:  "supermon",
+	2710:  "sso-service",
+	2711:  "sso-control",
+	2712:  "aocp",
+	2713:  "raventbs",
+	2714:  "raventdm",
+	2715:  "hpstgmgr2",
+	2716:  "inova-ip-disco",
+	2717:  "pn-requester",
+	2718:  "pn-requester2",
+	2719:  "scan-change",
+	2720:  "wkars",
+	2721:  "smart-diagnose",
+	2722:  "proactivesrvr",
+	2723:  "watchdog-nt",
+	2724:  "qotps",
+	2725:  "msolap-ptp2",
+	2726:  "tams",
+	2727:  "mgcp-callagent",
+	2728:  "sqdr",
+	2729:  "tcim-control",
+	2730:  "nec-raidplus",
+	2731:  "fyre-messanger",
+	2732:  "g5m",
+	2733:  "signet-ctf",
+	2734:  "ccs-software",
+	2735:  "netiq-mc",
+	2736:  "radwiz-nms-srv",
+	2737:  "srp-feedback",
+	2738:  "ndl-tcp-ois-gw",
+	2739:  "tn-timing",
+	2740:  "alarm",
+	2741:  "tsb",
+	2742:  "tsb2",
+	2743:  "murx",
+	2744:  "honyaku",
+	2745:  "urbisnet",
+	2746:  "cpudpencap",
+	2747:  "fjippol-swrly",
+	2748:  "fjippol-polsvr",
+	2749:  "fjippol-cnsl",
+	2750:  "fjippol-port1",
+	2751:  "fjippol-port2",
+	2752:  "rsisysaccess",
+	2753:  "de-spot",
+	2754:  "apollo-cc",
+	2755:  "expresspay",
+	2756:  "simplement-tie",
+	2757:  "cnrp",
+	2758:  "apollo-status",
+	2759:  "apollo-gms",
+	2760:  "sabams",
+	2761:  "dicom-iscl",
+	2762:  "dicom-tls",
+	2763:  "desktop-dna",
+	2764:  "data-insurance",
+	2765:  "qip-audup",
+	2766:  "compaq-scp",
+	2767:  "uadtc",
+	2768:  "uacs",
+	2769:  "exce",
+	2770:  "veronica",
+	2771:  "vergencecm",
+	2772:  "auris",
+	2773:  "rbakcup1",
+	2774:  "rbakcup2",
+	2775:  "smpp",
+	2776:  "ridgeway1",
+	2777:  "ridgeway2",
+	2778:  "gwen-sonya",
+	2779:  "lbc-sync",
+	2780:  "lbc-control",
+	2781:  "whosells",
+	2782:  "everydayrc",
+	2783:  "aises",
+	2784:  "www-dev",
+	2785:  "aic-np",
+	2786:  "aic-oncrpc",
+	2787:  "piccolo",
+	2788:  "fryeserv",
+	2789:  "media-agent",
+	2790:  "plgproxy",
+	2791:  "mtport-regist",
+	2792:  "f5-globalsite",
+	2793:  "initlsmsad",
+	2795:  "livestats",
+	2796:  "ac-tech",
+	2797:  "esp-encap",
+	2798:  "tmesis-upshot",
+	2799:  "icon-discover",
+	2800:  "acc-raid",
+	2801:  "igcp",
+	2802:  "veritas-udp1",
+	2803:  "btprjctrl",
+	2804:  "dvr-esm",
+	2805:  "wta-wsp-s",
+	2806:  "cspuni",
+	2807:  "cspmulti",
+	2808:  "j-lan-p",
+	2809:  "corbaloc",
+	2810:  "netsteward",
+	2811:  "gsiftp",
+	2812:  "atmtcp",
+	2813:  "llm-pass",
+	2814:  "llm-csv",
+	2815:  "lbc-measure",
+	2816:  "lbc-watchdog",
+	2817:  "nmsigport",
+	2818:  "rmlnk",
+	2819:  "fc-faultnotify",
+	2820:  "univision",
+	2821:  "vrts-at-port",
+	2822:  "ka0wuc",
+	2823:  "cqg-netlan",
+	2824:  "cqg-netlan-1",
+	2826:  "slc-systemlog",
+	2827:  "slc-ctrlrloops",
+	2828:  "itm-lm",
+	2829:  "silkp1",
+	2830:  "silkp2",
+	2831:  "silkp3",
+	2832:  "silkp4",
+	2833:  "glishd",
+	2834:  "evtp",
+	2835:  "evtp-data",
+	2836:  "catalyst",
+	2837:  "repliweb",
+	2838:  "starbot",
+	2839:  "nmsigport",
+	2840:  "l3-exprt",
+	2841:  "l3-ranger",
+	2842:  "l3-hawk",
+	2843:  "pdnet",
+	2844:  "bpcp-poll",
+	2845:  "bpcp-trap",
+	2846:  "aimpp-hello",
+	2847:  "aimpp-port-req",
+	2848:  "amt-blc-port",
+	2849:  "fxp",
+	2850:  "metaconsole",
+	2851:  "webemshttp",
+	2852:  "bears-01",
+	2853:  "ispipes",
+	2854:  "infomover",
+	2856:  "cesdinv",
+	2857:  "simctlp",
+	2858:  "ecnp",
+	2859:  "activememory",
+	2860:  "dialpad-voice1",
+	2861:  "dialpad-voice2",
+	2862:  "ttg-protocol",
+	2863:  "sonardata",
+	2864:  "astromed-main",
+	2865:  "pit-vpn",
+	2866:  "iwlistener",
+	2867:  "esps-portal",
+	2868:  "npep-messaging",
+	2869:  "icslap",
+	2870:  "daishi",
+	2871:  "msi-selectplay",
+	2872:  "radix",
+	2874:  "dxmessagebase1",
+	2875:  "dxmessagebase2",
+	2876:  "sps-tunnel",
+	2877:  "bluelance",
+	2878:  "aap",
+	2879:  "ucentric-ds",
+	2880:  "synapse",
+	2881:  "ndsp",
+	2882:  "ndtp",
+	2883:  "ndnp",
+	2884:  "flashmsg",
+	2885:  "topflow",
+	2886:  "responselogic",
+	2887:  "aironetddp",
+	2888:  "spcsdlobby",
+	2889:  "rsom",
+	2890:  "cspclmulti",
+	2891:  "cinegrfx-elmd",
+	2892:  "snifferdata",
+	2893:  "vseconnector",
+	2894:  "abacus-remote",
+	2895:  "natuslink",
+	2896:  "ecovisiong6-1",
+	2897:  "citrix-rtmp",
+	2898:  "appliance-cfg",
+	2899:  "powergemplus",
+	2900:  "quicksuite",
+	2901:  "allstorcns",
+	2902:  "netaspi",
+	2903:  "suitcase",
+	2904:  "m2ua",
+	2906:  "caller9",
+	2907:  "webmethods-b2b",
+	2908:  "mao",
+	2909:  "funk-dialout",
+	2910:  "tdaccess",
+	2911:  "blockade",
+	2912:  "epicon",
+	2913:  "boosterware",
+	2914:  "gamelobby",
+	2915:  "tksocket",
+	2916:  "elvin-server",
+	2917:  "elvin-client",
+	2918:  "kastenchasepad",
+	2919:  "roboer",
+	2920:  "roboeda",
+	2921:  "cesdcdman",
+	2922:  "cesdcdtrn",
+	2923:  "wta-wsp-wtp-s",
+	2924:  "precise-vip",
+	2926:  "mobile-file-dl",
+	2927:  "unimobilectrl",
+	2928:  "redstone-cpss",
+	2929:  "amx-webadmin",
+	2930:  "amx-weblinx",
+	2931:  "circle-x",
+	2932:  "incp",
+	2933:  "4-tieropmgw",
+	2934:  "4-tieropmcli",
+	2935:  "qtp",
+	2936:  "otpatch",
+	2937:  "pnaconsult-lm",
+	2938:  "sm-pas-1",
+	2939:  "sm-pas-2",
+	2940:  "sm-pas-3",
+	2941:  "sm-pas-4",
+	2942:  "sm-pas-5",
+	2943:  "ttnrepository",
+	2944:  "megaco-h248",
+	2945:  "h248-binary",
+	2946:  "fjsvmpor",
+	2947:  "gpsd",
+	2948:  "wap-push",
+	2949:  "wap-pushsecure",
+	2950:  "esip",
+	2951:  "ottp",
+	2952:  "mpfwsas",
+	2953:  "ovalarmsrv",
+	2954:  "ovalarmsrv-cmd",
+	2955:  "csnotify",
+	2956:  "ovrimosdbman",
+	2957:  "jmact5",
+	2958:  "jmact6",
+	2959:  "rmopagt",
+	2960:  "dfoxserver",
+	2961:  "boldsoft-lm",
+	2962:  "iph-policy-cli",
+	2963:  "iph-policy-adm",
+	2964:  "bullant-srap",
+	2965:  "bullant-rap",
+	2966:  "idp-infotrieve",
+	2967:  "ssc-agent",
+	2968:  "enpp",
+	2969:  "essp",
+	2970:  "index-net",
+	2971:  "netclip",
+	2972:  "pmsm-webrctl",
+	2973:  "svnetworks",
+	2974:  "signal",
+	2975:  "fjmpcm",
+	2976:  "cns-srv-port",
+	2977:  "ttc-etap-ns",
+	2978:  "ttc-etap-ds",
+	2979:  "h263-video",
+	2980:  "wimd",
+	2981:  "mylxamport",
+	2982:  "iwb-whiteboard",
+	2983:  "netplan",
+	2984:  "hpidsadmin",
+	2985:  "hpidsagent",
+	2986:  "stonefalls",
+	2987:  "identify",
+	2988:  "hippad",
+	2989:  "zarkov",
+	2990:  "boscap",
+	2991:  "wkstn-mon",
+	2992:  "avenyo",
+	2993:  "veritas-vis1",
+	2994:  "veritas-vis2",
+	2995:  "idrs",
+	2996:  "vsixml",
+	2997:  "rebol",
+	2998:  "realsecure",
+	2999:  "remoteware-un",
+	3000:  "hbci",
+	3002:  "exlm-agent",
+	3003:  "cgms",
+	3004:  "csoftragent",
+	3005:  "geniuslm",
+	3006:  "ii-admin",
+	3007:  "lotusmtap",
+	3008:  "midnight-tech",
+	3009:  "pxc-ntfy",
+	3010:  "ping-pong",
+	3011:  "trusted-web",
+	3012:  "twsdss",
+	3013:  "gilatskysurfer",
+	3014:  "broker-service",
+	3015:  "nati-dstp",
+	3016:  "notify-srvr",
+	3017:  "event-listener",
+	3018:  "srvc-registry",
+	3019:  "resource-mgr",
+	3020:  "cifs",
+	3021:  "agriserver",
+	3022:  "csregagent",
+	3023:  "magicnotes",
+	3024:  "nds-sso",
+	3025:  "arepa-raft",
+	3026:  "agri-gateway",
+	3027:  "LiebDevMgmt-C",
+	3028:  "LiebDevMgmt-DM",
+	3029:  "LiebDevMgmt-A",
+	3030:  "arepa-cas",
+	3031:  "eppc",
+	3032:  "redwood-chat",
+	3033:  "pdb",
+	3034:  "osmosis-aeea",
+	3035:  "fjsv-gssagt",
+	3036:  "hagel-dump",
+	3037:  "hp-san-mgmt",
+	3038:  "santak-ups",
+	3039:  "cogitate",
+	3040:  "tomato-springs",
+	3041:  "di-traceware",
+	3042:  "journee",
+	3043:  "brp",
+	3044:  "epp",
+	3045:  "responsenet",
+	3046:  "di-ase",
+	3047:  "hlserver",
+	3048:  "pctrader",
+	3049:  "nsws",
+	3050:  "gds-db",
+	3051:  "galaxy-server",
+	3052:  "apc-3052",
+	3053:  "dsom-server",
+	3054:  "amt-cnf-prot",
+	3055:  "policyserver",
+	3056:  "cdl-server",
+	3057:  "goahead-fldup",
+	3058:  "videobeans",
+	3059:  "qsoft",
+	3060:  "interserver",
+	3061:  "cautcpd",
+	3062:  "ncacn-ip-tcp",
+	3063:  "ncadg-ip-udp",
+	3064:  "rprt",
+	3065:  "slinterbase",
+	3066:  "netattachsdmp",
+	3067:  "fjhpjp",
+	3068:  "ls3bcast",
+	3069:  "ls3",
+	3070:  "mgxswitch",
+	3072:  "csd-monitor",
+	3073:  "vcrp",
+	3074:  "xbox",
+	3075:  "orbix-locator",
+	3076:  "orbix-config",
+	3077:  "orbix-loc-ssl",
+	3078:  "orbix-cfg-ssl",
+	3079:  "lv-frontpanel",
+	3080:  "stm-pproc",
+	3081:  "tl1-lv",
+	3082:  "tl1-raw",
+	3083:  "tl1-telnet",
+	3084:  "itm-mccs",
+	3085:  "pcihreq",
+	3086:  "jdl-dbkitchen",
+	3087:  "asoki-sma",
+	3088:  "xdtp",
+	3089:  "ptk-alink",
+	3090:  "stss",
+	3091:  "1ci-smcs",
+	3093:  "rapidmq-center",
+	3094:  "rapidmq-reg",
+	3095:  "panasas",
+	3096:  "ndl-aps",
+	3098:  "umm-port",
+	3099:  "chmd",
+	3100:  "opcon-xps",
+	3101:  "hp-pxpib",
+	3102:  "slslavemon",
+	3103:  "autocuesmi",
+	3104:  "autocuetime",
+	3105:  "cardbox",
+	3106:  "cardbox-http",
+	3107:  "business",
+	3108:  "geolocate",
+	3109:  "personnel",
+	3110:  "sim-control",
+	3111:  "wsynch",
+	3112:  "ksysguard",
+	3113:  "cs-auth-svr",
+	3114:  "ccmad",
+	3115:  "mctet-master",
+	3116:  "mctet-gateway",
+	3117:  "mctet-jserv",
+	3118:  "pkagent",
+	3119:  "d2000kernel",
+	3120:  "d2000webserver",
+	3122:  "vtr-emulator",
+	3123:  "edix",
+	3124:  "beacon-port",
+	3125:  "a13-an",
+	3127:  "ctx-bridge",
+	3128:  "ndl-aas",
+	3129:  "netport-id",
+	3130:  "icpv2",
+	3131:  "netbookmark",
+	3132:  "ms-rule-engine",
+	3133:  "prism-deploy",
+	3134:  "ecp",
+	3135:  "peerbook-port",
+	3136:  "grubd",
+	3137:  "rtnt-1",
+	3138:  "rtnt-2",
+	3139:  "incognitorv",
+	3140:  "ariliamulti",
+	3141:  "vmodem",
+	3142:  "rdc-wh-eos",
+	3143:  "seaview",
+	3144:  "tarantella",
+	3145:  "csi-lfap",
+	3146:  "bears-02",
+	3147:  "rfio",
+	3148:  "nm-game-admin",
+	3149:  "nm-game-server",
+	3150:  "nm-asses-admin",
+	3151:  "nm-assessor",
+	3152:  "feitianrockey",
+	3153:  "s8-client-port",
+	3154:  "ccmrmi",
+	3155:  "jpegmpeg",
+	3156:  "indura",
+	3157:  "e3consultants",
+	3158:  "stvp",
+	3159:  "navegaweb-port",
+	3160:  "tip-app-server",
+	3161:  "doc1lm",
+	3162:  "sflm",
+	3163:  "res-sap",
+	3164:  "imprs",
+	3165:  "newgenpay",
+	3166:  "sossecollector",
+	3167:  "nowcontact",
+	3168:  "poweronnud",
+	3169:  "serverview-as",
+	3170:  "serverview-asn",
+	3171:  "serverview-gf",
+	3172:  "serverview-rm",
+	3173:  "serverview-icc",
+	3174:  "armi-server",
+	3175:  "t1-e1-over-ip",
+	3176:  "ars-master",
+	3177:  "phonex-port",
+	3178:  "radclientport",
+	3179:  "h2gf-w-2m",
+	3180:  "mc-brk-srv",
+	3181:  "bmcpatrolagent",
+	3182:  "bmcpatrolrnvu",
+	3183:  "cops-tls",
+	3184:  "apogeex-port",
+	3185:  "smpppd",
+	3186:  "iiw-port",
+	3187:  "odi-port",
+	3188:  "brcm-comm-port",
+	3189:  "pcle-infex",
+	3190:  "csvr-proxy",
+	3191:  "csvr-sslproxy",
+	3192:  "firemonrcc",
+	3193:  "spandataport",
+	3194:  "magbind",
+	3195:  "ncu-1",
+	3196:  "ncu-2",
+	3197:  "embrace-dp-s",
+	3198:  "embrace-dp-c",
+	3199:  "dmod-workspace",
+	3200:  "tick-port",
+	3201:  "cpq-tasksmart",
+	3202:  "intraintra",
+	3203:  "netwatcher-mon",
+	3204:  "netwatcher-db",
+	3205:  "isns",
+	3206:  "ironmail",
+	3207:  "vx-auth-port",
+	3208:  "pfu-prcallback",
+	3209:  "netwkpathengine",
+	3210:  "flamenco-proxy",
+	3211:  "avsecuremgmt",
+	3212:  "surveyinst",
+	3213:  "neon24x7",
+	3214:  "jmq-daemon-1",
+	3215:  "jmq-daemon-2",
+	3216:  "ferrari-foam",
+	3217:  "unite",
+	3218:  "smartpackets",
+	3219:  "wms-messenger",
+	3220:  "xnm-ssl",
+	3221:  "xnm-clear-text",
+	3222:  "glbp",
+	3223:  "digivote",
+	3224:  "aes-discovery",
+	3225:  "fcip-port",
+	3226:  "isi-irp",
+	3227:  "dwnmshttp",
+	3228:  "dwmsgserver",
+	3229:  "global-cd-port",
+	3230:  "sftdst-port",
+	3231:  "vidigo",
+	3232:  "mdtp",
+	3233:  "whisker",
+	3234:  "alchemy",
+	3235:  "mdap-port",
+	3236:  "apparenet-ts",
+	3237:  "apparenet-tps",
+	3238:  "apparenet-as",
+	3239:  "apparenet-ui",
+	3240:  "triomotion",
+	3241:  "sysorb",
+	3242:  "sdp-id-port",
+	3243:  "timelot",
+	3244:  "onesaf",
+	3245:  "vieo-fe",
+	3246:  "dvt-system",
+	3247:  "dvt-data",
+	3248:  "procos-lm",
+	3249:  "ssp",
+	3250:  "hicp",
+	3251:  "sysscanner",
+	3252:  "dhe",
+	3253:  "pda-data",
+	3254:  "pda-sys",
+	3255:  "semaphore",
+	3256:  "cpqrpm-agent",
+	3257:  "cpqrpm-server",
+	3258:  "ivecon-port",
+	3259:  "epncdp2",
+	3260:  "iscsi-target",
+	3261:  "winshadow",
+	3262:  "necp",
+	3263:  "ecolor-imager",
+	3264:  "ccmail",
+	3265:  "altav-tunnel",
+	3266:  "ns-cfg-server",
+	3267:  "ibm-dial-out",
+	3268:  "msft-gc",
+	3269:  "msft-gc-ssl",
+	3270:  "verismart",
+	3271:  "csoft-prev",
+	3272:  "user-manager",
+	3273:  "sxmp",
+	3274:  "ordinox-server",
+	3275:  "samd",
+	3276:  "maxim-asics",
+	3277:  "awg-proxy",
+	3278:  "lkcmserver",
+	3279:  "admind",
+	3280:  "vs-server",
+	3281:  "sysopt",
+	3282:  "datusorb",
+	3283:  "Apple Remote Desktop (Net Assistant)",
+	3284:  "4talk",
+	3285:  "plato",
+	3286:  "e-net",
+	3287:  "directvdata",
+	3288:  "cops",
+	3289:  "enpc",
+	3290:  "caps-lm",
+	3291:  "sah-lm",
+	3292:  "cart-o-rama",
+	3293:  "fg-fps",
+	3294:  "fg-gip",
+	3295:  "dyniplookup",
+	3296:  "rib-slm",
+	3297:  "cytel-lm",
+	3298:  "deskview",
+	3299:  "pdrncs",
+	3302:  "mcs-fastmail",
+	3303:  "opsession-clnt",
+	3304:  "opsession-srvr",
+	3305:  "odette-ftp",
+	3306:  "mysql",
+	3307:  "opsession-prxy",
+	3308:  "tns-server",
+	3309:  "tns-adv",
+	3310:  "dyna-access",
+	3311:  "mcns-tel-ret",
+	3312:  "appman-server",
+	3313:  "uorb",
+	3314:  "uohost",
+	3315:  "cdid",
+	3316:  "aicc-cmi",
+	3317:  "vsaiport",
+	3318:  "ssrip",
+	3319:  "sdt-lmd",
+	3320:  "officelink2000",
+	3321:  "vnsstr",
+	3326:  "sftu",
+	3327:  "bbars",
+	3328:  "egptlm",
+	3329:  "hp-device-disc",
+	3330:  "mcs-calypsoicf",
+	3331:  "mcs-messaging",
+	3332:  "mcs-mailsvr",
+	3333:  "dec-notes",
+	3334:  "directv-web",
+	3335:  "directv-soft",
+	3336:  "directv-tick",
+	3337:  "directv-catlg",
+	3338:  "anet-b",
+	3339:  "anet-l",
+	3340:  "anet-m",
+	3341:  "anet-h",
+	3342:  "webtie",
+	3343:  "ms-cluster-net",
+	3344:  "bnt-manager",
+	3345:  "influence",
+	3346:  "trnsprntproxy",
+	3347:  "phoenix-rpc",
+	3348:  "pangolin-laser",
+	3349:  "chevinservices",
+	3350:  "findviatv",
+	3351:  "btrieve",
+	3352:  "ssql",
+	3353:  "fatpipe",
+	3354:  "suitjd",
+	3355:  "ordinox-dbase",
+	3356:  "upnotifyps",
+	3357:  "adtech-test",
+	3358:  "mpsysrmsvr",
+	3359:  "wg-netforce",
+	3360:  "kv-server",
+	3361:  "kv-agent",
+	3362:  "dj-ilm",
+	3363:  "nati-vi-server",
+	3364:  "creativeserver",
+	3365:  "contentserver",
+	3366:  "creativepartnr",
+	3372:  "tip2",
+	3373:  "lavenir-lm",
+	3374:  "cluster-disc",
+	3375:  "vsnm-agent",
+	3376:  "cdbroker",
+	3377:  "cogsys-lm",
+	3378:  "wsicopy",
+	3379:  "socorfs",
+	3380:  "sns-channels",
+	3381:  "geneous",
+	3382:  "fujitsu-neat",
+	3383:  "esp-lm",
+	3384:  "hp-clic",
+	3385:  "qnxnetman",
+	3386:  "gprs-sig",
+	3387:  "backroomnet",
+	3388:  "cbserver",
+	3389:  "ms-wbt-server",
+	3390:  "dsc",
+	3391:  "savant",
+	3392:  "efi-lm",
+	3393:  "d2k-tapestry1",
+	3394:  "d2k-tapestry2",
+	3395:  "dyna-lm",
+	3396:  "printer-agent",
+	3397:  "cloanto-lm",
+	3398:  "mercantile",
+	3399:  "csms",
+	3400:  "csms2",
+	3401:  "filecast",
+	3402:  "fxaengine-net",
+	3405:  "nokia-ann-ch1",
+	3406:  "nokia-ann-ch2",
+	3407:  "ldap-admin",
+	3408:  "BESApi",
+	3409:  "networklens",
+	3410:  "networklenss",
+	3411:  "biolink-auth",
+	3412:  "xmlblaster",
+	3413:  "svnet",
+	3414:  "wip-port",
+	3415:  "bcinameservice",
+	3416:  "commandport",
+	3417:  "csvr",
+	3418:  "rnmap",
+	3419:  "softaudit",
+	3420:  "ifcp-port",
+	3421:  "bmap",
+	3422:  "rusb-sys-port",
+	3423:  "xtrm",
+	3424:  "xtrms",
+	3425:  "agps-port",
+	3426:  "arkivio",
+	3427:  "websphere-snmp",
+	3428:  "twcss",
+	3429:  "gcsp",
+	3430:  "ssdispatch",
+	3431:  "ndl-als",
+	3432:  "osdcp",
+	3433:  "opnet-smp",
+	3434:  "opencm",
+	3435:  "pacom",
+	3436:  "gc-config",
+	3437:  "autocueds",
+	3438:  "spiral-admin",
+	3439:  "hri-port",
+	3440:  "ans-console",
+	3441:  "connect-client",
+	3442:  "connect-server",
+	3443:  "ov-nnm-websrv",
+	3444:  "denali-server",
+	3445:  "monp",
+	3446:  "3comfaxrpc",
+	3447:  "directnet",
+	3448:  "dnc-port",
+	3449:  "hotu-chat",
+	3450:  "castorproxy",
+	3451:  "asam",
+	3452:  "sabp-signal",
+	3453:  "pscupd",
+	3454:  "mira",
+	3455:  "prsvp",
+	3456:  "vat",
+	3457:  "vat-control",
+	3458:  "d3winosfi",
+	3459:  "integral",
+	3460:  "edm-manager",
+	3461:  "edm-stager",
+	3462:  "edm-std-notify",
+	3463:  "edm-adm-notify",
+	3464:  "edm-mgr-sync",
+	3465:  "edm-mgr-cntrl",
+	3466:  "workflow",
+	3467:  "rcst",
+	3468:  "ttcmremotectrl",
+	3469:  "pluribus",
+	3470:  "jt400",
+	3471:  "jt400-ssl",
+	3472:  "jaugsremotec-1",
+	3473:  "jaugsremotec-2",
+	3474:  "ttntspauto",
+	3475:  "genisar-port",
+	3476:  "nppmp",
+	3477:  "ecomm",
+	3478:  "stun",
+	3479:  "twrpc",
+	3480:  "plethora",
+	3481:  "cleanerliverc",
+	3482:  "vulture",
+	3483:  "slim-devices",
+	3484:  "gbs-stp",
+	3485:  "celatalk",
+	3486:  "ifsf-hb-port",
+	3487:  "ltcudp",
+	3488:  "fs-rh-srv",
+	3489:  "dtp-dia",
+	3490:  "colubris",
+	3491:  "swr-port",
+	3492:  "tvdumtray-port",
+	3493:  "nut",
+	3494:  "ibm3494",
+	3495:  "seclayer-tcp",
+	3496:  "seclayer-tls",
+	3497:  "ipether232port",
+	3498:  "dashpas-port",
+	3499:  "sccip-media",
+	3500:  "rtmp-port",
+	3501:  "isoft-p2p",
+	3502:  "avinstalldisc",
+	3503:  "lsp-ping",
+	3504:  "ironstorm",
+	3505:  "ccmcomm",
+	3506:  "apc-3506",
+	3507:  "nesh-broker",
+	3508:  "interactionweb",
+	3509:  "vt-ssl",
+	3510:  "xss-port",
+	3511:  "webmail-2",
+	3512:  "aztec",
+	3513:  "arcpd",
+	3514:  "must-p2p",
+	3515:  "must-backplane",
+	3516:  "smartcard-port",
+	3517:  "802-11-iapp",
+	3518:  "artifact-msg",
+	3519:  "galileo",
+	3520:  "galileolog",
+	3521:  "mc3ss",
+	3522:  "nssocketport",
+	3523:  "odeumservlink",
+	3524:  "ecmport",
+	3525:  "eisport",
+	3526:  "starquiz-port",
+	3527:  "beserver-msg-q",
+	3528:  "jboss-iiop",
+	3529:  "jboss-iiop-ssl",
+	3530:  "gf",
+	3531:  "joltid",
+	3532:  "raven-rmp",
+	3533:  "raven-rdp",
+	3534:  "urld-port",
+	3535:  "ms-la",
+	3536:  "snac",
+	3537:  "ni-visa-remote",
+	3538:  "ibm-diradm",
+	3539:  "ibm-diradm-ssl",
+	3540:  "pnrp-port",
+	3541:  "voispeed-port",
+	3542:  "hacl-monitor",
+	3543:  "qftest-lookup",
+	3544:  "teredo",
+	3545:  "camac",
+	3547:  "symantec-sim",
+	3548:  "interworld",
+	3549:  "tellumat-nms",
+	3550:  "ssmpp",
+	3551:  "apcupsd",
+	3552:  "taserver",
+	3553:  "rbr-discovery",
+	3554:  "questnotify",
+	3555:  "razor",
+	3556:  "sky-transport",
+	3557:  "personalos-001",
+	3558:  "mcp-port",
+	3559:  "cctv-port",
+	3560:  "iniserve-port",
+	3561:  "bmc-onekey",
+	3562:  "sdbproxy",
+	3563:  "watcomdebug",
+	3564:  "esimport",
+	3567:  "dof-eps",
+	3568:  "dof-tunnel-sec",
+	3569:  "mbg-ctrl",
+	3570:  "mccwebsvr-port",
+	3571:  "megardsvr-port",
+	3572:  "megaregsvrport",
+	3573:  "tag-ups-1",
+	3574:  "dmaf-caster",
+	3575:  "ccm-port",
+	3576:  "cmc-port",
+	3577:  "config-port",
+	3578:  "data-port",
+	3579:  "ttat3lb",
+	3580:  "nati-svrloc",
+	3581:  "kfxaclicensing",
+	3582:  "press",
+	3583:  "canex-watch",
+	3584:  "u-dbap",
+	3585:  "emprise-lls",
+	3586:  "emprise-lsc",
+	3587:  "p2pgroup",
+	3588:  "sentinel",
+	3589:  "isomair",
+	3590:  "wv-csp-sms",
+	3591:  "gtrack-server",
+	3592:  "gtrack-ne",
+	3593:  "bpmd",
+	3594:  "mediaspace",
+	3595:  "shareapp",
+	3596:  "iw-mmogame",
+	3597:  "a14",
+	3598:  "a15",
+	3599:  "quasar-server",
+	3600:  "trap-daemon",
+	3601:  "visinet-gui",
+	3602:  "infiniswitchcl",
+	3603:  "int-rcv-cntrl",
+	3604:  "bmc-jmx-port",
+	3605:  "comcam-io",
+	3606:  "splitlock",
+	3607:  "precise-i3",
+	3608:  "trendchip-dcp",
+	3609:  "cpdi-pidas-cm",
+	3610:  "echonet",
+	3611:  "six-degrees",
+	3612:  "hp-dataprotect",
+	3613:  "alaris-disc",
+	3614:  "sigma-port",
+	3615:  "start-network",
+	3616:  "cd3o-protocol",
+	3617:  "sharp-server",
+	3618:  "aairnet-1",
+	3619:  "aairnet-2",
+	3620:  "ep-pcp",
+	3621:  "ep-nsp",
+	3622:  "ff-lr-port",
+	3623:  "haipe-discover",
+	3624:  "dist-upgrade",
+	3625:  "volley",
+	3626:  "bvcdaemon-port",
+	3627:  "jamserverport",
+	3628:  "ept-machine",
+	3629:  "escvpnet",
+	3630:  "cs-remote-db",
+	3631:  "cs-services",
+	3632:  "distcc",
+	3633:  "wacp",
+	3634:  "hlibmgr",
+	3635:  "sdo",
+	3636:  "servistaitsm",
+	3637:  "scservp",
+	3638:  "ehp-backup",
+	3639:  "xap-ha",
+	3640:  "netplay-port1",
+	3641:  "netplay-port2",
+	3642:  "juxml-port",
+	3643:  "audiojuggler",
+	3644:  "ssowatch",
+	3645:  "cyc",
+	3646:  "xss-srv-port",
+	3647:  "splitlock-gw",
+	3648:  "fjcp",
+	3649:  "nmmp",
+	3650:  "prismiq-plugin",
+	3651:  "xrpc-registry",
+	3652:  "vxcrnbuport",
+	3653:  "tsp",
+	3654:  "vaprtm",
+	3655:  "abatemgr",
+	3656:  "abatjss",
+	3657:  "immedianet-bcn",
+	3658:  "ps-ams",
+	3659:  "apple-sasl",
+	3660:  "can-nds-ssl",
+	3661:  "can-ferret-ssl",
+	3662:  "pserver",
+	3663:  "dtp",
+	3664:  "ups-engine",
+	3665:  "ent-engine",
+	3666:  "eserver-pap",
+	3667:  "infoexch",
+	3668:  "dell-rm-port",
+	3669:  "casanswmgmt",
+	3670:  "smile",
+	3671:  "efcp",
+	3672:  "lispworks-orb",
+	3673:  "mediavault-gui",
+	3674:  "wininstall-ipc",
+	3675:  "calltrax",
+	3676:  "va-pacbase",
+	3677:  "roverlog",
+	3678:  "ipr-dglt",
+	3679:  "Escale (Newton Dock)",
+	3680:  "npds-tracker",
+	3681:  "bts-x73",
+	3682:  "cas-mapi",
+	3683:  "bmc-ea",
+	3684:  "faxstfx-port",
+	3685:  "dsx-agent",
+	3686:  "tnmpv2",
+	3687:  "simple-push",
+	3688:  "simple-push-s",
+	3689:  "daap",
+	3690:  "svn",
+	3691:  "magaya-network",
+	3692:  "intelsync",
+	3695:  "bmc-data-coll",
+	3696:  "telnetcpcd",
+	3697:  "nw-license",
+	3698:  "sagectlpanel",
+	3699:  "kpn-icw",
+	3700:  "lrs-paging",
+	3701:  "netcelera",
+	3702:  "ws-discovery",
+	3703:  "adobeserver-3",
+	3704:  "adobeserver-4",
+	3705:  "adobeserver-5",
+	3706:  "rt-event",
+	3707:  "rt-event-s",
+	3708:  "sun-as-iiops",
+	3709:  "ca-idms",
+	3710:  "portgate-auth",
+	3711:  "edb-server2",
+	3712:  "sentinel-ent",
+	3713:  "tftps",
+	3714:  "delos-dms",
+	3715:  "anoto-rendezv",
+	3716:  "wv-csp-sms-cir",
+	3717:  "wv-csp-udp-cir",
+	3718:  "opus-services",
+	3719:  "itelserverport",
+	3720:  "ufastro-instr",
+	3721:  "xsync",
+	3722:  "xserveraid",
+	3723:  "sychrond",
+	3724:  "blizwow",
+	3725:  "na-er-tip",
+	3726:  "array-manager",
+	3727:  "e-mdu",
+	3728:  "e-woa",
+	3729:  "fksp-audit",
+	3730:  "client-ctrl",
+	3731:  "smap",
+	3732:  "m-wnn",
+	3733:  "multip-msg",
+	3734:  "synel-data",
+	3735:  "pwdis",
+	3736:  "rs-rmi",
+	3738:  "versatalk",
+	3739:  "launchbird-lm",
+	3740:  "heartbeat",
+	3741:  "wysdma",
+	3742:  "cst-port",
+	3743:  "ipcs-command",
+	3744:  "sasg",
+	3745:  "gw-call-port",
+	3746:  "linktest",
+	3747:  "linktest-s",
+	3748:  "webdata",
+	3749:  "cimtrak",
+	3750:  "cbos-ip-port",
+	3751:  "gprs-cube",
+	3752:  "vipremoteagent",
+	3753:  "nattyserver",
+	3754:  "timestenbroker",
+	3755:  "sas-remote-hlp",
+	3756:  "canon-capt",
+	3757:  "grf-port",
+	3758:  "apw-registry",
+	3759:  "exapt-lmgr",
+	3760:  "adtempusclient",
+	3761:  "gsakmp",
+	3762:  "gbs-smp",
+	3763:  "xo-wave",
+	3764:  "mni-prot-rout",
+	3765:  "rtraceroute",
+	3767:  "listmgr-port",
+	3768:  "rblcheckd",
+	3769:  "haipe-otnk",
+	3770:  "cindycollab",
+	3771:  "paging-port",
+	3772:  "ctp",
+	3773:  "ctdhercules",
+	3774:  "zicom",
+	3775:  "ispmmgr",
+	3776:  "dvcprov-port",
+	3777:  "jibe-eb",
+	3778:  "c-h-it-port",
+	3779:  "cognima",
+	3780:  "nnp",
+	3781:  "abcvoice-port",
+	3782:  "iso-tp0s",
+	3783:  "bim-pem",
+	3784:  "bfd-control",
+	3785:  "bfd-echo",
+	3786:  "upstriggervsw",
+	3787:  "fintrx",
+	3788:  "isrp-port",
+	3789:  "remotedeploy",
+	3790:  "quickbooksrds",
+	3791:  "tvnetworkvideo",
+	3792:  "sitewatch",
+	3793:  "dcsoftware",
+	3794:  "jaus",
+	3795:  "myblast",
+	3796:  "spw-dialer",
+	3797:  "idps",
+	3798:  "minilock",
+	3799:  "radius-dynauth",
+	3800:  "pwgpsi",
+	3801:  "ibm-mgr",
+	3802:  "vhd",
+	3803:  "soniqsync",
+	3804:  "iqnet-port",
+	3805:  "tcpdataserver",
+	3806:  "wsmlb",
+	3807:  "spugna",
+	3808:  "sun-as-iiops-ca",
+	3809:  "apocd",
+	3810:  "wlanauth",
+	3811:  "amp",
+	3812:  "neto-wol-server",
+	3813:  "rap-ip",
+	3814:  "neto-dcs",
+	3815:  "lansurveyorxml",
+	3816:  "sunlps-http",
+	3817:  "tapeware",
+	3818:  "crinis-hb",
+	3819:  "epl-slp",
+	3820:  "scp",
+	3821:  "pmcp",
+	3822:  "acp-discovery",
+	3823:  "acp-conduit",
+	3824:  "acp-policy",
+	3825:  "ffserver",
+	3826:  "warmux",
+	3827:  "netmpi",
+	3828:  "neteh",
+	3829:  "neteh-ext",
+	3830:  "cernsysmgmtagt",
+	3831:  "dvapps",
+	3832:  "xxnetserver",
+	3833:  "aipn-auth",
+	3834:  "spectardata",
+	3835:  "spectardb",
+	3836:  "markem-dcp",
+	3837:  "mkm-discovery",
+	3838:  "sos",
+	3839:  "amx-rms",
+	3840:  "flirtmitmir",
+	3842:  "nhci",
+	3843:  "quest-agent",
+	3844:  "rnm",
+	3845:  "v-one-spp",
+	3846:  "an-pcp",
+	3847:  "msfw-control",
+	3848:  "item",
+	3849:  "spw-dnspreload",
+	3850:  "qtms-bootstrap",
+	3851:  "spectraport",
+	3852:  "sse-app-config",
+	3853:  "sscan",
+	3854:  "stryker-com",
+	3855:  "opentrac",
+	3856:  "informer",
+	3857:  "trap-port",
+	3858:  "trap-port-mom",
+	3859:  "nav-port",
+	3860:  "sasp",
+	3861:  "winshadow-hd",
+	3862:  "giga-pocket",
+	3863:  "asap-udp",
+	3865:  "xpl",
+	3866:  "dzdaemon",
+	3867:  "dzoglserver",
+	3869:  "ovsam-mgmt",
+	3870:  "ovsam-d-agent",
+	3871:  "avocent-adsap",
+	3872:  "oem-agent",
+	3873:  "fagordnc",
+	3874:  "sixxsconfig",
+	3875:  "pnbscada",
+	3876:  "dl-agent",
+	3877:  "xmpcr-interface",
+	3878:  "fotogcad",
+	3879:  "appss-lm",
+	3880:  "igrs",
+	3881:  "idac",
+	3882:  "msdts1",
+	3883:  "vrpn",
+	3884:  "softrack-meter",
+	3885:  "topflow-ssl",
+	3886:  "nei-management",
+	3887:  "ciphire-data",
+	3888:  "ciphire-serv",
+	3889:  "dandv-tester",
+	3890:  "ndsconnect",
+	3891:  "rtc-pm-port",
+	3892:  "pcc-image-port",
+	3893:  "cgi-starapi",
+	3894:  "syam-agent",
+	3895:  "syam-smc",
+	3896:  "sdo-tls",
+	3897:  "sdo-ssh",
+	3898:  "senip",
+	3899:  "itv-control",
+	3900:  "udt-os",
+	3901:  "nimsh",
+	3902:  "nimaux",
+	3903:  "charsetmgr",
+	3904:  "omnilink-port",
+	3905:  "mupdate",
+	3906:  "topovista-data",
+	3907:  "imoguia-port",
+	3908:  "hppronetman",
+	3909:  "surfcontrolcpa",
+	3910:  "prnrequest",
+	3911:  "prnstatus",
+	3912:  "gbmt-stars",
+	3913:  "listcrt-port",
+	3914:  "listcrt-port-2",
+	3915:  "agcat",
+	3916:  "wysdmc",
+	3917:  "aftmux",
+	3918:  "pktcablemmcops",
+	3919:  "hyperip",
+	3920:  "exasoftport1",
+	3921:  "herodotus-net",
+	3922:  "sor-update",
+	3923:  "symb-sb-port",
+	3924:  "mpl-gprs-port",
+	3925:  "zmp",
+	3926:  "winport",
+	3927:  "natdataservice",
+	3928:  "netboot-pxe",
+	3929:  "smauth-port",
+	3930:  "syam-webserver",
+	3931:  "msr-plugin-port",
+	3932:  "dyn-site",
+	3933:  "plbserve-port",
+	3934:  "sunfm-port",
+	3935:  "sdp-portmapper",
+	3936:  "mailprox",
+	3937:  "dvbservdsc",
+	3938:  "dbcontrol-agent",
+	3939:  "aamp",
+	3940:  "xecp-node",
+	3941:  "homeportal-web",
+	3942:  "srdp",
+	3943:  "tig",
+	3944:  "sops",
+	3945:  "emcads",
+	3946:  "backupedge",
+	3947:  "ccp",
+	3948:  "apdap",
+	3949:  "drip",
+	3950:  "namemunge",
+	3951:  "pwgippfax",
+	3952:  "i3-sessionmgr",
+	3953:  "xmlink-connect",
+	3954:  "adrep",
+	3955:  "p2pcommunity",
+	3956:  "gvcp",
+	3957:  "mqe-broker",
+	3958:  "mqe-agent",
+	3959:  "treehopper",
+	3960:  "bess",
+	3961:  "proaxess",
+	3962:  "sbi-agent",
+	3963:  "thrp",
+	3964:  "sasggprs",
+	3965:  "ati-ip-to-ncpe",
+	3966:  "bflckmgr",
+	3967:  "ppsms",
+	3968:  "ianywhere-dbns",
+	3969:  "landmarks",
+	3970:  "lanrevagent",
+	3971:  "lanrevserver",
+	3972:  "iconp",
+	3973:  "progistics",
+	3974:  "citysearch",
+	3975:  "airshot",
+	3976:  "opswagent",
+	3977:  "opswmanager",
+	3978:  "secure-cfg-svr",
+	3979:  "smwan",
+	3980:  "acms",
+	3981:  "starfish",
+	3982:  "eis",
+	3983:  "eisp",
+	3984:  "mapper-nodemgr",
+	3985:  "mapper-mapethd",
+	3986:  "mapper-ws-ethd",
+	3987:  "centerline",
+	3988:  "dcs-config",
+	3989:  "bv-queryengine",
+	3990:  "bv-is",
+	3991:  "bv-smcsrv",
+	3992:  "bv-ds",
+	3993:  "bv-agent",
+	3995:  "iss-mgmt-ssl",
+	3996:  "abcsoftware",
+	3997:  "agentsease-db",
+	3998:  "dnx",
+	3999:  "nvcnet",
+	4000:  "terabase",
+	4001:  "newoak",
+	4002:  "pxc-spvr-ft",
+	4003:  "pxc-splr-ft",
+	4004:  "pxc-roid",
+	4005:  "pxc-pin",
+	4006:  "pxc-spvr",
+	4007:  "pxc-splr",
+	4008:  "netcheque",
+	4009:  "chimera-hwm",
+	4010:  "samsung-unidex",
+	4011:  "altserviceboot",
+	4012:  "pda-gate",
+	4013:  "acl-manager",
+	4014:  "taiclock",
+	4015:  "talarian-mcast1",
+	4016:  "talarian-mcast2",
+	4017:  "talarian-mcast3",
+	4018:  "talarian-mcast4",
+	4019:  "talarian-mcast5",
+	4020:  "trap",
+	4021:  "nexus-portal",
+	4022:  "dnox",
+	4023:  "esnm-zoning",
+	4024:  "tnp1-port",
+	4025:  "partimage",
+	4026:  "as-debug",
+	4027:  "bxp",
+	4028:  "dtserver-port",
+	4029:  "ip-qsig",
+	4030:  "jdmn-port",
+	4031:  "suucp",
+	4032:  "vrts-auth-port",
+	4033:  "sanavigator",
+	4034:  "ubxd",
+	4035:  "wap-push-http",
+	4036:  "wap-push-https",
+	4037:  "ravehd",
+	4038:  "fazzt-ptp",
+	4039:  "fazzt-admin",
+	4040:  "yo-main",
+	4041:  "houston",
+	4042:  "ldxp",
+	4043:  "nirp",
+	4044:  "ltp",
+	4045:  "npp",
+	4046:  "acp-proto",
+	4047:  "ctp-state",
+	4049:  "wafs",
+	4050:  "cisco-wafs",
+	4051:  "cppdp",
+	4052:  "interact",
+	4053:  "ccu-comm-1",
+	4054:  "ccu-comm-2",
+	4055:  "ccu-comm-3",
+	4056:  "lms",
+	4057:  "wfm",
+	4058:  "kingfisher",
+	4059:  "dlms-cosem",
+	4060:  "dsmeter-iatc",
+	4061:  "ice-location",
+	4062:  "ice-slocation",
+	4063:  "ice-router",
+	4064:  "ice-srouter",
+	4065:  "avanti-cdp",
+	4066:  "pmas",
+	4067:  "idp",
+	4068:  "ipfltbcst",
+	4069:  "minger",
+	4070:  "tripe",
+	4071:  "aibkup",
+	4072:  "zieto-sock",
+	4073:  "iRAPP",
+	4074:  "cequint-cityid",
+	4075:  "perimlan",
+	4076:  "seraph",
+	4077:  "ascomalarm",
+	4079:  "santools",
+	4080:  "lorica-in",
+	4081:  "lorica-in-sec",
+	4082:  "lorica-out",
+	4083:  "lorica-out-sec",
+	4084:  "fortisphere-vm",
+	4086:  "ftsync",
+	4089:  "opencore",
+	4090:  "omasgport",
+	4091:  "ewinstaller",
+	4092:  "ewdgs",
+	4093:  "pvxpluscs",
+	4094:  "sysrqd",
+	4095:  "xtgui",
+	4096:  "bre",
+	4097:  "patrolview",
+	4098:  "drmsfsd",
+	4099:  "dpcp",
+	4100:  "igo-incognito",
+	4101:  "brlp-0",
+	4102:  "brlp-1",
+	4103:  "brlp-2",
+	4104:  "brlp-3",
+	4105:  "shofar",
+	4106:  "synchronite",
+	4107:  "j-ac",
+	4108:  "accel",
+	4109:  "izm",
+	4110:  "g2tag",
+	4111:  "xgrid",
+	4112:  "apple-vpns-rp",
+	4113:  "aipn-reg",
+	4114:  "jomamqmonitor",
+	4115:  "cds",
+	4116:  "smartcard-tls",
+	4117:  "hillrserv",
+	4118:  "netscript",
+	4119:  "assuria-slm",
+	4121:  "e-builder",
+	4122:  "fprams",
+	4123:  "z-wave",
+	4124:  "tigv2",
+	4125:  "opsview-envoy",
+	4126:  "ddrepl",
+	4127:  "unikeypro",
+	4128:  "nufw",
+	4129:  "nuauth",
+	4130:  "fronet",
+	4131:  "stars",
+	4132:  "nuts-dem",
+	4133:  "nuts-bootp",
+	4134:  "nifty-hmi",
+	4135:  "cl-db-attach",
+	4136:  "cl-db-request",
+	4137:  "cl-db-remote",
+	4138:  "nettest",
+	4139:  "thrtx",
+	4140:  "cedros-fds",
+	4141:  "oirtgsvc",
+	4142:  "oidocsvc",
+	4143:  "oidsr",
+	4145:  "vvr-control",
+	4146:  "tgcconnect",
+	4147:  "vrxpservman",
+	4148:  "hhb-handheld",
+	4149:  "agslb",
+	4150:  "PowerAlert-nsa",
+	4151:  "menandmice-noh",
+	4152:  "idig-mux",
+	4153:  "mbl-battd",
+	4154:  "atlinks",
+	4155:  "bzr",
+	4156:  "stat-results",
+	4157:  "stat-scanner",
+	4158:  "stat-cc",
+	4159:  "nss",
+	4160:  "jini-discovery",
+	4161:  "omscontact",
+	4162:  "omstopology",
+	4163:  "silverpeakpeer",
+	4164:  "silverpeakcomm",
+	4165:  "altcp",
+	4166:  "joost",
+	4167:  "ddgn",
+	4168:  "pslicser",
+	4169:  "iadt-disc",
+	4172:  "pcoip",
+	4173:  "mma-discovery",
+	4174:  "sm-disc",
+	4177:  "wello",
+	4178:  "storman",
+	4179:  "MaxumSP",
+	4180:  "httpx",
+	4181:  "macbak",
+	4182:  "pcptcpservice",
+	4183:  "cyborgnet",
+	4184:  "universe-suite",
+	4185:  "wcpp",
+	4188:  "vatata",
+	4191:  "dsmipv6",
+	4192:  "azeti-bd",
+	4197:  "hctl",
+	4199:  "eims-admin",
+	4300:  "corelccam",
+	4301:  "d-data",
+	4302:  "d-data-control",
+	4303:  "srcp",
+	4304:  "owserver",
+	4305:  "batman",
+	4306:  "pinghgl",
+	4307:  "trueconf",
+	4308:  "compx-lockview",
+	4309:  "dserver",
+	4310:  "mirrtex",
+	4320:  "fdt-rcatp",
+	4321:  "rwhois",
+	4322:  "trim-event",
+	4323:  "trim-ice",
+	4325:  "geognosisman",
+	4326:  "geognosis",
+	4327:  "jaxer-web",
+	4328:  "jaxer-manager",
+	4333:  "ahsp",
+	4340:  "gaia",
+	4341:  "lisp-data",
+	4342:  "lisp-control",
+	4343:  "unicall",
+	4344:  "vinainstall",
+	4345:  "m4-network-as",
+	4346:  "elanlm",
+	4347:  "lansurveyor",
+	4348:  "itose",
+	4349:  "fsportmap",
+	4350:  "net-device",
+	4351:  "plcy-net-svcs",
+	4352:  "pjlink",
+	4353:  "f5-iquery",
+	4354:  "qsnet-trans",
+	4355:  "qsnet-workst",
+	4356:  "qsnet-assist",
+	4357:  "qsnet-cond",
+	4358:  "qsnet-nucl",
+	4359:  "omabcastltkm",
+	4361:  "nacnl",
+	4362:  "afore-vdp-disc",
+	4366:  "shadowstream",
+	4368:  "wxbrief",
+	4369:  "epmd",
+	4370:  "elpro-tunnel",
+	4371:  "l2c-disc",
+	4372:  "l2c-data",
+	4373:  "remctl",
+	4375:  "tolteces",
+	4376:  "bip",
+	4377:  "cp-spxsvr",
+	4378:  "cp-spxdpy",
+	4379:  "ctdb",
+	4389:  "xandros-cms",
+	4390:  "wiegand",
+	4394:  "apwi-disc",
+	4395:  "omnivisionesx",
+	4400:  "ds-srv",
+	4401:  "ds-srvr",
+	4402:  "ds-clnt",
+	4403:  "ds-user",
+	4404:  "ds-admin",
+	4405:  "ds-mail",
+	4406:  "ds-slp",
+	4412:  "smallchat",
+	4413:  "avi-nms-disc",
+	4416:  "pjj-player-disc",
+	4418:  "axysbridge",
+	4420:  "nvm-express",
+	4425:  "netrockey6",
+	4426:  "beacon-port-2",
+	4430:  "rsqlserver",
+	4432:  "l-acoustics",
+	4441:  "netblox",
+	4442:  "saris",
+	4443:  "pharos",
+	4444:  "krb524",
+	4445:  "upnotifyp",
+	4446:  "n1-fwp",
+	4447:  "n1-rmgmt",
+	4448:  "asc-slmd",
+	4449:  "privatewire",
+	4450:  "camp",
+	4451:  "ctisystemmsg",
+	4452:  "ctiprogramload",
+	4453:  "nssalertmgr",
+	4454:  "nssagentmgr",
+	4455:  "prchat-user",
+	4456:  "prchat-server",
+	4457:  "prRegister",
+	4458:  "mcp",
+	4484:  "hpssmgmt",
+	4486:  "icms",
+	4488:  "awacs-ice",
+	4500:  "ipsec-nat-t",
+	4534:  "armagetronad",
+	4535:  "ehs",
+	4536:  "ehs-ssl",
+	4537:  "wssauthsvc",
+	4538:  "swx-gate",
+	4545:  "worldscores",
+	4546:  "sf-lm",
+	4547:  "lanner-lm",
+	4548:  "synchromesh",
+	4549:  "aegate",
+	4550:  "gds-adppiw-db",
+	4551:  "ieee-mih",
+	4552:  "menandmice-mon",
+	4554:  "msfrs",
+	4555:  "rsip",
+	4556:  "dtn-bundle",
+	4557:  "mtcevrunqss",
+	4558:  "mtcevrunqman",
+	4559:  "hylafax",
+	4566:  "kwtc",
+	4567:  "tram",
+	4568:  "bmc-reporting",
+	4569:  "iax",
+	4591:  "l3t-at-an",
+	4592:  "hrpd-ith-at-an",
+	4593:  "ipt-anri-anri",
+	4594:  "ias-session",
+	4595:  "ias-paging",
+	4596:  "ias-neighbor",
+	4597:  "a21-an-1xbs",
+	4598:  "a16-an-an",
+	4599:  "a17-an-an",
+	4600:  "piranha1",
+	4601:  "piranha2",
+	4621:  "ventoso",
+	4658:  "playsta2-app",
+	4659:  "playsta2-lob",
+	4660:  "smaclmgr",
+	4661:  "kar2ouche",
+	4662:  "oms",
+	4663:  "noteit",
+	4664:  "ems",
+	4665:  "contclientms",
+	4666:  "eportcomm",
+	4667:  "mmacomm",
+	4668:  "mmaeds",
+	4669:  "eportcommdata",
+	4670:  "light",
+	4671:  "acter",
+	4672:  "rfa",
+	4673:  "cxws",
+	4674:  "appiq-mgmt",
+	4675:  "dhct-status",
+	4676:  "dhct-alerts",
+	4677:  "bcs",
+	4678:  "traversal",
+	4679:  "mgesupervision",
+	4680:  "mgemanagement",
+	4681:  "parliant",
+	4682:  "finisar",
+	4683:  "spike",
+	4684:  "rfid-rp1",
+	4685:  "autopac",
+	4686:  "msp-os",
+	4687:  "nst",
+	4688:  "mobile-p2p",
+	4689:  "altovacentral",
+	4690:  "prelude",
+	4691:  "mtn",
+	4692:  "conspiracy",
+	4700:  "netxms-agent",
+	4701:  "netxms-mgmt",
+	4702:  "netxms-sync",
+	4711:  "trinity-dist",
+	4725:  "truckstar",
+	4726:  "a26-fap-fgw",
+	4727:  "fcis-disc",
+	4728:  "capmux",
+	4729:  "gsmtap",
+	4730:  "gearman",
+	4732:  "ohmtrigger",
+	4737:  "ipdr-sp",
+	4738:  "solera-lpn",
+	4739:  "ipfix",
+	4740:  "ipfixs",
+	4741:  "lumimgrd",
+	4742:  "sicct-sdp",
+	4743:  "openhpid",
+	4744:  "ifsp",
+	4745:  "fmp",
+	4746:  "intelliadm-disc",
+	4747:  "buschtrommel",
+	4749:  "profilemac",
+	4750:  "ssad",
+	4751:  "spocp",
+	4752:  "snap",
+	4753:  "simon-disc",
+	4754:  "gre-in-udp",
+	4755:  "gre-udp-dtls",
+	4784:  "bfd-multi-ctl",
+	4785:  "cncp",
+	4789:  "vxlan",
+	4790:  "vxlan-gpe",
+	4791:  "roce",
+	4800:  "iims",
+	4801:  "iwec",
+	4802:  "ilss",
+	4803:  "notateit-disc",
+	4804:  "aja-ntv4-disc",
+	4827:  "htcp",
+	4837:  "varadero-0",
+	4838:  "varadero-1",
+	4839:  "varadero-2",
+	4840:  "opcua-udp",
+	4841:  "quosa",
+	4842:  "gw-asv",
+	4843:  "opcua-tls",
+	4844:  "gw-log",
+	4845:  "wcr-remlib",
+	4846:  "contamac-icm",
+	4847:  "wfc",
+	4848:  "appserv-http",
+	4849:  "appserv-https",
+	4850:  "sun-as-nodeagt",
+	4851:  "derby-repli",
+	4867:  "unify-debug",
+	4868:  "phrelay",
+	4869:  "phrelaydbg",
+	4870:  "cc-tracking",
+	4871:  "wired",
+	4876:  "tritium-can",
+	4877:  "lmcs",
+	4878:  "inst-discovery",
+	4881:  "socp-t",
+	4882:  "socp-c",
+	4884:  "hivestor",
+	4885:  "abbs",
+	4894:  "lyskom",
+	4899:  "radmin-port",
+	4900:  "hfcs",
+	4914:  "bones",
+	4936:  "an-signaling",
+	4937:  "atsc-mh-ssc",
+	4940:  "eq-office-4940",
+	4941:  "eq-office-4941",
+	4942:  "eq-office-4942",
+	4949:  "munin",
+	4950:  "sybasesrvmon",
+	4951:  "pwgwims",
+	4952:  "sagxtsds",
+	4969:  "ccss-qmm",
+	4970:  "ccss-qsm",
+	4980:  "ctxs-vpp",
+	4986:  "mrip",
+	4987:  "smar-se-port1",
+	4988:  "smar-se-port2",
+	4989:  "parallel",
+	4990:  "busycal",
+	4991:  "vrt",
+	4999:  "hfcs-manager",
+	5000:  "commplex-main",
+	5001:  "commplex-link",
+	5002:  "rfe",
+	5003:  "fmpro-internal",
+	5004:  "avt-profile-1",
+	5005:  "avt-profile-2",
+	5006:  "wsm-server",
+	5007:  "wsm-server-ssl",
+	5008:  "synapsis-edge",
+	5009:  "winfs",
+	5010:  "telelpathstart",
+	5011:  "telelpathattack",
+	5012:  "nsp",
+	5013:  "fmpro-v6",
+	5014:  "onpsocket",
+	5020:  "zenginkyo-1",
+	5021:  "zenginkyo-2",
+	5022:  "mice",
+	5023:  "htuilsrv",
+	5024:  "scpi-telnet",
+	5025:  "scpi-raw",
+	5026:  "strexec-d",
+	5027:  "strexec-s",
+	5029:  "infobright",
+	5030:  "surfpass",
+	5031:  "dmp",
+	5042:  "asnaacceler8db",
+	5043:  "swxadmin",
+	5044:  "lxi-evntsvc",
+	5046:  "vpm-udp",
+	5047:  "iscape",
+	5049:  "ivocalize",
+	5050:  "mmcc",
+	5051:  "ita-agent",
+	5052:  "ita-manager",
+	5053:  "rlm-disc",
+	5055:  "unot",
+	5056:  "intecom-ps1",
+	5057:  "intecom-ps2",
+	5058:  "locus-disc",
+	5059:  "sds",
+	5060:  "sip",
+	5061:  "sips",
+	5062:  "na-localise",
+	5064:  "ca-1",
+	5065:  "ca-2",
+	5066:  "stanag-5066",
+	5067:  "authentx",
+	5069:  "i-net-2000-npr",
+	5070:  "vtsas",
+	5071:  "powerschool",
+	5072:  "ayiya",
+	5073:  "tag-pm",
+	5074:  "alesquery",
+	5078:  "pixelpusher",
+	5079:  "cp-spxrpts",
+	5080:  "onscreen",
+	5081:  "sdl-ets",
+	5082:  "qcp",
+	5083:  "qfp",
+	5084:  "llrp",
+	5085:  "encrypted-llrp",
+	5092:  "magpie",
+	5093:  "sentinel-lm",
+	5094:  "hart-ip",
+	5099:  "sentlm-srv2srv",
+	5100:  "socalia",
+	5101:  "talarian-udp",
+	5102:  "oms-nonsecure",
+	5104:  "tinymessage",
+	5105:  "hughes-ap",
+	5111:  "taep-as-svc",
+	5112:  "pm-cmdsvr",
+	5116:  "emb-proj-cmd",
+	5120:  "barracuda-bbs",
+	5133:  "nbt-pc",
+	5136:  "minotaur-sa",
+	5137:  "ctsd",
+	5145:  "rmonitor-secure",
+	5150:  "atmp",
+	5151:  "esri-sde",
+	5152:  "sde-discovery",
+	5154:  "bzflag",
+	5155:  "asctrl-agent",
+	5164:  "vpa-disc",
+	5165:  "ife-icorp",
+	5166:  "winpcs",
+	5167:  "scte104",
+	5168:  "scte30",
+	5190:  "aol",
+	5191:  "aol-1",
+	5192:  "aol-2",
+	5193:  "aol-3",
+	5200:  "targus-getdata",
+	5201:  "targus-getdata1",
+	5202:  "targus-getdata2",
+	5203:  "targus-getdata3",
+	5223:  "hpvirtgrp",
+	5224:  "hpvirtctrl",
+	5225:  "hp-server",
+	5226:  "hp-status",
+	5227:  "perfd",
+	5234:  "eenet",
+	5235:  "galaxy-network",
+	5236:  "padl2sim",
+	5237:  "mnet-discovery",
+	5245:  "downtools-disc",
+	5246:  "capwap-control",
+	5247:  "capwap-data",
+	5248:  "caacws",
+	5249:  "caaclang2",
+	5250:  "soagateway",
+	5251:  "caevms",
+	5252:  "movaz-ssc",
+	5264:  "3com-njack-1",
+	5265:  "3com-njack-2",
+	5270:  "cartographerxmp",
+	5271:  "cuelink-disc",
+	5272:  "pk",
+	5282:  "transmit-port",
+	5298:  "presence",
+	5299:  "nlg-data",
+	5300:  "hacl-hb",
+	5301:  "hacl-gs",
+	5302:  "hacl-cfg",
+	5303:  "hacl-probe",
+	5304:  "hacl-local",
+	5305:  "hacl-test",
+	5306:  "sun-mc-grp",
+	5307:  "sco-aip",
+	5308:  "cfengine",
+	5309:  "jprinter",
+	5310:  "outlaws",
+	5312:  "permabit-cs",
+	5313:  "rrdp",
+	5314:  "opalis-rbt-ipc",
+	5315:  "hacl-poll",
+	5343:  "kfserver",
+	5344:  "xkotodrcp",
+	5349:  "stuns",
+	5350:  "pcp-multicast",
+	5351:  "pcp",
+	5352:  "dns-llq",
+	5353:  "mdns",
+	5354:  "mdnsresponder",
+	5355:  "llmnr",
+	5356:  "ms-smlbiz",
+	5357:  "wsdapi",
+	5358:  "wsdapi-s",
+	5359:  "ms-alerter",
+	5360:  "ms-sideshow",
+	5361:  "ms-s-sideshow",
+	5362:  "serverwsd2",
+	5363:  "net-projection",
+	5364:  "kdnet",
+	5397:  "stresstester",
+	5398:  "elektron-admin",
+	5399:  "securitychase",
+	5400:  "excerpt",
+	5401:  "excerpts",
+	5402:  "mftp",
+	5403:  "hpoms-ci-lstn",
+	5404:  "hpoms-dps-lstn",
+	5405:  "netsupport",
+	5406:  "systemics-sox",
+	5407:  "foresyte-clear",
+	5408:  "foresyte-sec",
+	5409:  "salient-dtasrv",
+	5410:  "salient-usrmgr",
+	5411:  "actnet",
+	5412:  "continuus",
+	5413:  "wwiotalk",
+	5414:  "statusd",
+	5415:  "ns-server",
+	5416:  "sns-gateway",
+	5417:  "sns-agent",
+	5418:  "mcntp",
+	5419:  "dj-ice",
+	5420:  "cylink-c",
+	5421:  "netsupport2",
+	5422:  "salient-mux",
+	5423:  "virtualuser",
+	5424:  "beyond-remote",
+	5425:  "br-channel",
+	5426:  "devbasic",
+	5427:  "sco-peer-tta",
+	5428:  "telaconsole",
+	5429:  "base",
+	5430:  "radec-corp",
+	5431:  "park-agent",
+	5432:  "postgresql",
+	5433:  "pyrrho",
+	5434:  "sgi-arrayd",
+	5435:  "sceanics",
+	5436:  "pmip6-cntl",
+	5437:  "pmip6-data",
+	5443:  "spss",
+	5450:  "tiepie-disc",
+	5453:  "surebox",
+	5454:  "apc-5454",
+	5455:  "apc-5455",
+	5456:  "apc-5456",
+	5461:  "silkmeter",
+	5462:  "ttl-publisher",
+	5463:  "ttlpriceproxy",
+	5464:  "quailnet",
+	5465:  "netops-broker",
+	5474:  "apsolab-rpc",
+	5500:  "fcp-addr-srvr1",
+	5501:  "fcp-addr-srvr2",
+	5502:  "fcp-srvr-inst1",
+	5503:  "fcp-srvr-inst2",
+	5504:  "fcp-cics-gw1",
+	5505:  "checkoutdb",
+	5506:  "amc",
+	5553:  "sgi-eventmond",
+	5554:  "sgi-esphttp",
+	5555:  "personal-agent",
+	5556:  "freeciv",
+	5567:  "dof-dps-mc-sec",
+	5568:  "sdt",
+	5569:  "rdmnet-device",
+	5573:  "sdmmp",
+	5580:  "tmosms0",
+	5581:  "tmosms1",
+	5582:  "fac-restore",
+	5583:  "tmo-icon-sync",
+	5584:  "bis-web",
+	5585:  "bis-sync",
+	5597:  "ininmessaging",
+	5598:  "mctfeed",
+	5599:  "esinstall",
+	5600:  "esmmanager",
+	5601:  "esmagent",
+	5602:  "a1-msc",
+	5603:  "a1-bs",
+	5604:  "a3-sdunode",
+	5605:  "a4-sdunode",
+	5627:  "ninaf",
+	5628:  "htrust",
+	5629:  "symantec-sfdb",
+	5630:  "precise-comm",
+	5631:  "pcanywheredata",
+	5632:  "pcanywherestat",
+	5633:  "beorl",
+	5634:  "xprtld",
+	5670:  "zre-disc",
+	5671:  "amqps",
+	5672:  "amqp",
+	5673:  "jms",
+	5674:  "hyperscsi-port",
+	5675:  "v5ua",
+	5676:  "raadmin",
+	5677:  "questdb2-lnchr",
+	5678:  "rrac",
+	5679:  "dccm",
+	5680:  "auriga-router",
+	5681:  "ncxcp",
+	5682:  "brightcore",
+	5683:  "coap",
+	5684:  "coaps",
+	5687:  "gog-multiplayer",
+	5688:  "ggz",
+	5689:  "qmvideo",
+	5713:  "proshareaudio",
+	5714:  "prosharevideo",
+	5715:  "prosharedata",
+	5716:  "prosharerequest",
+	5717:  "prosharenotify",
+	5718:  "dpm",
+	5719:  "dpm-agent",
+	5720:  "ms-licensing",
+	5721:  "dtpt",
+	5722:  "msdfsr",
+	5723:  "omhs",
+	5724:  "omsdk",
+	5728:  "io-dist-group",
+	5729:  "openmail",
+	5730:  "unieng",
+	5741:  "ida-discover1",
+	5742:  "ida-discover2",
+	5743:  "watchdoc-pod",
+	5744:  "watchdoc",
+	5745:  "fcopy-server",
+	5746:  "fcopys-server",
+	5747:  "tunatic",
+	5748:  "tunalyzer",
+	5750:  "rscd",
+	5755:  "openmailg",
+	5757:  "x500ms",
+	5766:  "openmailns",
+	5767:  "s-openmail",
+	5768:  "openmailpxy",
+	5769:  "spramsca",
+	5770:  "spramsd",
+	5771:  "netagent",
+	5777:  "dali-port",
+	5781:  "3par-evts",
+	5782:  "3par-mgmt",
+	5783:  "3par-mgmt-ssl",
+	5784:  "ibar",
+	5785:  "3par-rcopy",
+	5786:  "cisco-redu",
+	5787:  "waascluster",
+	5793:  "xtreamx",
+	5794:  "spdp",
+	5813:  "icmpd",
+	5814:  "spt-automation",
+	5859:  "wherehoo",
+	5863:  "ppsuitemsg",
+	5900:  "rfb",
+	5910:  "cm",
+	5911:  "cpdlc",
+	5912:  "fis",
+	5913:  "ads-c",
+	5963:  "indy",
+	5968:  "mppolicy-v5",
+	5969:  "mppolicy-mgr",
+	5984:  "couchdb",
+	5985:  "wsman",
+	5986:  "wsmans",
+	5987:  "wbem-rmi",
+	5988:  "wbem-http",
+	5989:  "wbem-https",
+	5990:  "wbem-exp-https",
+	5991:  "nuxsl",
+	5992:  "consul-insight",
+	5999:  "cvsup",
+	6064:  "ndl-ahp-svc",
+	6065:  "winpharaoh",
+	6066:  "ewctsp",
+	6069:  "trip",
+	6070:  "messageasap",
+	6071:  "ssdtp",
+	6072:  "diagnose-proc",
+	6073:  "directplay8",
+	6074:  "max",
+	6080:  "gue",
+	6081:  "geneve",
+	6082:  "p25cai",
+	6083:  "miami-bcast",
+	6085:  "konspire2b",
+	6086:  "pdtp",
+	6087:  "ldss",
+	6088:  "doglms-notify",
+	6100:  "synchronet-db",
+	6101:  "synchronet-rtc",
+	6102:  "synchronet-upd",
+	6103:  "rets",
+	6104:  "dbdb",
+	6105:  "primaserver",
+	6106:  "mpsserver",
+	6107:  "etc-control",
+	6108:  "sercomm-scadmin",
+	6109:  "globecast-id",
+	6110:  "softcm",
+	6111:  "spc",
+	6112:  "dtspcd",
+	6118:  "tipc",
+	6122:  "bex-webadmin",
+	6123:  "backup-express",
+	6124:  "pnbs",
+	6133:  "nbt-wol",
+	6140:  "pulsonixnls",
+	6141:  "meta-corp",
+	6142:  "aspentec-lm",
+	6143:  "watershed-lm",
+	6144:  "statsci1-lm",
+	6145:  "statsci2-lm",
+	6146:  "lonewolf-lm",
+	6147:  "montage-lm",
+	6148:  "ricardo-lm",
+	6149:  "tal-pod",
+	6160:  "ecmp-data",
+	6161:  "patrol-ism",
+	6162:  "patrol-coll",
+	6163:  "pscribe",
+	6200:  "lm-x",
+	6201:  "thermo-calc",
+	6209:  "qmtps",
+	6222:  "radmind",
+	6241:  "jeol-nsddp-1",
+	6242:  "jeol-nsddp-2",
+	6243:  "jeol-nsddp-3",
+	6244:  "jeol-nsddp-4",
+	6251:  "tl1-raw-ssl",
+	6252:  "tl1-ssh",
+	6253:  "crip",
+	6268:  "grid",
+	6269:  "grid-alt",
+	6300:  "bmc-grx",
+	6301:  "bmc-ctd-ldap",
+	6306:  "ufmp",
+	6315:  "scup-disc",
+	6316:  "abb-escp",
+	6317:  "nav-data",
+	6320:  "repsvc",
+	6321:  "emp-server1",
+	6322:  "emp-server2",
+	6324:  "hrd-ns-disc",
+	6343:  "sflow",
+	6346:  "gnutella-svc",
+	6347:  "gnutella-rtr",
+	6350:  "adap",
+	6355:  "pmcs",
+	6360:  "metaedit-mu",
+	6363:  "ndn",
+	6370:  "metaedit-se",
+	6382:  "metatude-mds",
+	6389:  "clariion-evr01",
+	6390:  "metaedit-ws",
+	6417:  "faxcomservice",
+	6419:  "svdrp-disc",
+	6420:  "nim-vdrshell",
+	6421:  "nim-wan",
+	6443:  "sun-sr-https",
+	6444:  "sge-qmaster",
+	6445:  "sge-execd",
+	6446:  "mysql-proxy",
+	6455:  "skip-cert-recv",
+	6456:  "skip-cert-send",
+	6464:  "ieee11073-20701",
+	6471:  "lvision-lm",
+	6480:  "sun-sr-http",
+	6481:  "servicetags",
+	6482:  "ldoms-mgmt",
+	6483:  "SunVTS-RMI",
+	6484:  "sun-sr-jms",
+	6485:  "sun-sr-iiop",
+	6486:  "sun-sr-iiops",
+	6487:  "sun-sr-iiop-aut",
+	6488:  "sun-sr-jmx",
+	6489:  "sun-sr-admin",
+	6500:  "boks",
+	6501:  "boks-servc",
+	6502:  "boks-servm",
+	6503:  "boks-clntd",
+	6505:  "badm-priv",
+	6506:  "badm-pub",
+	6507:  "bdir-priv",
+	6508:  "bdir-pub",
+	6509:  "mgcs-mfp-port",
+	6510:  "mcer-port",
+	6511:  "dccp-udp",
+	6514:  "syslog-tls",
+	6515:  "elipse-rec",
+	6543:  "lds-distrib",
+	6544:  "lds-dump",
+	6547:  "apc-6547",
+	6548:  "apc-6548",
+	6549:  "apc-6549",
+	6550:  "fg-sysupdate",
+	6551:  "sum",
+	6558:  "xdsxdm",
+	6566:  "sane-port",
+	6568:  "rp-reputation",
+	6579:  "affiliate",
+	6580:  "parsec-master",
+	6581:  "parsec-peer",
+	6582:  "parsec-game",
+	6583:  "joaJewelSuite",
+	6619:  "odette-ftps",
+	6620:  "kftp-data",
+	6621:  "kftp",
+	6622:  "mcftp",
+	6623:  "ktelnet",
+	6626:  "wago-service",
+	6627:  "nexgen",
+	6628:  "afesc-mc",
+	6629:  "nexgen-aux",
+	6633:  "cisco-vpath-tun",
+	6634:  "mpls-pm",
+	6635:  "mpls-udp",
+	6636:  "mpls-udp-dtls",
+	6653:  "openflow",
+	6657:  "palcom-disc",
+	6670:  "vocaltec-gold",
+	6671:  "p4p-portal",
+	6672:  "vision-server",
+	6673:  "vision-elmd",
+	6678:  "vfbp-disc",
+	6679:  "osaut",
+	6689:  "tsa",
+	6696:  "babel",
+	6701:  "kti-icad-srvr",
+	6702:  "e-design-net",
+	6703:  "e-design-web",
+	6714:  "ibprotocol",
+	6715:  "fibotrader-com",
+	6767:  "bmc-perf-agent",
+	6768:  "bmc-perf-mgrd",
+	6769:  "adi-gxp-srvprt",
+	6770:  "plysrv-http",
+	6771:  "plysrv-https",
+	6784:  "bfd-lag",
+	6785:  "dgpf-exchg",
+	6786:  "smc-jmx",
+	6787:  "smc-admin",
+	6788:  "smc-http",
+	6790:  "hnmp",
+	6791:  "hnm",
+	6801:  "acnet",
+	6831:  "ambit-lm",
+	6841:  "netmo-default",
+	6842:  "netmo-http",
+	6850:  "iccrushmore",
+	6868:  "acctopus-st",
+	6888:  "muse",
+	6935:  "ethoscan",
+	6936:  "xsmsvc",
+	6946:  "bioserver",
+	6951:  "otlp",
+	6961:  "jmact3",
+	6962:  "jmevt2",
+	6963:  "swismgr1",
+	6964:  "swismgr2",
+	6965:  "swistrap",
+	6966:  "swispol",
+	6969:  "acmsoda",
+	6997:  "MobilitySrv",
+	6998:  "iatp-highpri",
+	6999:  "iatp-normalpri",
+	7000:  "afs3-fileserver",
+	7001:  "afs3-callback",
+	7002:  "afs3-prserver",
+	7003:  "afs3-vlserver",
+	7004:  "afs3-kaserver",
+	7005:  "afs3-volser",
+	7006:  "afs3-errors",
+	7007:  "afs3-bos",
+	7008:  "afs3-update",
+	7009:  "afs3-rmtsys",
+	7010:  "ups-onlinet",
+	7011:  "talon-disc",
+	7012:  "talon-engine",
+	7013:  "microtalon-dis",
+	7014:  "microtalon-com",
+	7015:  "talon-webserver",
+	7016:  "spg",
+	7017:  "grasp",
+	7019:  "doceri-view",
+	7020:  "dpserve",
+	7021:  "dpserveadmin",
+	7022:  "ctdp",
+	7023:  "ct2nmcs",
+	7024:  "vmsvc",
+	7025:  "vmsvc-2",
+	7030:  "op-probe",
+	7040:  "quest-disc",
+	7070:  "arcp",
+	7071:  "iwg1",
+	7080:  "empowerid",
+	7088:  "zixi-transport",
+	7095:  "jdp-disc",
+	7099:  "lazy-ptop",
+	7100:  "font-service",
+	7101:  "elcn",
+	7107:  "aes-x170",
+	7121:  "virprot-lm",
+	7128:  "scenidm",
+	7129:  "scenccs",
+	7161:  "cabsm-comm",
+	7162:  "caistoragemgr",
+	7163:  "cacsambroker",
+	7164:  "fsr",
+	7165:  "doc-server",
+	7166:  "aruba-server",
+	7169:  "ccag-pib",
+	7170:  "nsrp",
+	7171:  "drm-production",
+	7174:  "clutild",
+	7181:  "janus-disc",
+	7200:  "fodms",
+	7201:  "dlip",
+	7227:  "ramp",
+	7235:  "aspcoordination",
+	7244:  "frc-hicp-disc",
+	7262:  "cnap",
+	7272:  "watchme-7272",
+	7273:  "oma-rlp",
+	7274:  "oma-rlp-s",
+	7275:  "oma-ulp",
+	7276:  "oma-ilp",
+	7277:  "oma-ilp-s",
+	7278:  "oma-dcdocbs",
+	7279:  "ctxlic",
+	7280:  "itactionserver1",
+	7281:  "itactionserver2",
+	7282:  "mzca-alert",
+	7365:  "lcm-server",
+	7391:  "mindfilesys",
+	7392:  "mrssrendezvous",
+	7393:  "nfoldman",
+	7394:  "fse",
+	7395:  "winqedit",
+	7397:  "hexarc",
+	7400:  "rtps-discovery",
+	7401:  "rtps-dd-ut",
+	7402:  "rtps-dd-mt",
+	7410:  "ionixnetmon",
+	7411:  "daqstream",
+	7421:  "mtportmon",
+	7426:  "pmdmgr",
+	7427:  "oveadmgr",
+	7428:  "ovladmgr",
+	7429:  "opi-sock",
+	7430:  "xmpv7",
+	7431:  "pmd",
+	7437:  "faximum",
+	7443:  "oracleas-https",
+	7473:  "rise",
+	7491:  "telops-lmd",
+	7500:  "silhouette",
+	7501:  "ovbus",
+	7510:  "ovhpas",
+	7511:  "pafec-lm",
+	7542:  "saratoga",
+	7543:  "atul",
+	7544:  "nta-ds",
+	7545:  "nta-us",
+	7546:  "cfs",
+	7547:  "cwmp",
+	7548:  "tidp",
+	7549:  "nls-tl",
+	7550:  "cloudsignaling",
+	7560:  "sncp",
+	7566:  "vsi-omega",
+	7570:  "aries-kfinder",
+	7574:  "coherence-disc",
+	7588:  "sun-lm",
+	7606:  "mipi-debug",
+	7624:  "indi",
+	7627:  "soap-http",
+	7628:  "zen-pawn",
+	7629:  "xdas",
+	7633:  "pmdfmgt",
+	7648:  "cuseeme",
+	7674:  "imqtunnels",
+	7675:  "imqtunnel",
+	7676:  "imqbrokerd",
+	7677:  "sun-user-https",
+	7680:  "pando-pub",
+	7689:  "collaber",
+	7697:  "klio",
+	7707:  "sync-em7",
+	7708:  "scinet",
+	7720:  "medimageportal",
+	7724:  "nsdeepfreezectl",
+	7725:  "nitrogen",
+	7726:  "freezexservice",
+	7727:  "trident-data",
+	7728:  "osvr",
+	7734:  "smip",
+	7738:  "aiagent",
+	7741:  "scriptview",
+	7743:  "sstp-1",
+	7744:  "raqmon-pdu",
+	7747:  "prgp",
+	7777:  "cbt",
+	7778:  "interwise",
+	7779:  "vstat",
+	7781:  "accu-lmgr",
+	7784:  "s-bfd",
+	7786:  "minivend",
+	7787:  "popup-reminders",
+	7789:  "office-tools",
+	7794:  "q3ade",
+	7797:  "pnet-conn",
+	7798:  "pnet-enc",
+	7799:  "altbsdp",
+	7800:  "asr",
+	7801:  "ssp-client",
+	7802:  "vns-tp",
+	7810:  "rbt-wanopt",
+	7845:  "apc-7845",
+	7846:  "apc-7846",
+	7872:  "mipv6tls",
+	7880:  "pss",
+	7887:  "ubroker",
+	7900:  "mevent",
+	7901:  "tnos-sp",
+	7902:  "tnos-dp",
+	7903:  "tnos-dps",
+	7913:  "qo-secure",
+	7932:  "t2-drm",
+	7933:  "t2-brm",
+	7962:  "generalsync",
+	7967:  "supercell",
+	7979:  "micromuse-ncps",
+	7980:  "quest-vista",
+	7982:  "sossd-disc",
+	7998:  "usicontentpush",
+	7999:  "irdmi2",
+	8000:  "irdmi",
+	8001:  "vcom-tunnel",
+	8002:  "teradataordbms",
+	8003:  "mcreport",
+	8005:  "mxi",
+	8006:  "wpl-disc",
+	8007:  "warppipe",
+	8008:  "http-alt",
+	8019:  "qbdb",
+	8020:  "intu-ec-svcdisc",
+	8021:  "intu-ec-client",
+	8022:  "oa-system",
+	8025:  "ca-audit-da",
+	8026:  "ca-audit-ds",
+	8032:  "pro-ed",
+	8033:  "mindprint",
+	8034:  "vantronix-mgmt",
+	8040:  "ampify",
+	8041:  "enguity-xccetp",
+	8052:  "senomix01",
+	8053:  "senomix02",
+	8054:  "senomix03",
+	8055:  "senomix04",
+	8056:  "senomix05",
+	8057:  "senomix06",
+	8058:  "senomix07",
+	8059:  "senomix08",
+	8060:  "aero",
+	8074:  "gadugadu",
+	8080:  "http-alt",
+	8081:  "sunproxyadmin",
+	8082:  "us-cli",
+	8083:  "us-srv",
+	8086:  "d-s-n",
+	8087:  "simplifymedia",
+	8088:  "radan-http",
+	8097:  "sac",
+	8100:  "xprint-server",
+	8115:  "mtl8000-matrix",
+	8116:  "cp-cluster",
+	8118:  "privoxy",
+	8121:  "apollo-data",
+	8122:  "apollo-admin",
+	8128:  "paycash-online",
+	8129:  "paycash-wbp",
+	8130:  "indigo-vrmi",
+	8131:  "indigo-vbcp",
+	8132:  "dbabble",
+	8148:  "isdd",
+	8149:  "eor-game",
+	8160:  "patrol",
+	8161:  "patrol-snmp",
+	8182:  "vmware-fdm",
+	8184:  "itach",
+	8192:  "spytechphone",
+	8194:  "blp1",
+	8195:  "blp2",
+	8199:  "vvr-data",
+	8200:  "trivnet1",
+	8201:  "trivnet2",
+	8202:  "aesop",
+	8204:  "lm-perfworks",
+	8205:  "lm-instmgr",
+	8206:  "lm-dta",
+	8207:  "lm-sserver",
+	8208:  "lm-webwatcher",
+	8230:  "rexecj",
+	8231:  "hncp-udp-port",
+	8232:  "hncp-dtls-port",
+	8243:  "synapse-nhttps",
+	8276:  "pando-sec",
+	8280:  "synapse-nhttp",
+	8282:  "libelle-disc",
+	8292:  "blp3",
+	8294:  "blp4",
+	8300:  "tmi",
+	8301:  "amberon",
+	8320:  "tnp-discover",
+	8321:  "tnp",
+	8322:  "garmin-marine",
+	8351:  "server-find",
+	8376:  "cruise-enum",
+	8377:  "cruise-swroute",
+	8378:  "cruise-config",
+	8379:  "cruise-diags",
+	8380:  "cruise-update",
+	8383:  "m2mservices",
+	8384:  "marathontp",
+	8400:  "cvd",
+	8401:  "sabarsd",
+	8402:  "abarsd",
+	8403:  "admind",
+	8416:  "espeech",
+	8417:  "espeech-rtp",
+	8442:  "cybro-a-bus",
+	8443:  "pcsync-https",
+	8444:  "pcsync-http",
+	8445:  "copy-disc",
+	8450:  "npmp",
+	8472:  "otv",
+	8473:  "vp2p",
+	8474:  "noteshare",
+	8500:  "fmtp",
+	8501:  "cmtp-av",
+	8503:  "lsp-self-ping",
+	8554:  "rtsp-alt",
+	8555:  "d-fence",
+	8567:  "dof-tunnel",
+	8600:  "asterix",
+	8609:  "canon-cpp-disc",
+	8610:  "canon-mfnp",
+	8611:  "canon-bjnp1",
+	8612:  "canon-bjnp2",
+	8613:  "canon-bjnp3",
+	8614:  "canon-bjnp4",
+	8675:  "msi-cps-rm-disc",
+	8686:  "sun-as-jmxrmi",
+	8732:  "dtp-net",
+	8733:  "ibus",
+	8763:  "mc-appserver",
+	8764:  "openqueue",
+	8765:  "ultraseek-http",
+	8766:  "amcs",
+	8770:  "dpap",
+	8786:  "msgclnt",
+	8787:  "msgsrvr",
+	8793:  "acd-pm",
+	8800:  "sunwebadmin",
+	8804:  "truecm",
+	8805:  "pfcp",
+	8808:  "ssports-bcast",
+	8873:  "dxspider",
+	8880:  "cddbp-alt",
+	8883:  "secure-mqtt",
+	8888:  "ddi-udp-1",
+	8889:  "ddi-udp-2",
+	8890:  "ddi-udp-3",
+	8891:  "ddi-udp-4",
+	8892:  "ddi-udp-5",
+	8893:  "ddi-udp-6",
+	8894:  "ddi-udp-7",
+	8899:  "ospf-lite",
+	8900:  "jmb-cds1",
+	8901:  "jmb-cds2",
+	8910:  "manyone-http",
+	8911:  "manyone-xml",
+	8912:  "wcbackup",
+	8913:  "dragonfly",
+	8954:  "cumulus-admin",
+	8980:  "nod-provider",
+	8981:  "nod-client",
+	8989:  "sunwebadmins",
+	8990:  "http-wmap",
+	8991:  "https-wmap",
+	8999:  "bctp",
+	9000:  "cslistener",
+	9001:  "etlservicemgr",
+	9002:  "dynamid",
+	9007:  "ogs-client",
+	9009:  "pichat",
+	9020:  "tambora",
+	9021:  "panagolin-ident",
+	9022:  "paragent",
+	9023:  "swa-1",
+	9024:  "swa-2",
+	9025:  "swa-3",
+	9026:  "swa-4",
+	9060:  "CardWeb-RT",
+	9080:  "glrpc",
+	9084:  "aurora",
+	9085:  "ibm-rsyscon",
+	9086:  "net2display",
+	9087:  "classic",
+	9088:  "sqlexec",
+	9089:  "sqlexec-ssl",
+	9090:  "websm",
+	9091:  "xmltec-xmlmail",
+	9092:  "XmlIpcRegSvc",
+	9100:  "hp-pdl-datastr",
+	9101:  "bacula-dir",
+	9102:  "bacula-fd",
+	9103:  "bacula-sd",
+	9104:  "peerwire",
+	9105:  "xadmin",
+	9106:  "astergate-disc",
+	9119:  "mxit",
+	9131:  "dddp",
+	9160:  "apani1",
+	9161:  "apani2",
+	9162:  "apani3",
+	9163:  "apani4",
+	9164:  "apani5",
+	9191:  "sun-as-jpda",
+	9200:  "wap-wsp",
+	9201:  "wap-wsp-wtp",
+	9202:  "wap-wsp-s",
+	9203:  "wap-wsp-wtp-s",
+	9204:  "wap-vcard",
+	9205:  "wap-vcal",
+	9206:  "wap-vcard-s",
+	9207:  "wap-vcal-s",
+	9208:  "rjcdb-vcards",
+	9209:  "almobile-system",
+	9210:  "oma-mlp",
+	9211:  "oma-mlp-s",
+	9212:  "serverviewdbms",
+	9213:  "serverstart",
+	9214:  "ipdcesgbs",
+	9215:  "insis",
+	9216:  "acme",
+	9217:  "fsc-port",
+	9222:  "teamcoherence",
+	9255:  "mon",
+	9277:  "traingpsdata",
+	9278:  "pegasus",
+	9279:  "pegasus-ctl",
+	9280:  "pgps",
+	9281:  "swtp-port1",
+	9282:  "swtp-port2",
+	9283:  "callwaveiam",
+	9284:  "visd",
+	9285:  "n2h2server",
+	9286:  "n2receive",
+	9287:  "cumulus",
+	9292:  "armtechdaemon",
+	9293:  "storview",
+	9294:  "armcenterhttp",
+	9295:  "armcenterhttps",
+	9300:  "vrace",
+	9318:  "secure-ts",
+	9321:  "guibase",
+	9343:  "mpidcmgr",
+	9344:  "mphlpdmc",
+	9346:  "ctechlicensing",
+	9374:  "fjdmimgr",
+	9380:  "boxp",
+	9396:  "fjinvmgr",
+	9397:  "mpidcagt",
+	9400:  "sec-t4net-srv",
+	9401:  "sec-t4net-clt",
+	9402:  "sec-pc2fax-srv",
+	9418:  "git",
+	9443:  "tungsten-https",
+	9444:  "wso2esb-console",
+	9450:  "sntlkeyssrvr",
+	9500:  "ismserver",
+	9522:  "sma-spw",
+	9535:  "mngsuite",
+	9536:  "laes-bf",
+	9555:  "trispen-sra",
+	9592:  "ldgateway",
+	9593:  "cba8",
+	9594:  "msgsys",
+	9595:  "pds",
+	9596:  "mercury-disc",
+	9597:  "pd-admin",
+	9598:  "vscp",
+	9599:  "robix",
+	9600:  "micromuse-ncpw",
+	9612:  "streamcomm-ds",
+	9618:  "condor",
+	9628:  "odbcpathway",
+	9629:  "uniport",
+	9632:  "mc-comm",
+	9667:  "xmms2",
+	9668:  "tec5-sdctp",
+	9694:  "client-wakeup",
+	9695:  "ccnx",
+	9700:  "board-roar",
+	9747:  "l5nas-parchan",
+	9750:  "board-voip",
+	9753:  "rasadv",
+	9762:  "tungsten-http",
+	9800:  "davsrc",
+	9801:  "sstp-2",
+	9802:  "davsrcs",
+	9875:  "sapv1",
+	9878:  "kca-service",
+	9888:  "cyborg-systems",
+	9889:  "gt-proxy",
+	9898:  "monkeycom",
+	9899:  "sctp-tunneling",
+	9900:  "iua",
+	9901:  "enrp",
+	9903:  "multicast-ping",
+	9909:  "domaintime",
+	9911:  "sype-transport",
+	9950:  "apc-9950",
+	9951:  "apc-9951",
+	9952:  "apc-9952",
+	9953:  "acis",
+	9955:  "alljoyn-mcm",
+	9956:  "alljoyn",
+	9966:  "odnsp",
+	9987:  "dsm-scm-target",
+	9990:  "osm-appsrvr",
+	9991:  "osm-oev",
+	9992:  "palace-1",
+	9993:  "palace-2",
+	9994:  "palace-3",
+	9995:  "palace-4",
+	9996:  "palace-5",
+	9997:  "palace-6",
+	9998:  "distinct32",
+	9999:  "distinct",
+	10000: "ndmp",
+	10001: "scp-config",
+	10002: "documentum",
+	10003: "documentum-s",
+	10007: "mvs-capacity",
+	10008: "octopus",
+	10009: "swdtp-sv",
+	10050: "zabbix-agent",
+	10051: "zabbix-trapper",
+	10080: "amanda",
+	10081: "famdc",
+	10100: "itap-ddtp",
+	10101: "ezmeeting-2",
+	10102: "ezproxy-2",
+	10103: "ezrelay",
+	10104: "swdtp",
+	10107: "bctp-server",
+	10110: "nmea-0183",
+	10111: "nmea-onenet",
+	10113: "netiq-endpoint",
+	10114: "netiq-qcheck",
+	10115: "netiq-endpt",
+	10116: "netiq-voipa",
+	10117: "iqrm",
+	10128: "bmc-perf-sd",
+	10160: "qb-db-server",
+	10161: "snmpdtls",
+	10162: "snmpdtls-trap",
+	10200: "trisoap",
+	10201: "rscs",
+	10252: "apollo-relay",
+	10253: "eapol-relay",
+	10260: "axis-wimp-port",
+	10288: "blocks",
+	10439: "bngsync",
+	10500: "hip-nat-t",
+	10540: "MOS-lower",
+	10541: "MOS-upper",
+	10542: "MOS-aux",
+	10543: "MOS-soap",
+	10544: "MOS-soap-opt",
+	10800: "gap",
+	10805: "lpdg",
+	10810: "nmc-disc",
+	10860: "helix",
+	10880: "bveapi",
+	10990: "rmiaux",
+	11000: "irisa",
+	11001: "metasys",
+	10023: "cefd-vmp",
+	11095: "weave",
+	11106: "sgi-lk",
+	11108: "myq-termlink",
+	11111: "vce",
+	11112: "dicom",
+	11161: "suncacao-snmp",
+	11162: "suncacao-jmxmp",
+	11163: "suncacao-rmi",
+	11164: "suncacao-csa",
+	11165: "suncacao-websvc",
+	11171: "snss",
+	11201: "smsqp",
+	11208: "wifree",
+	11211: "memcache",
+	11319: "imip",
+	11320: "imip-channels",
+	11321: "arena-server",
+	11367: "atm-uhas",
+	11371: "hkp",
+	11430: "lsdp",
+	11600: "tempest-port",
+	11720: "h323callsigalt",
+	11723: "emc-xsw-dcache",
+	11751: "intrepid-ssl",
+	11796: "lanschool-mpt",
+	11876: "xoraya",
+	11877: "x2e-disc",
+	11967: "sysinfo-sp",
+	12000: "entextxid",
+	12001: "entextnetwk",
+	12002: "entexthigh",
+	12003: "entextmed",
+	12004: "entextlow",
+	12005: "dbisamserver1",
+	12006: "dbisamserver2",
+	12007: "accuracer",
+	12008: "accuracer-dbms",
+	12009: "ghvpn",
+	12012: "vipera",
+	12013: "vipera-ssl",
+	12109: "rets-ssl",
+	12121: "nupaper-ss",
+	12168: "cawas",
+	12172: "hivep",
+	12300: "linogridengine",
+	12321: "warehouse-sss",
+	12322: "warehouse",
+	12345: "italk",
+	12753: "tsaf",
+	13160: "i-zipqd",
+	13216: "bcslogc",
+	13217: "rs-pias",
+	13218: "emc-vcas-udp",
+	13223: "powwow-client",
+	13224: "powwow-server",
+	13400: "doip-disc",
+	13720: "bprd",
+	13721: "bpdbm",
+	13722: "bpjava-msvc",
+	13724: "vnetd",
+	13782: "bpcd",
+	13783: "vopied",
+	13785: "nbdb",
+	13786: "nomdb",
+	13818: "dsmcc-config",
+	13819: "dsmcc-session",
+	13820: "dsmcc-passthru",
+	13821: "dsmcc-download",
+	13822: "dsmcc-ccp",
+	13894: "ucontrol",
+	13929: "dta-systems",
+	14000: "scotty-ft",
+	14001: "sua",
+	14002: "scotty-disc",
+	14033: "sage-best-com1",
+	14034: "sage-best-com2",
+	14141: "vcs-app",
+	14142: "icpp",
+	14145: "gcm-app",
+	14149: "vrts-tdd",
+	14154: "vad",
+	14250: "cps",
+	14414: "ca-web-update",
+	14936: "hde-lcesrvr-1",
+	14937: "hde-lcesrvr-2",
+	15000: "hydap",
+	15118: "v2g-secc",
+	15345: "xpilot",
+	15363: "3link",
+	15555: "cisco-snat",
+	15660: "bex-xr",
+	15740: "ptp",
+	15998: "2ping",
+	16003: "alfin",
+	16161: "sun-sea-port",
+	16309: "etb4j",
+	16310: "pduncs",
+	16311: "pdefmns",
+	16360: "netserialext1",
+	16361: "netserialext2",
+	16367: "netserialext3",
+	16368: "netserialext4",
+	16384: "connected",
+	16666: "vtp",
+	16900: "newbay-snc-mc",
+	16950: "sgcip",
+	16991: "intel-rci-mp",
+	16992: "amt-soap-http",
+	16993: "amt-soap-https",
+	16994: "amt-redir-tcp",
+	16995: "amt-redir-tls",
+	17007: "isode-dua",
+	17185: "soundsvirtual",
+	17219: "chipper",
+	17220: "avtp",
+	17221: "avdecc",
+	17222: "cpsp",
+	17224: "trdp-pd",
+	17225: "trdp-md",
+	17234: "integrius-stp",
+	17235: "ssh-mgmt",
+	17500: "db-lsp-disc",
+	17729: "ea",
+	17754: "zep",
+	17755: "zigbee-ip",
+	17756: "zigbee-ips",
+	18000: "biimenu",
+	18181: "opsec-cvp",
+	18182: "opsec-ufp",
+	18183: "opsec-sam",
+	18184: "opsec-lea",
+	18185: "opsec-omi",
+	18186: "ohsc",
+	18187: "opsec-ela",
+	18241: "checkpoint-rtm",
+	18262: "gv-pf",
+	18463: "ac-cluster",
+	18634: "rds-ib",
+	18635: "rds-ip",
+	18668: "vdmmesh-disc",
+	18769: "ique",
+	18881: "infotos",
+	18888: "apc-necmp",
+	19000: "igrid",
+	19007: "scintilla",
+	19191: "opsec-uaa",
+	19194: "ua-secureagent",
+	19220: "cora-disc",
+	19283: "keysrvr",
+	19315: "keyshadow",
+	19398: "mtrgtrans",
+	19410: "hp-sco",
+	19411: "hp-sca",
+	19412: "hp-sessmon",
+	19539: "fxuptp",
+	19540: "sxuptp",
+	19541: "jcp",
+	19788: "mle",
+	19999: "dnp-sec",
+	20000: "dnp",
+	20001: "microsan",
+	20002: "commtact-http",
+	20003: "commtact-https",
+	20005: "openwebnet",
+	20012: "ss-idi-disc",
+	20014: "opendeploy",
+	20034: "nburn-id",
+	20046: "tmophl7mts",
+	20048: "mountd",
+	20049: "nfsrdma",
+	20167: "tolfab",
+	20202: "ipdtp-port",
+	20222: "ipulse-ics",
+	20480: "emwavemsg",
+	20670: "track",
+	20999: "athand-mmp",
+	21000: "irtrans",
+	21554: "dfserver",
+	21590: "vofr-gateway",
+	21800: "tvpm",
+	21845: "webphone",
+	21846: "netspeak-is",
+	21847: "netspeak-cs",
+	21848: "netspeak-acd",
+	21849: "netspeak-cps",
+	22000: "snapenetio",
+	22001: "optocontrol",
+	22002: "optohost002",
+	22003: "optohost003",
+	22004: "optohost004",
+	22005: "optohost004",
+	22273: "wnn6",
+	22305: "cis",
+	22335: "shrewd-stream",
+	22343: "cis-secure",
+	22347: "wibukey",
+	22350: "codemeter",
+	22555: "vocaltec-phone",
+	22763: "talikaserver",
+	22800: "aws-brf",
+	22951: "brf-gw",
+	23000: "inovaport1",
+	23001: "inovaport2",
+	23002: "inovaport3",
+	23003: "inovaport4",
+	23004: "inovaport5",
+	23005: "inovaport6",
+	23272: "s102",
+	23294: "5afe-disc",
+	23333: "elxmgmt",
+	23400: "novar-dbase",
+	23401: "novar-alarm",
+	23402: "novar-global",
+	24000: "med-ltp",
+	24001: "med-fsp-rx",
+	24002: "med-fsp-tx",
+	24003: "med-supp",
+	24004: "med-ovw",
+	24005: "med-ci",
+	24006: "med-net-svc",
+	24242: "filesphere",
+	24249: "vista-4gl",
+	24321: "ild",
+	24322: "hid",
+	24386: "intel-rci",
+	24465: "tonidods",
+	24554: "binkp",
+	24577: "bilobit-update",
+	24676: "canditv",
+	24677: "flashfiler",
+	24678: "proactivate",
+	24680: "tcc-http",
+	24850: "assoc-disc",
+	24922: "find",
+	25000: "icl-twobase1",
+	25001: "icl-twobase2",
+	25002: "icl-twobase3",
+	25003: "icl-twobase4",
+	25004: "icl-twobase5",
+	25005: "icl-twobase6",
+	25006: "icl-twobase7",
+	25007: "icl-twobase8",
+	25008: "icl-twobase9",
+	25009: "icl-twobase10",
+	25793: "vocaltec-hos",
+	25900: "tasp-net",
+	25901: "niobserver",
+	25902: "nilinkanalyst",
+	25903: "niprobe",
+	25954: "bf-game",
+	25955: "bf-master",
+	26000: "quake",
+	26133: "scscp",
+	26208: "wnn6-ds",
+	26260: "ezproxy",
+	26261: "ezmeeting",
+	26262: "k3software-svr",
+	26263: "k3software-cli",
+	26486: "exoline-udp",
+	26487: "exoconfig",
+	26489: "exonet",
+	27345: "imagepump",
+	27442: "jesmsjc",
+	27504: "kopek-httphead",
+	27782: "ars-vista",
+	27999: "tw-auth-key",
+	28000: "nxlmd",
+	28119: "a27-ran-ran",
+	28200: "voxelstorm",
+	28240: "siemensgsm",
+	29167: "otmp",
+	30001: "pago-services1",
+	30002: "pago-services2",
+	30003: "amicon-fpsu-ra",
+	30004: "amicon-fpsu-s",
+	30260: "kingdomsonline",
+	30832: "samsung-disc",
+	30999: "ovobs",
+	31016: "ka-kdp",
+	31029: "yawn",
+	31416: "xqosd",
+	31457: "tetrinet",
+	31620: "lm-mon",
+	31765: "gamesmith-port",
+	31948: "iceedcp-tx",
+	31949: "iceedcp-rx",
+	32034: "iracinghelper",
+	32249: "t1distproc60",
+	32483: "apm-link",
+	32635: "sec-ntb-clnt",
+	32636: "DMExpress",
+	32767: "filenet-powsrm",
+	32768: "filenet-tms",
+	32769: "filenet-rpc",
+	32770: "filenet-nch",
+	32771: "filenet-rmi",
+	32772: "filenet-pa",
+	32773: "filenet-cm",
+	32774: "filenet-re",
+	32775: "filenet-pch",
+	32776: "filenet-peior",
+	32777: "filenet-obrok",
+	32801: "mlsn",
+	32896: "idmgratm",
+	33123: "aurora-balaena",
+	33331: "diamondport",
+	33334: "speedtrace-disc",
+	33434: "traceroute",
+	33656: "snip-slave",
+	34249: "turbonote-2",
+	34378: "p-net-local",
+	34379: "p-net-remote",
+	34567: "edi_service",
+	34962: "profinet-rt",
+	34963: "profinet-rtm",
+	34964: "profinet-cm",
+	34980: "ethercat",
+	35001: "rt-viewer",
+	35004: "rt-classmanager",
+	35100: "axio-disc",
+	35355: "altova-lm-disc",
+	36001: "allpeers",
+	36411: "wlcp",
+	36865: "kastenxpipe",
+	37475: "neckar",
+	37654: "unisys-eportal",
+	38002: "crescoctrl-disc",
+	38201: "galaxy7-data",
+	38202: "fairview",
+	38203: "agpolicy",
+	39681: "turbonote-1",
+	40000: "safetynetp",
+	40023: "k-patentssensor",
+	40841: "cscp",
+	40842: "csccredir",
+	40843: "csccfirewall",
+	40853: "ortec-disc",
+	41111: "fs-qos",
+	41230: "z-wave-s",
+	41794: "crestron-cip",
+	41795: "crestron-ctp",
+	42508: "candp",
+	42509: "candrp",
+	42510: "caerpc",
+	43000: "recvr-rc-disc",
+	43188: "reachout",
+	43189: "ndm-agent-port",
+	43190: "ip-provision",
+	43210: "shaperai-disc",
+	43439: "eq3-config",
+	43440: "ew-disc-cmd",
+	43441: "ciscocsdb",
+	44321: "pmcd",
+	44322: "pmcdproxy",
+	44544: "domiq",
+	44553: "rbr-debug",
+	44600: "asihpi",
+	44818: "EtherNet-IP-2",
+	44900: "m3da-disc",
+	45000: "asmp-mon",
+	45054: "invision-ag",
+	45514: "cloudcheck-ping",
+	45678: "eba",
+	45825: "qdb2service",
+	45966: "ssr-servermgr",
+	46999: "mediabox",
+	47000: "mbus",
+	47100: "jvl-mactalk",
+	47557: "dbbrowse",
+	47624: "directplaysrvr",
+	47806: "ap",
+	47808: "bacnet",
+	47809: "presonus-ucnet",
+	48000: "nimcontroller",
+	48001: "nimspooler",
+	48002: "nimhub",
+	48003: "nimgtw",
+	48128: "isnetserv",
+	48129: "blp5",
+	48556: "com-bardac-dw",
+	48619: "iqobject",
+	48653: "robotraconteur",
+	49001: "nusdp-disc",
+}
+var sctpPortNames = map[SCTPPort]string{
+	9:     "discard",
+	20:    "ftp-data",
+	21:    "ftp",
+	22:    "ssh",
+	80:    "http",
+	179:   "bgp",
+	443:   "https",
+	1021:  "exp1",
+	1022:  "exp2",
+	1167:  "cisco-ipsla",
+	1720:  "h323hostcall",
+	2049:  "nfs",
+	2225:  "rcip-itu",
+	2904:  "m2ua",
+	2905:  "m3ua",
+	2944:  "megaco-h248",
+	2945:  "h248-binary",
+	3097:  "itu-bicc-stc",
+	3565:  "m2pa",
+	3863:  "asap-sctp",
+	3864:  "asap-sctp-tls",
+	3868:  "diameter",
+	4333:  "ahsp",
+	4502:  "a25-fap-fgw",
+	4711:  "trinity-dist",
+	4739:  "ipfix",
+	4740:  "ipfixs",
+	5060:  "sip",
+	5061:  "sips",
+	5090:  "car",
+	5091:  "cxtp",
+	5215:  "noteza",
+	5445:  "smbdirect",
+	5672:  "amqp",
+	5675:  "v5ua",
+	5868:  "diameters",
+	5910:  "cm",
+	5911:  "cpdlc",
+	5912:  "fis",
+	5913:  "ads-c",
+	6704:  "frc-hp",
+	6705:  "frc-mp",
+	6706:  "frc-lp",
+	6970:  "conductor-mpx",
+	7626:  "simco",
+	7701:  "nfapi",
+	7728:  "osvr",
+	8471:  "pim-port",
+	9082:  "lcs-ap",
+	9084:  "aurora",
+	9900:  "iua",
+	9901:  "enrp-sctp",
+	9902:  "enrp-sctp-tls",
+	11997: "wmereceiving",
+	11998: "wmedistribution",
+	11999: "wmereporting",
+	14001: "sua",
+	20049: "nfsrdma",
+	25471: "rna",
+	29118: "sgsap",
+	29168: "sbcap",
+	29169: "iuhsctpassoc",
+	30100: "rwp",
+	36412: "s1-control",
+	36422: "x2-control",
+	36423: "slmap",
+	36424: "nq-ap",
+	36443: "m2ap",
+	36444: "m3ap",
+	36462: "xw-control",
+	38412: "ng-control",
+	38422: "xn-control",
+	38472: "f1-control",
+}
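
These name tables are unexported lookup data for the layers package; a port type's String method typically resolves a number through a table like this and falls back to the bare number when no name is registered. The following standalone sketch only mirrors that lookup pattern — the SCTPPort alias, the two-entry table, and the portString helper are illustrative assumptions, not the vendored package's actual API.

```go
package main

import "fmt"

// SCTPPort and the tiny table below are illustrative stand-ins for the
// vendored package's unexported sctpPortNames data, not its real contents.
type SCTPPort uint16

var sctpPortNames = map[SCTPPort]string{
	3868: "diameter",
	5060: "sip",
}

// portString shows the usual lookup-with-fallback pattern such tables back:
// "number(name)" when the port is registered, otherwise just the number.
func portString(p SCTPPort) string {
	if name, ok := sctpPortNames[p]; ok {
		return fmt.Sprintf("%d(%s)", p, name)
	}
	return fmt.Sprintf("%d", p)
}

func main() {
	fmt.Println(portString(5060)) // 5060(sip)
	fmt.Println(portString(4242)) // 4242
}
```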
diff --git a/vendor/github.com/google/gopacket/layers/icmp4.go b/vendor/github.com/google/gopacket/layers/icmp4.go
new file mode 100644
index 0000000..bd3f03f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/icmp4.go
@@ -0,0 +1,267 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"reflect"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	ICMPv4TypeEchoReply              = 0
+	ICMPv4TypeDestinationUnreachable = 3
+	ICMPv4TypeSourceQuench           = 4
+	ICMPv4TypeRedirect               = 5
+	ICMPv4TypeEchoRequest            = 8
+	ICMPv4TypeRouterAdvertisement    = 9
+	ICMPv4TypeRouterSolicitation     = 10
+	ICMPv4TypeTimeExceeded           = 11
+	ICMPv4TypeParameterProblem       = 12
+	ICMPv4TypeTimestampRequest       = 13
+	ICMPv4TypeTimestampReply         = 14
+	ICMPv4TypeInfoRequest            = 15
+	ICMPv4TypeInfoReply              = 16
+	ICMPv4TypeAddressMaskRequest     = 17
+	ICMPv4TypeAddressMaskReply       = 18
+)
+
+const (
+	// DestinationUnreachable
+	ICMPv4CodeNet                 = 0
+	ICMPv4CodeHost                = 1
+	ICMPv4CodeProtocol            = 2
+	ICMPv4CodePort                = 3
+	ICMPv4CodeFragmentationNeeded = 4
+	ICMPv4CodeSourceRoutingFailed = 5
+	ICMPv4CodeNetUnknown          = 6
+	ICMPv4CodeHostUnknown         = 7
+	ICMPv4CodeSourceIsolated      = 8
+	ICMPv4CodeNetAdminProhibited  = 9
+	ICMPv4CodeHostAdminProhibited = 10
+	ICMPv4CodeNetTOS              = 11
+	ICMPv4CodeHostTOS             = 12
+	ICMPv4CodeCommAdminProhibited = 13
+	ICMPv4CodeHostPrecedence      = 14
+	ICMPv4CodePrecedenceCutoff    = 15
+
+	// TimeExceeded
+	ICMPv4CodeTTLExceeded                    = 0
+	ICMPv4CodeFragmentReassemblyTimeExceeded = 1
+
+	// ParameterProblem
+	ICMPv4CodePointerIndicatesError = 0
+	ICMPv4CodeMissingOption         = 1
+	ICMPv4CodeBadLength             = 2
+
+	// Redirect
+	// ICMPv4CodeNet  = same as for DestinationUnreachable
+	// ICMPv4CodeHost = same as for DestinationUnreachable
+	ICMPv4CodeTOSNet  = 2
+	ICMPv4CodeTOSHost = 3
+)
+
+type icmpv4TypeCodeInfoStruct struct {
+	typeStr string
+	codeStr *map[uint8]string
+}
+
+var (
+	icmpv4TypeCodeInfo = map[uint8]icmpv4TypeCodeInfoStruct{
+		ICMPv4TypeDestinationUnreachable: icmpv4TypeCodeInfoStruct{
+			"DestinationUnreachable", &map[uint8]string{
+				ICMPv4CodeNet:                 "Net",
+				ICMPv4CodeHost:                "Host",
+				ICMPv4CodeProtocol:            "Protocol",
+				ICMPv4CodePort:                "Port",
+				ICMPv4CodeFragmentationNeeded: "FragmentationNeeded",
+				ICMPv4CodeSourceRoutingFailed: "SourceRoutingFailed",
+				ICMPv4CodeNetUnknown:          "NetUnknown",
+				ICMPv4CodeHostUnknown:         "HostUnknown",
+				ICMPv4CodeSourceIsolated:      "SourceIsolated",
+				ICMPv4CodeNetAdminProhibited:  "NetAdminProhibited",
+				ICMPv4CodeHostAdminProhibited: "HostAdminProhibited",
+				ICMPv4CodeNetTOS:              "NetTOS",
+				ICMPv4CodeHostTOS:             "HostTOS",
+				ICMPv4CodeCommAdminProhibited: "CommAdminProhibited",
+				ICMPv4CodeHostPrecedence:      "HostPrecedence",
+				ICMPv4CodePrecedenceCutoff:    "PrecedenceCutoff",
+			},
+		},
+		ICMPv4TypeTimeExceeded: icmpv4TypeCodeInfoStruct{
+			"TimeExceeded", &map[uint8]string{
+				ICMPv4CodeTTLExceeded:                    "TTLExceeded",
+				ICMPv4CodeFragmentReassemblyTimeExceeded: "FragmentReassemblyTimeExceeded",
+			},
+		},
+		ICMPv4TypeParameterProblem: icmpv4TypeCodeInfoStruct{
+			"ParameterProblem", &map[uint8]string{
+				ICMPv4CodePointerIndicatesError: "PointerIndicatesError",
+				ICMPv4CodeMissingOption:         "MissingOption",
+				ICMPv4CodeBadLength:             "BadLength",
+			},
+		},
+		ICMPv4TypeSourceQuench: icmpv4TypeCodeInfoStruct{
+			"SourceQuench", nil,
+		},
+		ICMPv4TypeRedirect: icmpv4TypeCodeInfoStruct{
+			"Redirect", &map[uint8]string{
+				ICMPv4CodeNet:     "Net",
+				ICMPv4CodeHost:    "Host",
+				ICMPv4CodeTOSNet:  "TOS+Net",
+				ICMPv4CodeTOSHost: "TOS+Host",
+			},
+		},
+		ICMPv4TypeEchoRequest: icmpv4TypeCodeInfoStruct{
+			"EchoRequest", nil,
+		},
+		ICMPv4TypeEchoReply: icmpv4TypeCodeInfoStruct{
+			"EchoReply", nil,
+		},
+		ICMPv4TypeTimestampRequest: icmpv4TypeCodeInfoStruct{
+			"TimestampRequest", nil,
+		},
+		ICMPv4TypeTimestampReply: icmpv4TypeCodeInfoStruct{
+			"TimestampReply", nil,
+		},
+		ICMPv4TypeInfoRequest: icmpv4TypeCodeInfoStruct{
+			"InfoRequest", nil,
+		},
+		ICMPv4TypeInfoReply: icmpv4TypeCodeInfoStruct{
+			"InfoReply", nil,
+		},
+		ICMPv4TypeRouterSolicitation: icmpv4TypeCodeInfoStruct{
+			"RouterSolicitation", nil,
+		},
+		ICMPv4TypeRouterAdvertisement: icmpv4TypeCodeInfoStruct{
+			"RouterAdvertisement", nil,
+		},
+		ICMPv4TypeAddressMaskRequest: icmpv4TypeCodeInfoStruct{
+			"AddressMaskRequest", nil,
+		},
+		ICMPv4TypeAddressMaskReply: icmpv4TypeCodeInfoStruct{
+			"AddressMaskReply", nil,
+		},
+	}
+)
+
+type ICMPv4TypeCode uint16
+
+// Type returns the ICMPv4 type field.
+func (a ICMPv4TypeCode) Type() uint8 {
+	return uint8(a >> 8)
+}
+
+// Code returns the ICMPv4 code field.
+func (a ICMPv4TypeCode) Code() uint8 {
+	return uint8(a)
+}
+
+func (a ICMPv4TypeCode) String() string {
+	t, c := a.Type(), a.Code()
+	strInfo, ok := icmpv4TypeCodeInfo[t]
+	if !ok {
+		// Unknown ICMPv4 type field
+		return fmt.Sprintf("%d(%d)", t, c)
+	}
+	typeStr := strInfo.typeStr
+	if strInfo.codeStr == nil && c == 0 {
+		// The ICMPv4 type does not make use of the code field
+		return fmt.Sprintf("%s", strInfo.typeStr)
+	}
+	if strInfo.codeStr == nil && c != 0 {
+		// The ICMPv4 type does not make use of the code field, but it is present anyway
+		return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+	}
+	codeStr, ok := (*strInfo.codeStr)[c]
+	if !ok {
+		// We don't know this ICMPv4 code; print the numerical value
+		return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+	}
+	return fmt.Sprintf("%s(%s)", typeStr, codeStr)
+}
+
+func (a ICMPv4TypeCode) GoString() string {
+	t := reflect.TypeOf(a)
+	return fmt.Sprintf("%s(%d, %d)", t.String(), a.Type(), a.Code())
+}
+
+// SerializeTo writes the ICMPv4TypeCode value to the 'bytes' buffer.
+func (a ICMPv4TypeCode) SerializeTo(bytes []byte) {
+	binary.BigEndian.PutUint16(bytes, uint16(a))
+}
+
+// CreateICMPv4TypeCode is a convenience function to create an ICMPv4TypeCode
+// gopacket type from the ICMPv4 type and code values.
+func CreateICMPv4TypeCode(typ uint8, code uint8) ICMPv4TypeCode {
+	return ICMPv4TypeCode(binary.BigEndian.Uint16([]byte{typ, code}))
+}
+
+// ICMPv4 is the layer for IPv4 ICMP packet data.
+type ICMPv4 struct {
+	BaseLayer
+	TypeCode ICMPv4TypeCode
+	Checksum uint16
+	Id       uint16
+	Seq      uint16
+}
+
+// LayerType returns LayerTypeICMPv4.
+func (i *ICMPv4) LayerType() gopacket.LayerType { return LayerTypeICMPv4 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 8 bytes for ICMPv4 packet")
+	}
+	i.TypeCode = CreateICMPv4TypeCode(data[0], data[1])
+	i.Checksum = binary.BigEndian.Uint16(data[2:4])
+	i.Id = binary.BigEndian.Uint16(data[4:6])
+	i.Seq = binary.BigEndian.Uint16(data[6:8])
+	i.BaseLayer = BaseLayer{data[:8], data[8:]}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+	i.TypeCode.SerializeTo(bytes)
+	binary.BigEndian.PutUint16(bytes[4:], i.Id)
+	binary.BigEndian.PutUint16(bytes[6:], i.Seq)
+	if opts.ComputeChecksums {
+		bytes[2] = 0
+		bytes[3] = 0
+		i.Checksum = tcpipChecksum(b.Bytes(), 0)
+	}
+	binary.BigEndian.PutUint16(bytes[2:], i.Checksum)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv4) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv4
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv4) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func decodeICMPv4(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv4{}
+	return decodingLayerDecoder(i, data, p)
+}
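
The ICMPv4 layer above can be exercised end to end through gopacket's public API: build an echo request, let SerializeTo compute the checksum, then decode the bytes back and print the TypeCode. This is a minimal sketch assuming the standard gopacket/layers import paths; the Id/Seq values and the "ping" payload are arbitrary.

```go
package main

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

func main() {
	// Build an ICMPv4 echo request; ComputeChecksums lets SerializeTo fill in
	// the checksum over the serialized message.
	echo := &layers.ICMPv4{
		TypeCode: layers.CreateICMPv4TypeCode(layers.ICMPv4TypeEchoRequest, 0),
		Id:       1,
		Seq:      1,
	}
	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{ComputeChecksums: true}
	if err := gopacket.SerializeLayers(buf, opts, echo, gopacket.Payload([]byte("ping"))); err != nil {
		panic(err)
	}

	// Decode the raw bytes back through DecodeFromBytes and print the
	// human-readable type/code produced by ICMPv4TypeCode.String.
	pkt := gopacket.NewPacket(buf.Bytes(), layers.LayerTypeICMPv4, gopacket.Default)
	if l := pkt.Layer(layers.LayerTypeICMPv4); l != nil {
		fmt.Println(l.(*layers.ICMPv4).TypeCode) // EchoRequest
	}
}
```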
diff --git a/vendor/github.com/google/gopacket/layers/icmp6.go b/vendor/github.com/google/gopacket/layers/icmp6.go
new file mode 100644
index 0000000..09afd11
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/icmp6.go
@@ -0,0 +1,266 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"reflect"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	// The following are from RFC 4443
+	ICMPv6TypeDestinationUnreachable = 1
+	ICMPv6TypePacketTooBig           = 2
+	ICMPv6TypeTimeExceeded           = 3
+	ICMPv6TypeParameterProblem       = 4
+	ICMPv6TypeEchoRequest            = 128
+	ICMPv6TypeEchoReply              = 129
+
+	// The following are from RFC 4861
+	ICMPv6TypeRouterSolicitation    = 133
+	ICMPv6TypeRouterAdvertisement   = 134
+	ICMPv6TypeNeighborSolicitation  = 135
+	ICMPv6TypeNeighborAdvertisement = 136
+	ICMPv6TypeRedirect              = 137
+
+	// The following are from RFC 2710
+	ICMPv6TypeMLDv1MulticastListenerQueryMessage  = 130
+	ICMPv6TypeMLDv1MulticastListenerReportMessage = 131
+	ICMPv6TypeMLDv1MulticastListenerDoneMessage   = 132
+
+	// The following are from RFC 3810
+	ICMPv6TypeMLDv2MulticastListenerReportMessageV2 = 143
+)
+
+const (
+	// DestinationUnreachable
+	ICMPv6CodeNoRouteToDst           = 0
+	ICMPv6CodeAdminProhibited        = 1
+	ICMPv6CodeBeyondScopeOfSrc       = 2
+	ICMPv6CodeAddressUnreachable     = 3
+	ICMPv6CodePortUnreachable        = 4
+	ICMPv6CodeSrcAddressFailedPolicy = 5
+	ICMPv6CodeRejectRouteToDst       = 6
+
+	// TimeExceeded
+	ICMPv6CodeHopLimitExceeded               = 0
+	ICMPv6CodeFragmentReassemblyTimeExceeded = 1
+
+	// ParameterProblem
+	ICMPv6CodeErroneousHeaderField   = 0
+	ICMPv6CodeUnrecognizedNextHeader = 1
+	ICMPv6CodeUnrecognizedIPv6Option = 2
+)
+
+type icmpv6TypeCodeInfoStruct struct {
+	typeStr string
+	codeStr *map[uint8]string
+}
+
+var (
+	icmpv6TypeCodeInfo = map[uint8]icmpv6TypeCodeInfoStruct{
+		ICMPv6TypeDestinationUnreachable: icmpv6TypeCodeInfoStruct{
+			"DestinationUnreachable", &map[uint8]string{
+				ICMPv6CodeNoRouteToDst:           "NoRouteToDst",
+				ICMPv6CodeAdminProhibited:        "AdminProhibited",
+				ICMPv6CodeBeyondScopeOfSrc:       "BeyondScopeOfSrc",
+				ICMPv6CodeAddressUnreachable:     "AddressUnreachable",
+				ICMPv6CodePortUnreachable:        "PortUnreachable",
+				ICMPv6CodeSrcAddressFailedPolicy: "SrcAddressFailedPolicy",
+				ICMPv6CodeRejectRouteToDst:       "RejectRouteToDst",
+			},
+		},
+		ICMPv6TypePacketTooBig: icmpv6TypeCodeInfoStruct{
+			"PacketTooBig", nil,
+		},
+		ICMPv6TypeTimeExceeded: icmpv6TypeCodeInfoStruct{
+			"TimeExceeded", &map[uint8]string{
+				ICMPv6CodeHopLimitExceeded:               "HopLimitExceeded",
+				ICMPv6CodeFragmentReassemblyTimeExceeded: "FragmentReassemblyTimeExceeded",
+			},
+		},
+		ICMPv6TypeParameterProblem: icmpv6TypeCodeInfoStruct{
+			"ParameterProblem", &map[uint8]string{
+				ICMPv6CodeErroneousHeaderField:   "ErroneousHeaderField",
+				ICMPv6CodeUnrecognizedNextHeader: "UnrecognizedNextHeader",
+				ICMPv6CodeUnrecognizedIPv6Option: "UnrecognizedIPv6Option",
+			},
+		},
+		ICMPv6TypeEchoRequest: icmpv6TypeCodeInfoStruct{
+			"EchoRequest", nil,
+		},
+		ICMPv6TypeEchoReply: icmpv6TypeCodeInfoStruct{
+			"EchoReply", nil,
+		},
+		ICMPv6TypeRouterSolicitation: icmpv6TypeCodeInfoStruct{
+			"RouterSolicitation", nil,
+		},
+		ICMPv6TypeRouterAdvertisement: icmpv6TypeCodeInfoStruct{
+			"RouterAdvertisement", nil,
+		},
+		ICMPv6TypeNeighborSolicitation: icmpv6TypeCodeInfoStruct{
+			"NeighborSolicitation", nil,
+		},
+		ICMPv6TypeNeighborAdvertisement: icmpv6TypeCodeInfoStruct{
+			"NeighborAdvertisement", nil,
+		},
+		ICMPv6TypeRedirect: icmpv6TypeCodeInfoStruct{
+			"Redirect", nil,
+		},
+	}
+)
+
+type ICMPv6TypeCode uint16
+
+// Type returns the ICMPv6 type field.
+func (a ICMPv6TypeCode) Type() uint8 {
+	return uint8(a >> 8)
+}
+
+// Code returns the ICMPv6 code field.
+func (a ICMPv6TypeCode) Code() uint8 {
+	return uint8(a)
+}
+
+func (a ICMPv6TypeCode) String() string {
+	t, c := a.Type(), a.Code()
+	strInfo, ok := icmpv6TypeCodeInfo[t]
+	if !ok {
+		// Unknown ICMPv6 type field
+		return fmt.Sprintf("%d(%d)", t, c)
+	}
+	typeStr := strInfo.typeStr
+	if strInfo.codeStr == nil && c == 0 {
+		// The ICMPv6 type does not make use of the code field
+		return typeStr
+	}
+	if strInfo.codeStr == nil && c != 0 {
+		// The ICMPv6 type does not make use of the code field, but it is present anyway
+		return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+	}
+	codeStr, ok := (*strInfo.codeStr)[c]
+	if !ok {
+		// We don't know this ICMPv6 code; print the numerical value
+		return fmt.Sprintf("%s(Code: %d)", typeStr, c)
+	}
+	return fmt.Sprintf("%s(%s)", typeStr, codeStr)
+}
+
+func (a ICMPv6TypeCode) GoString() string {
+	t := reflect.TypeOf(a)
+	return fmt.Sprintf("%s(%d, %d)", t.String(), a.Type(), a.Code())
+}
+
+// SerializeTo writes the ICMPv6TypeCode value to the 'bytes' buffer.
+func (a ICMPv6TypeCode) SerializeTo(bytes []byte) {
+	binary.BigEndian.PutUint16(bytes, uint16(a))
+}
+
+// CreateICMPv6TypeCode is a convenience function to create an ICMPv6TypeCode
+// gopacket type from the ICMPv6 type and code values.
+func CreateICMPv6TypeCode(typ uint8, code uint8) ICMPv6TypeCode {
+	return ICMPv6TypeCode(binary.BigEndian.Uint16([]byte{typ, code}))
+}
+
+// ICMPv6 is the layer for IPv6 ICMP packet data
+type ICMPv6 struct {
+	BaseLayer
+	TypeCode ICMPv6TypeCode
+	Checksum uint16
+	// TypeBytes is deprecated and always nil. See the different ICMPv6 message types
+	// instead (e.g. ICMPv6TypeRouterSolicitation).
+	TypeBytes []byte
+	tcpipchecksum
+}
+
+// LayerType returns LayerTypeICMPv6.
+func (i *ICMPv6) LayerType() gopacket.LayerType { return LayerTypeICMPv6 }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 4 bytes for ICMPv6 packet")
+	}
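+	// The fixed ICMPv6 header is 4 bytes: type, code and a 16-bit checksum; everything
+	// after it is message-specific and is left in the payload.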
+	i.TypeCode = CreateICMPv6TypeCode(data[0], data[1])
+	i.Checksum = binary.BigEndian.Uint16(data[2:4])
+	i.BaseLayer = BaseLayer{data[:4], data[4:]}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	i.TypeCode.SerializeTo(bytes)
+
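+	// The ICMPv6 checksum (RFC 4443) also covers an IPv6 pseudo-header; the embedded
+	// tcpipchecksum helper supplies it, so SetNetworkLayerForChecksum must have been
+	// called on this layer before serializing with ComputeChecksums set.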
+	if opts.ComputeChecksums {
+		bytes[2] = 0
+		bytes[3] = 0
+		csum, err := i.computeChecksum(b.Bytes(), IPProtocolICMPv6)
+		if err != nil {
+			return err
+		}
+		i.Checksum = csum
+	}
+	binary.BigEndian.PutUint16(bytes[2:], i.Checksum)
+
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6) NextLayerType() gopacket.LayerType {
+	switch i.TypeCode.Type() {
+	case ICMPv6TypeEchoRequest:
+		return LayerTypeICMPv6Echo
+	case ICMPv6TypeEchoReply:
+		return LayerTypeICMPv6Echo
+	case ICMPv6TypeRouterSolicitation:
+		return LayerTypeICMPv6RouterSolicitation
+	case ICMPv6TypeRouterAdvertisement:
+		return LayerTypeICMPv6RouterAdvertisement
+	case ICMPv6TypeNeighborSolicitation:
+		return LayerTypeICMPv6NeighborSolicitation
+	case ICMPv6TypeNeighborAdvertisement:
+		return LayerTypeICMPv6NeighborAdvertisement
+	case ICMPv6TypeRedirect:
+		return LayerTypeICMPv6Redirect
+	case ICMPv6TypeMLDv1MulticastListenerQueryMessage: // the same ICMPv6 type (130) is used for MLDv1 and MLDv2 queries
+		if len(i.Payload) > 20 { // only the payload size differs
+			return LayerTypeMLDv2MulticastListenerQuery
+		}
+		return LayerTypeMLDv1MulticastListenerQuery
+	case ICMPv6TypeMLDv1MulticastListenerDoneMessage:
+		return LayerTypeMLDv1MulticastListenerDone
+	case ICMPv6TypeMLDv1MulticastListenerReportMessage:
+		return LayerTypeMLDv1MulticastListenerReport
+	case ICMPv6TypeMLDv2MulticastListenerReportMessageV2:
+		return LayerTypeMLDv2MulticastListenerReport
+	}
+
+	return gopacket.LayerTypePayload
+}
+
+func decodeICMPv6(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6{}
+	return decodingLayerDecoder(i, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/icmp6msg.go b/vendor/github.com/google/gopacket/layers/icmp6msg.go
new file mode 100644
index 0000000..d9268db
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/icmp6msg.go
@@ -0,0 +1,578 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/google/gopacket"
+)
+
+// Based on RFC 4861
+
+// ICMPv6Opt indicate how to decode the data associated with each ICMPv6Option.
+type ICMPv6Opt uint8
+
+const (
+	_ ICMPv6Opt = iota
+
+	// ICMPv6OptSourceAddress contains the link-layer address of the sender of
+	// the packet. It is used in the Neighbor Solicitation, Router
+	// Solicitation, and Router Advertisement packets. Must be ignored for other
+	// Neighbor discovery messages.
+	ICMPv6OptSourceAddress
+
+	// ICMPv6OptTargetAddress contains the link-layer address of the target. It
+	// is used in Neighbor Advertisement and Redirect packets. Must be ignored
+	// for other Neighbor discovery messages.
+	ICMPv6OptTargetAddress
+
+	// ICMPv6OptPrefixInfo provides hosts with on-link prefixes and prefixes
+	// for Address Autoconfiguration. The Prefix Information option appears in
+	// Router Advertisement packets and MUST be silently ignored for other
+	// messages.
+	ICMPv6OptPrefixInfo
+
+	// ICMPv6OptRedirectedHeader is used in Redirect messages and contains all
+	// or part of the packet that is being redirected.
+	ICMPv6OptRedirectedHeader
+
+	// ICMPv6OptMTU is used in Router Advertisement messages to ensure that all
+	// nodes on a link use the same MTU value in those cases where the link MTU
+	// is not well known. This option MUST be silently ignored for other
+	// Neighbor Discovery messages.
+	ICMPv6OptMTU
+)
+
+// ICMPv6Echo represents the structure of a ping.
+type ICMPv6Echo struct {
+	BaseLayer
+	Identifier uint16
+	SeqNumber  uint16
+}
+
+// ICMPv6RouterSolicitation is sent by hosts to find routers.
+type ICMPv6RouterSolicitation struct {
+	BaseLayer
+	Options ICMPv6Options
+}
+
+// ICMPv6RouterAdvertisement is sent by routers in response to Solicitation.
+type ICMPv6RouterAdvertisement struct {
+	BaseLayer
+	HopLimit       uint8
+	Flags          uint8
+	RouterLifetime uint16
+	ReachableTime  uint32
+	RetransTimer   uint32
+	Options        ICMPv6Options
+}
+
+// ICMPv6NeighborSolicitation is sent to request the link-layer address of a
+// target node.
+type ICMPv6NeighborSolicitation struct {
+	BaseLayer
+	TargetAddress net.IP
+	Options       ICMPv6Options
+}
+
+// ICMPv6NeighborAdvertisement is sent by nodes in response to Solicitation.
+type ICMPv6NeighborAdvertisement struct {
+	BaseLayer
+	Flags         uint8
+	TargetAddress net.IP
+	Options       ICMPv6Options
+}
+
+// ICMPv6Redirect is sent by routers to inform hosts of a better first-hop node
+// on the path to a destination.
+type ICMPv6Redirect struct {
+	BaseLayer
+	TargetAddress      net.IP
+	DestinationAddress net.IP
+	Options            ICMPv6Options
+}
+
+// ICMPv6Option contains the type and data for a single option.
+type ICMPv6Option struct {
+	Type ICMPv6Opt
+	Data []byte
+}
+
+// ICMPv6Options is a slice of ICMPv6Option.
+type ICMPv6Options []ICMPv6Option
+
+func (i ICMPv6Opt) String() string {
+	switch i {
+	case ICMPv6OptSourceAddress:
+		return "SourceAddress"
+	case ICMPv6OptTargetAddress:
+		return "TargetAddress"
+	case ICMPv6OptPrefixInfo:
+		return "PrefixInfo"
+	case ICMPv6OptRedirectedHeader:
+		return "RedirectedHeader"
+	case ICMPv6OptMTU:
+		return "MTU"
+	default:
+		return fmt.Sprintf("Unknown(%d)", i)
+	}
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6Echo) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6Echo
+}
+
+// LayerType returns LayerTypeICMPv6Echo.
+func (i *ICMPv6Echo) LayerType() gopacket.LayerType {
+	return LayerTypeICMPv6Echo
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6Echo) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6Echo) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 4 bytes for ICMPv6 Echo")
+	}
+	i.Identifier = binary.BigEndian.Uint16(data[0:2])
+	i.SeqNumber = binary.BigEndian.Uint16(data[2:4])
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6Echo) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+
+	binary.BigEndian.PutUint16(buf, i.Identifier)
+	binary.BigEndian.PutUint16(buf[2:], i.SeqNumber)
+	return nil
+}
+
+// LayerType returns LayerTypeICMPv6.
+func (i *ICMPv6RouterSolicitation) LayerType() gopacket.LayerType {
+	return LayerTypeICMPv6RouterSolicitation
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6RouterSolicitation) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6RouterSolicitation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	// first 4 bytes are reserved followed by options
+	if len(data) < 4 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 4 bytes for ICMPv6 router solicitation")
+	}
+
+	// truncate old options
+	i.Options = i.Options[:0]
+
+	return i.Options.DecodeFromBytes(data[4:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6RouterSolicitation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := i.Options.SerializeTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+
+	copy(buf, lotsOfZeros[:4])
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6RouterSolicitation) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6RouterSolicitation
+}
+
+// LayerType returns LayerTypeICMPv6RouterAdvertisement.
+func (i *ICMPv6RouterAdvertisement) LayerType() gopacket.LayerType {
+	return LayerTypeICMPv6RouterAdvertisement
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6RouterAdvertisement) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6RouterAdvertisement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 12 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 12 bytes for ICMPv6 router advertisement")
+	}
+
+	i.HopLimit = uint8(data[0])
+	// M, O bit followed by 6 reserved bits
+	i.Flags = uint8(data[1])
+	i.RouterLifetime = binary.BigEndian.Uint16(data[2:4])
+	i.ReachableTime = binary.BigEndian.Uint32(data[4:8])
+	i.RetransTimer = binary.BigEndian.Uint32(data[8:12])
+	i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+	// truncate old options
+	i.Options = i.Options[:0]
+
+	return i.Options.DecodeFromBytes(data[12:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6RouterAdvertisement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := i.Options.SerializeTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(12)
+	if err != nil {
+		return err
+	}
+
+	buf[0] = byte(i.HopLimit)
+	buf[1] = byte(i.Flags)
+	binary.BigEndian.PutUint16(buf[2:], i.RouterLifetime)
+	binary.BigEndian.PutUint32(buf[4:], i.ReachableTime)
+	binary.BigEndian.PutUint32(buf[8:], i.RetransTimer)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6RouterAdvertisement) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6RouterAdvertisement
+}
+
+// ManagedAddressConfig is true when addresses are available via DHCPv6. If
+// set, the OtherConfig flag is redundant.
+func (i *ICMPv6RouterAdvertisement) ManagedAddressConfig() bool {
+	return i.Flags&0x80 != 0
+}
+
+// OtherConfig is true when there is other configuration information available
+// via DHCPv6. For example, DNS-related information.
+func (i *ICMPv6RouterAdvertisement) OtherConfig() bool {
+	return i.Flags&0x40 != 0
+}
+
+// LayerType returns LayerTypeICMPv6NeighborSolicitation.
+func (i *ICMPv6NeighborSolicitation) LayerType() gopacket.LayerType {
+	return LayerTypeICMPv6NeighborSolicitation
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6NeighborSolicitation) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6NeighborSolicitation) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 20 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 20 bytes for ICMPv6 neighbor solicitation")
+	}
+
+	i.TargetAddress = net.IP(data[4:20])
+	i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+	// truncate old options
+	i.Options = i.Options[:0]
+
+	return i.Options.DecodeFromBytes(data[20:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6NeighborSolicitation) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := i.Options.SerializeTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(20)
+	if err != nil {
+		return err
+	}
+
+	copy(buf, lotsOfZeros[:4])
+	copy(buf[4:], i.TargetAddress)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6NeighborSolicitation) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6NeighborSolicitation
+}
+
+// LayerType returns LayerTypeICMPv6NeighborAdvertisement.
+func (i *ICMPv6NeighborAdvertisement) LayerType() gopacket.LayerType {
+	return LayerTypeICMPv6NeighborAdvertisement
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6NeighborAdvertisement) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6NeighborAdvertisement) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 20 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 20 bytes for ICMPv6 neighbor advertisement")
+	}
+
+	i.Flags = uint8(data[0])
+	i.TargetAddress = net.IP(data[4:20])
+	i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+	// truncate old options
+	i.Options = i.Options[:0]
+
+	return i.Options.DecodeFromBytes(data[20:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6NeighborAdvertisement) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := i.Options.SerializeTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(20)
+	if err != nil {
+		return err
+	}
+
+	buf[0] = byte(i.Flags)
+	copy(buf[1:], lotsOfZeros[:3])
+	copy(buf[4:], i.TargetAddress)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6NeighborAdvertisement) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6NeighborAdvertisement
+}
+
+// Router indicates whether the sender is a router or not.
+func (i *ICMPv6NeighborAdvertisement) Router() bool {
+	return i.Flags&0x80 != 0
+}
+
+// Solicited indicates whether the advertisement was solicited or not.
+func (i *ICMPv6NeighborAdvertisement) Solicited() bool {
+	return i.Flags&0x40 != 0
+}
+
+// Override indicates whether the advertisement should Override an existing
+// cache entry.
+func (i *ICMPv6NeighborAdvertisement) Override() bool {
+	return i.Flags&0x20 != 0
+}
+
+// LayerType returns LayerTypeICMPv6Redirect.
+func (i *ICMPv6Redirect) LayerType() gopacket.LayerType {
+	return LayerTypeICMPv6Redirect
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *ICMPv6Redirect) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6Redirect) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 36 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 36 bytes for ICMPv6 redirect")
+	}
+
+	i.TargetAddress = net.IP(data[4:20])
+	i.DestinationAddress = net.IP(data[20:36])
+	i.BaseLayer = BaseLayer{data, nil} // assume no payload
+
+	// truncate old options
+	i.Options = i.Options[:0]
+
+	return i.Options.DecodeFromBytes(data[36:], df)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6Redirect) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := i.Options.SerializeTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(36)
+	if err != nil {
+		return err
+	}
+
+	copy(buf, lotsOfZeros[:4])
+	copy(buf[4:], i.TargetAddress)
+	copy(buf[20:], i.DestinationAddress)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *ICMPv6Redirect) CanDecode() gopacket.LayerClass {
+	return LayerTypeICMPv6Redirect
+}
+
+func (i ICMPv6Option) String() string {
+	hd := hex.EncodeToString(i.Data)
+	if len(hd) > 0 {
+		hd = " 0x" + hd
+	}
+
+	switch i.Type {
+	case ICMPv6OptSourceAddress, ICMPv6OptTargetAddress:
+		return fmt.Sprintf("ICMPv6Option(%s:%v)",
+			i.Type,
+			net.HardwareAddr(i.Data))
+	case ICMPv6OptPrefixInfo:
+		if len(i.Data) == 30 {
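+			// Prefix Information option data (RFC 4861 4.6.2), without the 2-byte
+			// type/length header: prefix length, flags, valid and preferred lifetimes,
+			// 4 reserved bytes, then the 16-byte prefix.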
+			prefixLen := uint8(i.Data[0])
+			onLink := (i.Data[1]&0x80 != 0)
+			autonomous := (i.Data[1]&0x40 != 0)
+			validLifetime := time.Duration(binary.BigEndian.Uint32(i.Data[2:6])) * time.Second
+			preferredLifetime := time.Duration(binary.BigEndian.Uint32(i.Data[6:10])) * time.Second
+
+			prefix := net.IP(i.Data[14:])
+
+			return fmt.Sprintf("ICMPv6Option(%s:%v/%v:%t:%t:%v:%v)",
+				i.Type,
+				prefix, prefixLen,
+				onLink, autonomous,
+				validLifetime, preferredLifetime)
+		}
+	case ICMPv6OptRedirectedHeader:
+		// could invoke IP decoder on data... probably best not to
+		break
+	case ICMPv6OptMTU:
+		if len(i.Data) == 6 {
+			return fmt.Sprintf("ICMPv6Option(%s:%v)",
+				i.Type,
+				binary.BigEndian.Uint32(i.Data[2:]))
+		}
+
+	}
+	return fmt.Sprintf("ICMPv6Option(%s:%s)", i.Type, hd)
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *ICMPv6Options) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	for len(data) > 0 {
+		if len(data) < 2 {
+			df.SetTruncated()
+			return errors.New("ICMP layer less than 2 bytes for ICMPv6 message option")
+		}
+
+		// unit is 8 octets, convert to bytes
+		length := int(data[1]) * 8
+
+		if length == 0 {
+			df.SetTruncated()
+			return errors.New("ICMPv6 message option with length 0")
+		}
+
+		if len(data) < length {
+			df.SetTruncated()
+			return fmt.Errorf("ICMP layer only %v bytes for ICMPv6 message option with length %v", len(data), length)
+		}
+
+		o := ICMPv6Option{
+			Type: ICMPv6Opt(data[0]),
+			Data: data[2:length],
+		}
+
+		// chop off option we just consumed
+		data = data[length:]
+
+		*i = append(*i, o)
+	}
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *ICMPv6Options) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	for _, opt := range []ICMPv6Option(*i) {
+		length := len(opt.Data) + 2
+		buf, err := b.PrependBytes(length)
+		if err != nil {
+			return err
+		}
+
+		buf[0] = byte(opt.Type)
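+		// The length field is expressed in units of 8 octets (RFC 4861 4.6), so
+		// opt.Data must be sized such that len(opt.Data)+2 is a multiple of 8;
+		// otherwise the division below truncates.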
+		buf[1] = byte(length / 8)
+		copy(buf[2:], opt.Data)
+	}
+
+	return nil
+}
+
+func decodeICMPv6Echo(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6Echo{}
+	return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6RouterSolicitation(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6RouterSolicitation{}
+	return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6RouterAdvertisement(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6RouterAdvertisement{}
+	return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6NeighborSolicitation(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6NeighborSolicitation{}
+	return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6NeighborAdvertisement(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6NeighborAdvertisement{}
+	return decodingLayerDecoder(i, data, p)
+}
+
+func decodeICMPv6Redirect(data []byte, p gopacket.PacketBuilder) error {
+	i := &ICMPv6Redirect{}
+	return decodingLayerDecoder(i, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/igmp.go b/vendor/github.com/google/gopacket/layers/igmp.go
new file mode 100644
index 0000000..fce84db
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/igmp.go
@@ -0,0 +1,539 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"net"
+	"time"
+
+	"github.com/google/gopacket"
+)
+
+type IGMPType uint8
+
+const (
+	IGMPMembershipQuery    IGMPType = 0x11 // General or group specific query
+	IGMPMembershipReportV1 IGMPType = 0x12 // Version 1 Membership Report
+	IGMPMembershipReportV2 IGMPType = 0x16 // Version 2 Membership Report
+	IGMPLeaveGroup         IGMPType = 0x17 // Leave Group
+	IGMPMembershipReportV3 IGMPType = 0x22 // Version 3 Membership Report
+)
+
+// String conversions for IGMP message types
+func (i IGMPType) String() string {
+	switch i {
+	case IGMPMembershipQuery:
+		return "IGMP Membership Query"
+	case IGMPMembershipReportV1:
+		return "IGMPv1 Membership Report"
+	case IGMPMembershipReportV2:
+		return "IGMPv2 Membership Report"
+	case IGMPMembershipReportV3:
+		return "IGMPv3 Membership Report"
+	case IGMPLeaveGroup:
+		return "Leave Group"
+	default:
+		return ""
+	}
+}
+
+type IGMPv3GroupRecordType uint8
+
+const (
+	IGMPIsIn  IGMPv3GroupRecordType = 0x01 // Type MODE_IS_INCLUDE, source addresses x
+	IGMPIsEx  IGMPv3GroupRecordType = 0x02 // Type MODE_IS_EXCLUDE, source addresses x
+	IGMPToIn  IGMPv3GroupRecordType = 0x03 // Type CHANGE_TO_INCLUDE_MODE, source addresses x
+	IGMPToEx  IGMPv3GroupRecordType = 0x04 // Type CHANGE_TO_EXCLUDE_MODE, source addresses x
+	IGMPAllow IGMPv3GroupRecordType = 0x05 // Type ALLOW_NEW_SOURCES, source addresses x
+	IGMPBlock IGMPv3GroupRecordType = 0x06 // Type BLOCK_OLD_SOURCES, source addresses x
+)
+
+func (i IGMPv3GroupRecordType) String() string {
+	switch i {
+	case IGMPIsIn:
+		return "MODE_IS_INCLUDE"
+	case IGMPIsEx:
+		return "MODE_IS_EXCLUDE"
+	case IGMPToIn:
+		return "CHANGE_TO_INCLUDE_MODE"
+	case IGMPToEx:
+		return "CHANGE_TO_EXCLUDE_MODE"
+	case IGMPAllow:
+		return "ALLOW_NEW_SOURCES"
+	case IGMPBlock:
+		return "BLOCK_OLD_SOURCES"
+	default:
+		return ""
+	}
+}
+
+// IGMP represents an IGMPv3 message.
+type IGMP struct {
+	BaseLayer
+	Type                    IGMPType
+	MaxResponseTime         time.Duration
+	Checksum                uint16
+	GroupAddress            net.IP
+	SupressRouterProcessing bool
+	RobustnessValue         uint8
+	IntervalTime            time.Duration
+	SourceAddresses         []net.IP
+	NumberOfGroupRecords    uint16
+	NumberOfSources         uint16
+	GroupRecords            []IGMPv3GroupRecord
+	Version                 uint8 // IGMP protocol version
+}
+
+// IGMPv1or2 stores header details for an IGMPv1 or IGMPv2 packet.
+//
+//  0                   1                   2                   3
+//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |      Type     | Max Resp Time |           Checksum            |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                         Group Address                         |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+type IGMPv1or2 struct {
+	BaseLayer
+	Type            IGMPType      // IGMP message type
+	MaxResponseTime time.Duration // meaningful only in Membership Query messages
+	Checksum        uint16        // 16-bit checksum of entire ip payload
+	GroupAddress    net.IP        // either 0 or an IP multicast address
+	Version         uint8
+}
+
+// decodeResponse dissects IGMPv1 or IGMPv2 packet.
+func (i *IGMPv1or2) decodeResponse(data []byte) error {
+	if len(data) < 8 {
+		return errors.New("IGMP packet too small")
+	}
+
+	i.MaxResponseTime = igmpTimeDecode(data[1])
+	i.Checksum = binary.BigEndian.Uint16(data[2:4])
+	i.GroupAddress = net.IP(data[4:8])
+
+	return nil
+}
+
+func igmpchecksum(bytes []byte) uint16 {
+	// Assumed that the checksum bytes are set to zero
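+	// Standard Internet one's-complement checksum (RFC 1071) over 16-bit big-endian
+	// words; every IGMP message built in this file has an even length.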
+	var csum uint32
+	for i := 0; i < len(bytes); i += 2 {
+		csum += uint32(bytes[i]) << 8
+		csum += uint32(bytes[i+1])
+	}
+	for {
+		// Break when sum is less or equals to 0xFFFF
+		if csum <= 65535 {
+			break
+		}
+		// Add carry to the sum
+		csum = (csum >> 16) + uint32(uint16(csum))
+	}
+	// Flip all the bits
+	return ^uint16(csum)
+}
+
+func (i *IGMPv1or2) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+	// Put the packet type
+	bytes[0] = byte(i.Type)
+	bytes[1] = igmpTimeEncode(i.MaxResponseTime)
+	// Put the checksum as zero to start
+	binary.BigEndian.PutUint16(bytes[2:], 0)
+	addr, err := checkIPv4Address(i.GroupAddress)
+	if err != nil {
+		return err
+	}
+	i.GroupAddress = addr
+	copy(bytes[4:8], i.GroupAddress)
+	csum := igmpchecksum(bytes)
+	binary.BigEndian.PutUint16(bytes[2:], csum)
+	return nil
+}
+
+//  0                   1                   2                   3
+//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |  Type = 0x22  |    Reserved   |           Checksum            |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |           Reserved            |  Number of Group Records (M)  |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                                                               |
+// .                        Group Record [1]                       .
+// |                                                               |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                                                               |
+// .                        Group Record [2]                       .
+// |                                                               |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                                                               |
+// .                        Group Record [M]                       .
+// |                                                               |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |  Record Type  |  Aux Data Len |     Number of Sources (N)     |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                       Multicast Address                       |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                       Source Address [1]                      |
+// +-                                                             -+
+// |                       Source Address [2]                      |
+// +-                                                             -+
+// |                       Source Address [N]                      |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                                                               |
+// .                         Auxiliary Data                        .
+// |                                                               |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// IGMPv3GroupRecord stores individual group records for a V3 Membership Report message.
+type IGMPv3GroupRecord struct {
+	Type             IGMPv3GroupRecordType
+	AuxDataLen       uint8 // this should always be 0 as per IGMPv3 spec.
+	NumberOfSources  uint16
+	MulticastAddress net.IP
+	SourceAddresses  []net.IP
+	AuxData          uint32 // NOT USED
+}
+
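+// length returns the encoded size of the group record: 8 fixed bytes plus 4 bytes
+// per source address (auxiliary data is never emitted).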
+func (g *IGMPv3GroupRecord) length() int {
+	return 8 + 4*len(g.SourceAddresses)
+}
+
+func (g *IGMPv3GroupRecord) encode(b []byte) (int, error) {
+	length := g.length()
+	b[0] = byte(g.Type) // Record Type
+	b[1] = 0            // Aux Data Len (always 0; auxiliary data is not emitted)
+	binary.BigEndian.PutUint16(b[2:], uint16(len(g.SourceAddresses)))
+	addr, err := checkIPv4Address(g.MulticastAddress)
+	if err != nil {
+		return 0, err
+	}
+	copy(b[4:8], addr)
+	start := 8
+	for i, src := range g.SourceAddresses {
+		addr1, err := checkIPv4Address(src)
+		if err != nil {
+			return 0, err
+		}
+		copy(b[start+i*4:start+(i+1)*4], addr1)
+	}
+	return length, nil
+}
+
+func (i *IGMP) decodeIGMPv3MembershipReport(data []byte) error {
+	if len(data) < 8 {
+		return errors.New("IGMPv3 Membership Report too small #1")
+	}
+
+	i.Checksum = binary.BigEndian.Uint16(data[2:4])
+	i.NumberOfGroupRecords = binary.BigEndian.Uint16(data[6:8])
+
+	recordOffset := 8
+	for j := 0; j < int(i.NumberOfGroupRecords); j++ {
+		if len(data) < recordOffset+8 {
+			return errors.New("IGMPv3 Membership Report too small #2")
+		}
+
+		var gr IGMPv3GroupRecord
+		gr.Type = IGMPv3GroupRecordType(data[recordOffset])
+		gr.AuxDataLen = data[recordOffset+1]
+		gr.NumberOfSources = binary.BigEndian.Uint16(data[recordOffset+2 : recordOffset+4])
+		gr.MulticastAddress = net.IP(data[recordOffset+4 : recordOffset+8])
+
+		if len(data) < recordOffset+8+int(gr.NumberOfSources)*4 {
+			return errors.New("IGMPv3 Membership Report too small #3")
+		}
+
+		// append source address records.
+		for i := 0; i < int(gr.NumberOfSources); i++ {
+			sourceAddr := net.IP(data[recordOffset+8+i*4 : recordOffset+12+i*4])
+			gr.SourceAddresses = append(gr.SourceAddresses, sourceAddr)
+		}
+
+		i.GroupRecords = append(i.GroupRecords, gr)
+		recordOffset += 8 + 4*int(gr.NumberOfSources)
+	}
+	return nil
+}
+
+//  0                   1                   2                   3
+//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |  Type = 0x11  | Max Resp Code |           Checksum            |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                         Group Address                         |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Resv  |S| QRV |     QQIC      |     Number of Sources (N)     |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |                       Source Address [1]                      |
+// +-                                                             -+
+// |                       Source Address [2]                      |
+// +-                              .                              -+
+// |                       Source Address [N]                      |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// decodeIGMPv3MembershipQuery parses the IGMPv3 message of type 0x11
+func (i *IGMP) decodeIGMPv3MembershipQuery(data []byte) error {
+	if len(data) < 12 {
+		return errors.New("IGMPv3 Membership Query too small #1")
+	}
+
+	i.MaxResponseTime = igmpTimeDecode(data[1])
+	i.Checksum = binary.BigEndian.Uint16(data[2:4])
+	i.SupressRouterProcessing = data[8]&0x8 != 0
+	i.GroupAddress = net.IP(data[4:8])
+	i.RobustnessValue = data[8] & 0x7
+	i.IntervalTime = igmpTimeDecode(data[9])
+	i.NumberOfSources = binary.BigEndian.Uint16(data[10:12])
+
+	if len(data) < 12+int(i.NumberOfSources)*4 {
+		return errors.New("IGMPv3 Membership Query too small #2")
+	}
+
+	for j := 0; j < int(i.NumberOfSources); j++ {
+		i.SourceAddresses = append(i.SourceAddresses, net.IP(data[12+j*4:16+j*4]))
+	}
+
+	return nil
+}
+
+// igmpTimeDecode decodes the duration created by the given byte, using the
+// algorithm in http://www.rfc-base.org/txt/rfc-3376.txt section 4.1.1.
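+// For example, 0x8F encodes exp=0 and mant=15, giving (15|0x10)<<3 = 248 deciseconds (24.8s).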
+func igmpTimeDecode(t uint8) time.Duration {
+	if t&0x80 == 0 {
+		return time.Millisecond * 100 * time.Duration(t)
+	}
+	// 1 | exp (3 bits) | mant (4 bits); time = (mant|0x10) << (exp+3) deciseconds.
+	exp := (t & 0x70) >> 4
+	mant := t & 0x0F
+	return time.Millisecond * 100 * time.Duration(uint32(mant|0x10)<<(exp+3))
+}
+
+func igmpTimeEncode(t time.Duration) byte {
+	decisecs := uint32(t / (100 * time.Millisecond))
+	if decisecs < 128 {
+		return byte(decisecs)
+	}
+	// RFC 3376 4.1.1 floating-point form: value = (mant|0x10) << (exp+3), with
+	// exp in 0..7 and mant in 0..15, so the largest encodable value is 0x1F << 10.
+	if decisecs > 0x1F<<10 {
+		return 0xFF
+	}
+	// The most significant bit of an encodable value lies between bit 7 (exp=0)
+	// and bit 14 (exp=7).
+	msb := 14
+	for decisecs&(1<<uint(msb)) == 0 {
+		msb--
+	}
+	exp := byte(msb - 7)
+	mant := byte(decisecs>>uint(msb-4)) & 0x0F
+	return 0x80 | exp<<4 | mant
+}
+
+func igmpIntervalEncode(t time.Duration) byte {
+	secs := uint32(t / time.Second)
+	if secs < 128 {
+		return byte(secs)
+	}
+	// Same floating-point form as igmpTimeEncode, but the unit is seconds (QQIC field).
+	if secs > 0x1F<<10 {
+		return 0xFF
+	}
+	msb := 14
+	for secs&(1<<uint(msb)) == 0 {
+		msb--
+	}
+	exp := byte(msb - 7)
+	mant := byte(secs>>uint(msb-4)) & 0x0F
+	return 0x80 | exp<<4 | mant
+}
+
+// LayerType returns LayerTypeIGMP for the V1,2,3 message protocol formats.
+func (i *IGMP) LayerType() gopacket.LayerType      { return LayerTypeIGMP }
+func (i *IGMPv1or2) LayerType() gopacket.LayerType { return LayerTypeIGMP }
+
+func (i *IGMPv1or2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		return errors.New("IGMP Packet too small")
+	}
+
+	i.Type = IGMPType(data[0])
+	i.MaxResponseTime = igmpTimeDecode(data[1])
+	i.Checksum = binary.BigEndian.Uint16(data[2:4])
+	i.GroupAddress = net.IP(data[4:8])
+
+	return nil
+}
+
+func (i *IGMPv1or2) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+func (i *IGMPv1or2) CanDecode() gopacket.LayerClass {
+	return LayerTypeIGMP
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (i *IGMP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 1 {
+		return errors.New("IGMP packet is too small")
+	}
+
+	// common IGMP header values between versions 1..3 of IGMP specification..
+	i.Type = IGMPType(data[0])
+
+	switch i.Type {
+	case IGMPMembershipQuery:
+		return i.decodeIGMPv3MembershipQuery(data)
+	case IGMPMembershipReportV3:
+		return i.decodeIGMPv3MembershipReport(data)
+	default:
+		return errors.New("unsupported IGMP type")
+	}
+}
+
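+// A minimal usage sketch (assuming the caller fills in the fields defined on IGMP above):
+//
+//	q := &IGMP{Type: IGMPMembershipQuery, MaxResponseTime: 10 * time.Second,
+//		GroupAddress: net.IPv4(224, 0, 0, 1)}
+//	buf := gopacket.NewSerializeBuffer()
+//	err := q.SerializeTo(buf, gopacket.SerializeOptions{})
+//
+// The per-type serializers below always compute and write the IGMP checksum.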
+func (i *IGMP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// Dispatch on the message type; each serializer computes its own length and prepends the bytes
+	switch i.Type {
+	case IGMPMembershipQuery:
+		return i.serializeIGMPv3MembershipQuery(b, opts)
+	case IGMPMembershipReportV3:
+		return i.serializeIGMPv3MembershipReport(b, opts)
+	default:
+	}
+	return errors.New("Unsupported IGMPv3 Message Type")
+}
+
+func (i *IGMP) serializeIGMPv3MembershipQuery(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	numsrcip := len(i.SourceAddresses)
+	length := 12 + (4 * numsrcip)
+	bytes, err := b.PrependBytes(length)
+	if err != nil {
+		return err
+	}
+	bytes[0] = byte(i.Type)
+	bytes[1] = igmpTimeEncode(i.MaxResponseTime)
+	// Set the checksum to 0 initially
+	binary.BigEndian.PutUint16(bytes[2:], 0)
+	addr, err := checkIPv4Address(i.GroupAddress)
+	if err != nil {
+		return err
+	}
+	i.GroupAddress = addr
+	copy(bytes[4:8], i.GroupAddress)
+	// Byte 8 holds Resv | S | QRV: the S (suppress router-side processing) flag at bit 3
+	// and the robustness variable in the low 3 bits.
+	bytes[8] = i.RobustnessValue & 0x07
+	if i.SupressRouterProcessing {
+		bytes[8] |= 0x08
+	}
+	bytes[9] = igmpIntervalEncode(i.IntervalTime)
+	binary.BigEndian.PutUint16(bytes[10:], uint16(numsrcip))
+	for i, ip := range i.SourceAddresses {
+		addr1, err := checkIPv4Address(ip)
+		if err != nil {
+			return err
+		}
+		copy(bytes[12+i*4:12+(i+1)*4], addr1)
+	}
+	csum := igmpchecksum(bytes)
+	binary.BigEndian.PutUint16(bytes[2:], csum)
+	return nil
+}
+
+func (i *IGMP) serializeIGMPv3MembershipReport(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	// This is for the first two 32 bit rows in the report which is the fixed part
+	length := 8
+	for _, g := range i.GroupRecords {
+		length = length + g.length()
+	}
+	bytes, err := b.PrependBytes(length)
+	if err != nil {
+		return err
+	}
+	bytes[0] = byte(i.Type)
+	bytes[1] = byte(0)
+	// Checksum needs to go here
+	binary.BigEndian.PutUint16(bytes[2:], uint16(0))
+
+	// The reserved field and the number of IGMP group records
+	binary.BigEndian.PutUint16(bytes[4:], uint16(0))
+	binary.BigEndian.PutUint16(bytes[6:], uint16(len(i.GroupRecords)))
+	start := 8
+	for _, g := range i.GroupRecords {
+		numb, err := g.encode(bytes[start:])
+		if err != nil {
+			return err
+		}
+		start = start + numb
+	}
+	csum := igmpchecksum(bytes)
+	binary.BigEndian.PutUint16(bytes[2:], csum)
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (i *IGMP) CanDecode() gopacket.LayerClass {
+	return LayerTypeIGMP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (i *IGMP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// decodeIGMP parses IGMP v1, v2 or v3 messages. The IGMP type in byte[0]
+// (and, for queries, the payload length) determines which struct
+// (IGMP or IGMPv1or2) is initialized and passed to decodingLayerDecoder.
+func decodeIGMP(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 1 {
+		return errors.New("IGMP packet is too small")
+	}
+
+	// byte 0 contains IGMP message type.
+	switch IGMPType(data[0]) {
+	case IGMPMembershipQuery:
+		// IGMPv3 Membership Query payload is >= 12
+		if len(data) >= 12 {
+			i := &IGMP{Version: 3}
+			return decodingLayerDecoder(i, data, p)
+		} else if len(data) == 8 {
+			i := &IGMPv1or2{}
+			if data[1] == 0x00 {
+				i.Version = 1 // IGMPv1 has a query length of 8 and MaxResp = 0
+			} else {
+				i.Version = 2 // IGMPv2 has a query length of 8 and MaxResp != 0
+			}
+
+			return decodingLayerDecoder(i, data, p)
+		}
+	case IGMPMembershipReportV3:
+		i := &IGMP{Version: 3}
+		return decodingLayerDecoder(i, data, p)
+	case IGMPMembershipReportV1:
+		i := &IGMPv1or2{Version: 1}
+		return decodingLayerDecoder(i, data, p)
+	case IGMPLeaveGroup, IGMPMembershipReportV2:
+		// leave group and Query Report v2 used in IGMPv2 only.
+		i := &IGMPv1or2{Version: 2}
+		return decodingLayerDecoder(i, data, p)
+	default:
+	}
+
+	return errors.New("Unable to determine IGMP type.")
+}
diff --git a/vendor/github.com/google/gopacket/layers/ip4.go b/vendor/github.com/google/gopacket/layers/ip4.go
new file mode 100644
index 0000000..2b3c0c6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ip4.go
@@ -0,0 +1,325 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+	"strings"
+
+	"github.com/google/gopacket"
+)
+
+type IPv4Flag uint8
+
+const (
+	IPv4EvilBit       IPv4Flag = 1 << 2 // http://tools.ietf.org/html/rfc3514 ;)
+	IPv4DontFragment  IPv4Flag = 1 << 1
+	IPv4MoreFragments IPv4Flag = 1 << 0
+)
+
+func (f IPv4Flag) String() string {
+	var s []string
+	if f&IPv4EvilBit != 0 {
+		s = append(s, "Evil")
+	}
+	if f&IPv4DontFragment != 0 {
+		s = append(s, "DF")
+	}
+	if f&IPv4MoreFragments != 0 {
+		s = append(s, "MF")
+	}
+	return strings.Join(s, "|")
+}
+
+// IPv4 is the header of an IP packet.
+type IPv4 struct {
+	BaseLayer
+	Version    uint8
+	IHL        uint8
+	TOS        uint8
+	Length     uint16
+	Id         uint16
+	Flags      IPv4Flag
+	FragOffset uint16
+	TTL        uint8
+	Protocol   IPProtocol
+	Checksum   uint16
+	SrcIP      net.IP
+	DstIP      net.IP
+	Options    []IPv4Option
+	Padding    []byte
+}
+
+// LayerType returns LayerTypeIPv4
+func (i *IPv4) LayerType() gopacket.LayerType { return LayerTypeIPv4 }
+func (i *IPv4) NetworkFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointIPv4, i.SrcIP, i.DstIP)
+}
+
+type IPv4Option struct {
+	OptionType   uint8
+	OptionLength uint8
+	OptionData   []byte
+}
+
+func (i IPv4Option) String() string {
+	return fmt.Sprintf("IPv4Option(%v:%v)", i.OptionType, i.OptionData)
+}
+
+// for the current ipv4 options, return the number of bytes (including
+// padding that the options used)
+func (ip *IPv4) getIPv4OptionSize() uint8 {
+	optionSize := uint8(0)
+	for _, opt := range ip.Options {
+		switch opt.OptionType {
+		case 0:
+			// this is the end of option lists
+			optionSize++
+		case 1:
+			// this is the padding
+			optionSize++
+		default:
+			optionSize += opt.OptionLength
+
+		}
+	}
+	// make sure the options are aligned to 32 bit boundary
+	if (optionSize % 4) != 0 {
+		optionSize += 4 - (optionSize % 4)
+	}
+	return optionSize
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+func (ip *IPv4) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	optionLength := ip.getIPv4OptionSize()
+	bytes, err := b.PrependBytes(20 + int(optionLength))
+	if err != nil {
+		return err
+	}
+	if opts.FixLengths {
+		ip.IHL = 5 + (optionLength / 4)
+		ip.Length = uint16(len(b.Bytes()))
+	}
+	bytes[0] = (ip.Version << 4) | ip.IHL
+	bytes[1] = ip.TOS
+	binary.BigEndian.PutUint16(bytes[2:], ip.Length)
+	binary.BigEndian.PutUint16(bytes[4:], ip.Id)
+	binary.BigEndian.PutUint16(bytes[6:], ip.flagsfrags())
+	bytes[8] = ip.TTL
+	bytes[9] = byte(ip.Protocol)
+	if err := ip.AddressTo4(); err != nil {
+		return err
+	}
+	copy(bytes[12:16], ip.SrcIP)
+	copy(bytes[16:20], ip.DstIP)
+
+	curLocation := 20
+	// Now, we will encode the options
+	for _, opt := range ip.Options {
+		switch opt.OptionType {
+		case 0:
+			// this is the end of option lists
+			bytes[curLocation] = 0
+			curLocation++
+		case 1:
+			// this is the padding
+			bytes[curLocation] = 1
+			curLocation++
+		default:
+			bytes[curLocation] = opt.OptionType
+			bytes[curLocation+1] = opt.OptionLength
+
+			// sanity checking to protect us from buffer overrun
+			if len(opt.OptionData) > int(opt.OptionLength-2) {
+				return errors.New("option length is smaller than length of option data")
+			}
+			copy(bytes[curLocation+2:curLocation+int(opt.OptionLength)], opt.OptionData)
+			curLocation += int(opt.OptionLength)
+		}
+	}
+
+	if opts.ComputeChecksums {
+		ip.Checksum = checksum(bytes)
+	}
+	binary.BigEndian.PutUint16(bytes[10:], ip.Checksum)
+	return nil
+}
+
+func checksum(bytes []byte) uint16 {
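+	// Standard IPv4 header checksum (RFC 791): one's-complement sum of 16-bit words,
+	// computed with the checksum field zeroed.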
+	// Clear checksum bytes
+	bytes[10] = 0
+	bytes[11] = 0
+
+	// Compute checksum
+	var csum uint32
+	for i := 0; i < len(bytes); i += 2 {
+		csum += uint32(bytes[i]) << 8
+		csum += uint32(bytes[i+1])
+	}
+	for {
+		// Break when sum is less or equals to 0xFFFF
+		if csum <= 65535 {
+			break
+		}
+		// Add carry to the sum
+		csum = (csum >> 16) + uint32(uint16(csum))
+	}
+	// Flip all the bits
+	return ^uint16(csum)
+}
+
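+// flagsfrags packs the three IPv4 flag bits into the top of the 16-bit field and the
+// 13-bit fragment offset below them.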
+func (ip *IPv4) flagsfrags() (ff uint16) {
+	ff |= uint16(ip.Flags) << 13
+	ff |= ip.FragOffset
+	return
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (ip *IPv4) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 20 {
+		df.SetTruncated()
+		return fmt.Errorf("Invalid ip4 header. Length %d less than 20", len(data))
+	}
+	flagsfrags := binary.BigEndian.Uint16(data[6:8])
+
+	ip.Version = uint8(data[0]) >> 4
+	ip.IHL = uint8(data[0]) & 0x0F
+	ip.TOS = data[1]
+	ip.Length = binary.BigEndian.Uint16(data[2:4])
+	ip.Id = binary.BigEndian.Uint16(data[4:6])
+	ip.Flags = IPv4Flag(flagsfrags >> 13)
+	ip.FragOffset = flagsfrags & 0x1FFF
+	ip.TTL = data[8]
+	ip.Protocol = IPProtocol(data[9])
+	ip.Checksum = binary.BigEndian.Uint16(data[10:12])
+	ip.SrcIP = data[12:16]
+	ip.DstIP = data[16:20]
+	ip.Options = ip.Options[:0]
+	ip.Padding = nil
+	// Set up an initial guess for contents/payload... we'll reset these soon.
+	ip.BaseLayer = BaseLayer{Contents: data}
+
+	// This code is added for the following environment:
+	// * Windows 10 with TSO option activated. ( tested on Hyper-V, RealTek ethernet driver )
+	if ip.Length == 0 {
+		// If using TSO(TCP Segmentation Offload), length is zero.
+		// The actual packet length is the length of data.
+		ip.Length = uint16(len(data))
+	}
+
+	if ip.Length < 20 {
+		return fmt.Errorf("Invalid (too small) IP length (%d < 20)", ip.Length)
+	} else if ip.IHL < 5 {
+		return fmt.Errorf("Invalid (too small) IP header length (%d < 5)", ip.IHL)
+	} else if int(ip.IHL*4) > int(ip.Length) {
+		return fmt.Errorf("Invalid IP header length > IP length (%d > %d)", ip.IHL, ip.Length)
+	}
+	if cmp := len(data) - int(ip.Length); cmp > 0 {
+		data = data[:ip.Length]
+	} else if cmp < 0 {
+		df.SetTruncated()
+		if int(ip.IHL)*4 > len(data) {
+			return errors.New("Not all IP header bytes available")
+		}
+	}
+	ip.Contents = data[:ip.IHL*4]
+	ip.Payload = data[ip.IHL*4:]
+	// From here on, data contains the header options.
+	data = data[20 : ip.IHL*4]
+	// Pull out IP options
+	for len(data) > 0 {
+		if ip.Options == nil {
+			// Pre-allocate to avoid growing the slice too much.
+			ip.Options = make([]IPv4Option, 0, 4)
+		}
+		opt := IPv4Option{OptionType: data[0]}
+		switch opt.OptionType {
+		case 0: // End of options
+			opt.OptionLength = 1
+			ip.Options = append(ip.Options, opt)
+			ip.Padding = data[1:]
+			return nil
+		case 1: // 1 byte padding
+			opt.OptionLength = 1
+			data = data[1:]
+			ip.Options = append(ip.Options, opt)
+		default:
+			if len(data) < 2 {
+				df.SetTruncated()
+				return fmt.Errorf("Invalid ip4 option length. Length %d less than 2", len(data))
+			}
+			opt.OptionLength = data[1]
+			if len(data) < int(opt.OptionLength) {
+				df.SetTruncated()
+				return fmt.Errorf("IP option length exceeds remaining IP header size, option type %v length %v", opt.OptionType, opt.OptionLength)
+			}
+			if opt.OptionLength <= 2 {
+				return fmt.Errorf("Invalid IP option type %v length %d. Must be greater than 2", opt.OptionType, opt.OptionLength)
+			}
+			opt.OptionData = data[2:opt.OptionLength]
+			data = data[opt.OptionLength:]
+			ip.Options = append(ip.Options, opt)
+		}
+	}
+	return nil
+}
+
+func (i *IPv4) CanDecode() gopacket.LayerClass {
+	return LayerTypeIPv4
+}
+
+func (i *IPv4) NextLayerType() gopacket.LayerType {
+	if i.Flags&IPv4MoreFragments != 0 || i.FragOffset != 0 {
+		return gopacket.LayerTypeFragment
+	}
+	return i.Protocol.LayerType()
+}
+
+func decodeIPv4(data []byte, p gopacket.PacketBuilder) error {
+	ip := &IPv4{}
+	err := ip.DecodeFromBytes(data, p)
+	p.AddLayer(ip)
+	p.SetNetworkLayer(ip)
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(ip.NextLayerType())
+}
+
+func checkIPv4Address(addr net.IP) (net.IP, error) {
+	if c := addr.To4(); c != nil {
+		return c, nil
+	}
+	if len(addr) == net.IPv6len {
+		return nil, errors.New("address is IPv6")
+	}
+	return nil, fmt.Errorf("wrong length of %d bytes instead of %d", len(addr), net.IPv4len)
+}
+
+func (ip *IPv4) AddressTo4() error {
+	var src, dst net.IP
+
+	if addr, err := checkIPv4Address(ip.SrcIP); err != nil {
+		return fmt.Errorf("Invalid source IPv4 address (%s)", err)
+	} else {
+		src = addr
+	}
+	if addr, err := checkIPv4Address(ip.DstIP); err != nil {
+		return fmt.Errorf("Invalid destination IPv4 address (%s)", err)
+	} else {
+		dst = addr
+	}
+	ip.SrcIP = src
+	ip.DstIP = dst
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/ip6.go b/vendor/github.com/google/gopacket/layers/ip6.go
new file mode 100644
index 0000000..87b9d33
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ip6.go
@@ -0,0 +1,722 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	// IPv6HopByHopOptionJumbogram code as defined in RFC 2675
+	IPv6HopByHopOptionJumbogram = 0xC2
+)
+
+const (
+	ipv6MaxPayloadLength = 65535
+)
+
+// IPv6 is the layer for the IPv6 header.
+type IPv6 struct {
+	// http://www.networksorcery.com/enp/protocol/ipv6.htm
+	BaseLayer
+	Version      uint8
+	TrafficClass uint8
+	FlowLabel    uint32
+	Length       uint16
+	NextHeader   IPProtocol
+	HopLimit     uint8
+	SrcIP        net.IP
+	DstIP        net.IP
+	HopByHop     *IPv6HopByHop
+	// hbh will be pointed to by HopByHop if that layer exists.
+	hbh IPv6HopByHop
+}
+
+// LayerType returns LayerTypeIPv6
+func (ipv6 *IPv6) LayerType() gopacket.LayerType { return LayerTypeIPv6 }
+
+// NetworkFlow returns this new Flow (EndpointIPv6, SrcIP, DstIP)
+func (ipv6 *IPv6) NetworkFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointIPv6, ipv6.SrcIP, ipv6.DstIP)
+}
+
+// Search for Jumbo Payload TLV in IPv6HopByHop and return (length, true) if found
+func getIPv6HopByHopJumboLength(hopopts *IPv6HopByHop) (uint32, bool, error) {
+	var tlv *IPv6HopByHopOption
+
+	for _, t := range hopopts.Options {
+		if t.OptionType == IPv6HopByHopOptionJumbogram {
+			tlv = t
+			break
+		}
+	}
+	if tlv == nil {
+		// Not found
+		return 0, false, nil
+	}
+	if len(tlv.OptionData) != 4 {
+		return 0, false, errors.New("Jumbo length TLV data must have length 4")
+	}
+	l := binary.BigEndian.Uint32(tlv.OptionData)
+	if l <= ipv6MaxPayloadLength {
+		return 0, false, fmt.Errorf("Jumbo length cannot be less than %d", ipv6MaxPayloadLength+1)
+	}
+	// Found
+	return l, true, nil
+}
+
+// Adds zero-valued Jumbo TLV to IPv6 header if it does not exist
+// (if necessary add hop-by-hop header)
+func addIPv6JumboOption(ip6 *IPv6) {
+	var tlv *IPv6HopByHopOption
+
+	if ip6.HopByHop == nil {
+		// Add IPv6 HopByHop
+		ip6.HopByHop = &IPv6HopByHop{}
+		ip6.HopByHop.NextHeader = ip6.NextHeader
+		ip6.HopByHop.HeaderLength = 0
+		ip6.NextHeader = IPProtocolIPv6HopByHop
+	}
+	for _, t := range ip6.HopByHop.Options {
+		if t.OptionType == IPv6HopByHopOptionJumbogram {
+			tlv = t
+			break
+		}
+	}
+	if tlv == nil {
+		// Add Jumbo TLV
+		tlv = &IPv6HopByHopOption{}
+		ip6.HopByHop.Options = append(ip6.HopByHop.Options, tlv)
+	}
+	tlv.SetJumboLength(0)
+}
+
+// Set jumbo length in serialized IPv6 payload (starting with HopByHop header)
+func setIPv6PayloadJumboLength(hbh []byte) error {
+	pLen := len(hbh)
+	if pLen < 8 {
+		//HopByHop is minimum 8 bytes
+		return fmt.Errorf("Invalid IPv6 payload (length %d)", pLen)
+	}
+	hbhLen := (int(hbh[1]) + 1) * 8
+	if hbhLen > pLen {
+		return fmt.Errorf("Invalid hop-by-hop length (length: %d, payload: %d)", hbhLen, pLen)
+	}
+	offset := 2 //start with options
+	for offset < hbhLen {
+		opt := hbh[offset]
+		if opt == 0 {
+			//Pad1
+			offset++
+			continue
+		}
+		optLen := int(hbh[offset+1])
+		if opt == IPv6HopByHopOptionJumbogram {
+			if optLen == 4 {
+				binary.BigEndian.PutUint32(hbh[offset+2:], uint32(pLen))
+				return nil
+			}
+			return fmt.Errorf("Jumbo TLV too short (%d bytes)", optLen)
+		}
+		offset += 2 + optLen
+	}
+	return errors.New("Jumbo TLV not found")
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (ipv6 *IPv6) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var jumbo bool
+	var err error
+
+	payload := b.Bytes()
+	pLen := len(payload)
+	if pLen > ipv6MaxPayloadLength {
+		jumbo = true
+		if opts.FixLengths {
+			// We need to set the length later because the hop-by-hop header may
+			// not exist or else need padding, so pLen may yet change
+			addIPv6JumboOption(ipv6)
+		} else if ipv6.HopByHop == nil {
+			return fmt.Errorf("Cannot fit payload length of %d into IPv6 packet", pLen)
+		} else {
+			_, ok, err := getIPv6HopByHopJumboLength(ipv6.HopByHop)
+			if err != nil {
+				return err
+			}
+			if !ok {
+				return errors.New("Missing jumbo length hop-by-hop option")
+			}
+		}
+	}
+
+	hbhAlreadySerialized := false
+	if ipv6.HopByHop != nil {
+		for _, l := range b.Layers() {
+			if l == LayerTypeIPv6HopByHop {
+				hbhAlreadySerialized = true
+				break
+			}
+		}
+	}
+	if ipv6.HopByHop != nil && !hbhAlreadySerialized {
+		if ipv6.NextHeader != IPProtocolIPv6HopByHop {
+			// Just fix it instead of throwing an error
+			ipv6.NextHeader = IPProtocolIPv6HopByHop
+		}
+		err = ipv6.HopByHop.SerializeTo(b, opts)
+		if err != nil {
+			return err
+		}
+		payload = b.Bytes()
+		pLen = len(payload)
+		if opts.FixLengths && jumbo {
+			err := setIPv6PayloadJumboLength(payload)
+			if err != nil {
+				return err
+			}
+		}
+	}
+
+	if !jumbo && pLen > ipv6MaxPayloadLength {
+		return errors.New("Cannot fit payload into IPv6 header")
+	}
+	bytes, err := b.PrependBytes(40)
+	if err != nil {
+		return err
+	}
+	bytes[0] = (ipv6.Version << 4) | (ipv6.TrafficClass >> 4)
+	bytes[1] = (ipv6.TrafficClass << 4) | uint8(ipv6.FlowLabel>>16)
+	binary.BigEndian.PutUint16(bytes[2:], uint16(ipv6.FlowLabel))
+	if opts.FixLengths {
+		if jumbo {
+			ipv6.Length = 0
+		} else {
+			ipv6.Length = uint16(pLen)
+		}
+	}
+	binary.BigEndian.PutUint16(bytes[4:], ipv6.Length)
+	bytes[6] = byte(ipv6.NextHeader)
+	bytes[7] = byte(ipv6.HopLimit)
+	if err := ipv6.AddressTo16(); err != nil {
+		return err
+	}
+	copy(bytes[8:], ipv6.SrcIP)
+	copy(bytes[24:], ipv6.DstIP)
+	return nil
+}
+
+// DecodeFromBytes implementation according to gopacket.DecodingLayer
+func (ipv6 *IPv6) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 40 {
+		df.SetTruncated()
+		return fmt.Errorf("Invalid ip6 header. Length %d less than 40", len(data))
+	}
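+	// The first four bytes hold the 4-bit version, 8-bit traffic class and 20-bit flow label.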
+	ipv6.Version = uint8(data[0]) >> 4
+	ipv6.TrafficClass = uint8((binary.BigEndian.Uint16(data[0:2]) >> 4) & 0x00FF)
+	ipv6.FlowLabel = binary.BigEndian.Uint32(data[0:4]) & 0x000FFFFF
+	ipv6.Length = binary.BigEndian.Uint16(data[4:6])
+	ipv6.NextHeader = IPProtocol(data[6])
+	ipv6.HopLimit = data[7]
+	ipv6.SrcIP = data[8:24]
+	ipv6.DstIP = data[24:40]
+	ipv6.HopByHop = nil
+	ipv6.BaseLayer = BaseLayer{data[:40], data[40:]}
+
+	// We treat a HopByHop IPv6 option as part of the IPv6 packet, since its
+	// options are crucial for understanding what's actually happening per packet.
+	if ipv6.NextHeader == IPProtocolIPv6HopByHop {
+		err := ipv6.hbh.DecodeFromBytes(ipv6.Payload, df)
+		if err != nil {
+			return err
+		}
+		ipv6.HopByHop = &ipv6.hbh
+		pEnd, jumbo, err := getIPv6HopByHopJumboLength(ipv6.HopByHop)
+		if err != nil {
+			return err
+		}
+		if jumbo && ipv6.Length == 0 {
+			pEnd := int(pEnd)
+			if pEnd > len(ipv6.Payload) {
+				df.SetTruncated()
+				pEnd = len(ipv6.Payload)
+			}
+			ipv6.Payload = ipv6.Payload[:pEnd]
+			return nil
+		} else if jumbo && ipv6.Length != 0 {
+			return errors.New("IPv6 has jumbo length and IPv6 length is not 0")
+		} else if !jumbo && ipv6.Length == 0 {
+			return errors.New("IPv6 length 0, but HopByHop header does not have jumbogram option")
+		} else {
+			ipv6.Payload = ipv6.Payload[ipv6.hbh.ActualLength:]
+		}
+	}
+
+	if ipv6.Length == 0 {
+		return fmt.Errorf("IPv6 length 0, but next header is %v, not HopByHop", ipv6.NextHeader)
+	}
+
+	pEnd := int(ipv6.Length)
+	if pEnd > len(ipv6.Payload) {
+		df.SetTruncated()
+		pEnd = len(ipv6.Payload)
+	}
+	ipv6.Payload = ipv6.Payload[:pEnd]
+
+	return nil
+}
+
+// CanDecode implementation according to gopacket.DecodingLayer
+func (ipv6 *IPv6) CanDecode() gopacket.LayerClass {
+	return LayerTypeIPv6
+}
+
+// NextLayerType implementation according to gopacket.DecodingLayer
+func (ipv6 *IPv6) NextLayerType() gopacket.LayerType {
+	if ipv6.HopByHop != nil {
+		return ipv6.HopByHop.NextHeader.LayerType()
+	}
+	return ipv6.NextHeader.LayerType()
+}
+
+func decodeIPv6(data []byte, p gopacket.PacketBuilder) error {
+	ip6 := &IPv6{}
+	err := ip6.DecodeFromBytes(data, p)
+	p.AddLayer(ip6)
+	p.SetNetworkLayer(ip6)
+	if ip6.HopByHop != nil {
+		p.AddLayer(ip6.HopByHop)
+	}
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(ip6.NextLayerType())
+}
+
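+// ipv6HeaderTLVOption is a single TLV option inside a hop-by-hop or
+// destination options extension header. OptionAlignment expresses the
+// option's xn+y alignment requirement; for example, SetJumboLength below uses
+// [2]uint8{4, 2}, i.e. the jumbogram option must start at a 4n+2 offset from
+// the beginning of the header.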
+type ipv6HeaderTLVOption struct {
+	OptionType, OptionLength uint8
+	ActualLength             int
+	OptionData               []byte
+	OptionAlignment          [2]uint8 // Xn+Y = [2]uint8{X, Y}
+}
+
+func (h *ipv6HeaderTLVOption) serializeTo(data []byte, fixLengths bool, dryrun bool) int {
+	if fixLengths {
+		h.OptionLength = uint8(len(h.OptionData))
+	}
+	length := int(h.OptionLength) + 2
+	if !dryrun {
+		data[0] = h.OptionType
+		data[1] = h.OptionLength
+		copy(data[2:], h.OptionData)
+	}
+	return length
+}
+
+func decodeIPv6HeaderTLVOption(data []byte, df gopacket.DecodeFeedback) (h *ipv6HeaderTLVOption, _ error) {
+	if len(data) < 2 {
+		df.SetTruncated()
+		return nil, errors.New("IPv6 header option too small")
+	}
+	h = &ipv6HeaderTLVOption{}
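+	// An option type of 0 is the Pad1 option: a single padding byte with no
+	// length or data fields.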
+	if data[0] == 0 {
+		h.ActualLength = 1
+		return
+	}
+	h.OptionType = data[0]
+	h.OptionLength = data[1]
+	h.ActualLength = int(h.OptionLength) + 2
+	if len(data) < h.ActualLength {
+		df.SetTruncated()
+		return nil, errors.New("IPv6 header TLV option too small")
+	}
+	h.OptionData = data[2:h.ActualLength]
+	return
+}
+
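+// serializeTLVOptionPadding writes padLength bytes of padding: a single Pad1
+// byte (type 0) when padLength == 1, otherwise a PadN option (type 1) whose
+// data is padLength-2 zero bytes.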
+func serializeTLVOptionPadding(data []byte, padLength int) {
+	if padLength <= 0 {
+		return
+	}
+	if padLength == 1 {
+		data[0] = 0x0
+		return
+	}
+	tlvLength := uint8(padLength) - 2
+	data[0] = 0x1
+	data[1] = tlvLength
+	if tlvLength != 0 {
+		for k := range data[2:] {
+			data[k+2] = 0x0
+		}
+	}
+	return
+}
+
+// If buf is nil, perform a serialization dry run (compute the length without writing)
+func serializeIPv6HeaderTLVOptions(buf []byte, options []*ipv6HeaderTLVOption, fixLengths bool) int {
+	var l int
+
+	dryrun := buf == nil
+	length := 2
+	for _, opt := range options {
+		if fixLengths {
+			x := int(opt.OptionAlignment[0])
+			y := int(opt.OptionAlignment[1])
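+			// Insert padding so this option's type byte lands at an offset of
+			// the form x*n + y from the start of the extension header (length
+			// is measured from the header start, while buf holds only the
+			// options area, hence the length-2 indices below).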
+			if x != 0 {
+				n := length / x
+				offset := x*n + y
+				if offset < length {
+					offset += x
+				}
+				if length != offset {
+					pad := offset - length
+					if !dryrun {
+						serializeTLVOptionPadding(buf[length-2:], pad)
+					}
+					length += pad
+				}
+			}
+		}
+		if dryrun {
+			l = opt.serializeTo(nil, fixLengths, true)
+		} else {
+			l = opt.serializeTo(buf[length-2:], fixLengths, false)
+		}
+		length += l
+	}
+	if fixLengths {
+		pad := length % 8
+		if pad != 0 {
+			if !dryrun {
+				serializeTLVOptionPadding(buf[length-2:], pad)
+			}
+			length += pad
+		}
+	}
+	return length - 2
+}
+
+type ipv6ExtensionBase struct {
+	BaseLayer
+	NextHeader   IPProtocol
+	HeaderLength uint8
+	ActualLength int
+}
+
+func decodeIPv6ExtensionBase(data []byte, df gopacket.DecodeFeedback) (i ipv6ExtensionBase, returnedErr error) {
+	if len(data) < 2 {
+		df.SetTruncated()
+		return ipv6ExtensionBase{}, fmt.Errorf("Invalid ip6-extension header. Length %d less than 2", len(data))
+	}
+	i.NextHeader = IPProtocol(data[0])
+	i.HeaderLength = data[1]
+	i.ActualLength = int(i.HeaderLength)*8 + 8
+	if len(data) < i.ActualLength {
+		return ipv6ExtensionBase{}, fmt.Errorf("Invalid ip6-extension header. Length %d less than specified length %d", len(data), i.ActualLength)
+	}
+	i.Contents = data[:i.ActualLength]
+	i.Payload = data[i.ActualLength:]
+	return
+}
+
+// IPv6ExtensionSkipper is a DecodingLayer which decodes and ignores v6
+// extensions.  You can use it with a DecodingLayerParser to handle IPv6 stacks
+// which may or may not have extensions.
+type IPv6ExtensionSkipper struct {
+	NextHeader IPProtocol
+	BaseLayer
+}
+
+// DecodeFromBytes implementation according to gopacket.DecodingLayer
+func (i *IPv6ExtensionSkipper) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	extension, err := decodeIPv6ExtensionBase(data, df)
+	if err != nil {
+		return err
+	}
+	i.BaseLayer = BaseLayer{data[:extension.ActualLength], data[extension.ActualLength:]}
+	i.NextHeader = extension.NextHeader
+	return nil
+}
+
+// CanDecode implementation according to gopacket.DecodingLayer
+func (i *IPv6ExtensionSkipper) CanDecode() gopacket.LayerClass {
+	return LayerClassIPv6Extension
+}
+
+// NextLayerType implementation according to gopacket.DecodingLayer
+func (i *IPv6ExtensionSkipper) NextLayerType() gopacket.LayerType {
+	return i.NextHeader.LayerType()
+}
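+
+// Illustrative sketch (editor's addition, not upstream gopacket code): the
+// skipper is typically chained into a DecodingLayerParser so one fixed decoder
+// stack works whether or not extension headers are present. The variable names
+// and packetData below are hypothetical.
+//
+//	var eth Ethernet
+//	var ip6 IPv6
+//	var skip IPv6ExtensionSkipper
+//	var tcp TCP
+//	parser := gopacket.NewDecodingLayerParser(
+//		LayerTypeEthernet, &eth, &ip6, &skip, &tcp)
+//	decoded := []gopacket.LayerType{}
+//	if err := parser.DecodeLayers(packetData, &decoded); err != nil {
+//		// handle the first layer that could not be decoded
+//	}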
+
+// IPv6HopByHopOption is a TLV option present in an IPv6 hop-by-hop extension.
+type IPv6HopByHopOption ipv6HeaderTLVOption
+
+// IPv6HopByHop is the IPv6 hop-by-hop extension.
+type IPv6HopByHop struct {
+	ipv6ExtensionBase
+	Options []*IPv6HopByHopOption
+}
+
+// LayerType returns LayerTypeIPv6HopByHop.
+func (i *IPv6HopByHop) LayerType() gopacket.LayerType { return LayerTypeIPv6HopByHop }
+
+// SerializeTo implementation according to gopacket.SerializableLayer
+func (i *IPv6HopByHop) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var bytes []byte
+	var err error
+
+	o := make([]*ipv6HeaderTLVOption, 0, len(i.Options))
+	for _, v := range i.Options {
+		o = append(o, (*ipv6HeaderTLVOption)(v))
+	}
+
+	l := serializeIPv6HeaderTLVOptions(nil, o, opts.FixLengths)
+	bytes, err = b.PrependBytes(l)
+	if err != nil {
+		return err
+	}
+	serializeIPv6HeaderTLVOptions(bytes, o, opts.FixLengths)
+
+	length := len(bytes) + 2
+	if length%8 != 0 {
+		return errors.New("IPv6HopByHop actual length must be a multiple of 8")
+	}
+	bytes, err = b.PrependBytes(2)
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(i.NextHeader)
+	if opts.FixLengths {
+		i.HeaderLength = uint8((length / 8) - 1)
+	}
+	bytes[1] = uint8(i.HeaderLength)
+	return nil
+}
+
+// DecodeFromBytes implementation according to gopacket.DecodingLayer
+func (i *IPv6HopByHop) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	var err error
+	i.ipv6ExtensionBase, err = decodeIPv6ExtensionBase(data, df)
+	if err != nil {
+		return err
+	}
+	i.Options = i.Options[:0]
+	offset := 2
+	for offset < i.ActualLength {
+		opt, err := decodeIPv6HeaderTLVOption(data[offset:], df)
+		if err != nil {
+			return err
+		}
+		i.Options = append(i.Options, (*IPv6HopByHopOption)(opt))
+		offset += opt.ActualLength
+	}
+	return nil
+}
+
+func decodeIPv6HopByHop(data []byte, p gopacket.PacketBuilder) error {
+	i := &IPv6HopByHop{}
+	err := i.DecodeFromBytes(data, p)
+	p.AddLayer(i)
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(i.NextHeader)
+}
+
+// SetJumboLength adds the IPv6HopByHopOptionJumbogram with the given length
+func (o *IPv6HopByHopOption) SetJumboLength(len uint32) {
+	o.OptionType = IPv6HopByHopOptionJumbogram
+	o.OptionLength = 4
+	o.ActualLength = 6
+	if o.OptionData == nil {
+		o.OptionData = make([]byte, 4)
+	}
+	binary.BigEndian.PutUint32(o.OptionData, len)
+	o.OptionAlignment = [2]uint8{4, 2}
+}
+
+// IPv6Routing is the IPv6 routing extension.
+type IPv6Routing struct {
+	ipv6ExtensionBase
+	RoutingType  uint8
+	SegmentsLeft uint8
+	// Reserved is the second set of 4 bytes in the extension; according to
+	// RFC 2460 it is supposed to be zero.
+	Reserved []byte
+	// SourceRoutingIPs is the set of IPv6 addresses requested for source routing,
+	// set only if RoutingType == 0.
+	SourceRoutingIPs []net.IP
+}
+
+// LayerType returns LayerTypeIPv6Routing.
+func (i *IPv6Routing) LayerType() gopacket.LayerType { return LayerTypeIPv6Routing }
+
+func decodeIPv6Routing(data []byte, p gopacket.PacketBuilder) error {
+	base, err := decodeIPv6ExtensionBase(data, p)
+	if err != nil {
+		return err
+	}
+	i := &IPv6Routing{
+		ipv6ExtensionBase: base,
+		RoutingType:       data[2],
+		SegmentsLeft:      data[3],
+		Reserved:          data[4:8],
+	}
+	switch i.RoutingType {
+	case 0: // Source routing
+		if (i.ActualLength-8)%16 != 0 {
+			return fmt.Errorf("Invalid IPv6 source routing, length of type 0 packet %d", i.ActualLength)
+		}
+		for d := i.Contents[8:]; len(d) >= 16; d = d[16:] {
+			i.SourceRoutingIPs = append(i.SourceRoutingIPs, net.IP(d[:16]))
+		}
+	default:
+		return fmt.Errorf("Unknown IPv6 routing header type %d", i.RoutingType)
+	}
+	p.AddLayer(i)
+	return p.NextDecoder(i.NextHeader)
+}
+
+// IPv6Fragment is the IPv6 fragment header, used for packet
+// fragmentation/defragmentation.
+type IPv6Fragment struct {
+	BaseLayer
+	NextHeader IPProtocol
+	// Reserved1 is bits [8-16), from least to most significant, 0-indexed
+	Reserved1      uint8
+	FragmentOffset uint16
+	// Reserved2 is bits [29-31), from least to most significant, 0-indexed
+	Reserved2      uint8
+	MoreFragments  bool
+	Identification uint32
+}
+
+// LayerType returns LayerTypeIPv6Fragment.
+func (i *IPv6Fragment) LayerType() gopacket.LayerType { return LayerTypeIPv6Fragment }
+
+func decodeIPv6Fragment(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 8 {
+		p.SetTruncated()
+		return fmt.Errorf("Invalid ip6-fragment header. Length %d less than 8", len(data))
+	}
+	i := &IPv6Fragment{
+		BaseLayer:      BaseLayer{data[:8], data[8:]},
+		NextHeader:     IPProtocol(data[0]),
+		Reserved1:      data[1],
+		FragmentOffset: binary.BigEndian.Uint16(data[2:4]) >> 3,
+		Reserved2:      data[3] & 0x6 >> 1,
+		MoreFragments:  data[3]&0x1 != 0,
+		Identification: binary.BigEndian.Uint32(data[4:8]),
+	}
+	p.AddLayer(i)
+	return p.NextDecoder(gopacket.DecodeFragment)
+}
+
+// IPv6DestinationOption is a TLV option present in an IPv6 destination options extension.
+type IPv6DestinationOption ipv6HeaderTLVOption
+
+// IPv6Destination is the IPv6 destination options header.
+type IPv6Destination struct {
+	ipv6ExtensionBase
+	Options []*IPv6DestinationOption
+}
+
+// LayerType returns LayerTypeIPv6Destination.
+func (i *IPv6Destination) LayerType() gopacket.LayerType { return LayerTypeIPv6Destination }
+
+// DecodeFromBytes implementation according to gopacket.DecodingLayer
+func (i *IPv6Destination) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	var err error
+	i.ipv6ExtensionBase, err = decodeIPv6ExtensionBase(data, df)
+	if err != nil {
+		return err
+	}
+	offset := 2
+	for offset < i.ActualLength {
+		opt, err := decodeIPv6HeaderTLVOption(data[offset:], df)
+		if err != nil {
+			return err
+		}
+		i.Options = append(i.Options, (*IPv6DestinationOption)(opt))
+		offset += opt.ActualLength
+	}
+	return nil
+}
+
+func decodeIPv6Destination(data []byte, p gopacket.PacketBuilder) error {
+	i := &IPv6Destination{}
+	err := i.DecodeFromBytes(data, p)
+	p.AddLayer(i)
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(i.NextHeader)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (i *IPv6Destination) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var bytes []byte
+	var err error
+
+	o := make([]*ipv6HeaderTLVOption, 0, len(i.Options))
+	for _, v := range i.Options {
+		o = append(o, (*ipv6HeaderTLVOption)(v))
+	}
+
+	l := serializeIPv6HeaderTLVOptions(nil, o, opts.FixLengths)
+	bytes, err = b.PrependBytes(l)
+	if err != nil {
+		return err
+	}
+	serializeIPv6HeaderTLVOptions(bytes, o, opts.FixLengths)
+
+	length := len(bytes) + 2
+	if length%8 != 0 {
+		return errors.New("IPv6Destination actual length must be a multiple of 8")
+	}
+	bytes, err = b.PrependBytes(2)
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(i.NextHeader)
+	if opts.FixLengths {
+		i.HeaderLength = uint8((length / 8) - 1)
+	}
+	bytes[1] = uint8(i.HeaderLength)
+	return nil
+}
+
+func checkIPv6Address(addr net.IP) error {
+	if len(addr) == net.IPv6len {
+		return nil
+	}
+	if len(addr) == net.IPv4len {
+		return errors.New("address is IPv4")
+	}
+	return fmt.Errorf("wrong length of %d bytes instead of %d", len(addr), net.IPv6len)
+}
+
+// AddressTo16 ensures IPv6.SrcIP and IPv6.DstIP are actually IPv6 addresses (i.e. 16-byte addresses).
+func (ipv6 *IPv6) AddressTo16() error {
+	if err := checkIPv6Address(ipv6.SrcIP); err != nil {
+		return fmt.Errorf("Invalid source IPv6 address (%s)", err)
+	}
+	if err := checkIPv6Address(ipv6.DstIP); err != nil {
+		return fmt.Errorf("Invalid destination IPv6 address (%s)", err)
+	}
+	return nil
+}
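+
+// Editor's note (illustrative, not upstream code): SerializeTo calls
+// AddressTo16 before copying the addresses, so callers constructing an IPv6
+// layer by hand should supply 16-byte addresses, e.g. by calling
+// net.IP.To16() on any address that might be stored in 4-byte form.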
diff --git a/vendor/github.com/google/gopacket/layers/ipsec.go b/vendor/github.com/google/gopacket/layers/ipsec.go
new file mode 100644
index 0000000..12f31ca
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ipsec.go
@@ -0,0 +1,77 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"github.com/google/gopacket"
+)
+
+// IPSecAH is the authentication header for IPv4/6 defined in
+// http://tools.ietf.org/html/rfc2402
+type IPSecAH struct {
+	// While the auth header can be used for both IPv4 and v6, its format is that of
+	// an IPv6 extension (NextHeader, PayloadLength, etc...), so we use ipv6ExtensionBase
+	// to build it.
+	ipv6ExtensionBase
+	Reserved           uint16
+	SPI, Seq           uint32
+	AuthenticationData []byte
+}
+
+// LayerType returns LayerTypeIPSecAH.
+func (i *IPSecAH) LayerType() gopacket.LayerType { return LayerTypeIPSecAH }
+
+func decodeIPSecAH(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 12 {
+		p.SetTruncated()
+		return errors.New("IPSec AH packet less than 12 bytes")
+	}
+	i := &IPSecAH{
+		ipv6ExtensionBase: ipv6ExtensionBase{
+			NextHeader:   IPProtocol(data[0]),
+			HeaderLength: data[1],
+		},
+		Reserved: binary.BigEndian.Uint16(data[2:4]),
+		SPI:      binary.BigEndian.Uint32(data[4:8]),
+		Seq:      binary.BigEndian.Uint32(data[8:12]),
+	}
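+	// Per RFC 2402, the AH payload length field counts 32-bit words minus 2,
+	// so the header's total size in bytes is (HeaderLength + 2) * 4.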
+	i.ActualLength = (int(i.HeaderLength) + 2) * 4
+	if len(data) < i.ActualLength {
+		p.SetTruncated()
+		return errors.New("Truncated AH packet < ActualLength")
+	}
+	i.AuthenticationData = data[12:i.ActualLength]
+	i.Contents = data[:i.ActualLength]
+	i.Payload = data[i.ActualLength:]
+	p.AddLayer(i)
+	return p.NextDecoder(i.NextHeader)
+}
+
+// IPSecESP is the encapsulating security payload defined in
+// http://tools.ietf.org/html/rfc2406
+type IPSecESP struct {
+	BaseLayer
+	SPI, Seq uint32
+	// Encrypted contains the encrypted set of bytes sent in an ESP
+	Encrypted []byte
+}
+
+// LayerType returns LayerTypeIPSecESP.
+func (i *IPSecESP) LayerType() gopacket.LayerType { return LayerTypeIPSecESP }
+
+func decodeIPSecESP(data []byte, p gopacket.PacketBuilder) error {
+	i := &IPSecESP{
+		BaseLayer: BaseLayer{data, nil},
+		SPI:       binary.BigEndian.Uint32(data[:4]),
+		Seq:       binary.BigEndian.Uint32(data[4:8]),
+		Encrypted: data[8:],
+	}
+	p.AddLayer(i)
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/layertypes.go b/vendor/github.com/google/gopacket/layers/layertypes.go
new file mode 100644
index 0000000..69d25ae
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/layertypes.go
@@ -0,0 +1,223 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"github.com/google/gopacket"
+)
+
+var (
+	LayerTypeARP                          = gopacket.RegisterLayerType(10, gopacket.LayerTypeMetadata{Name: "ARP", Decoder: gopacket.DecodeFunc(decodeARP)})
+	LayerTypeCiscoDiscovery               = gopacket.RegisterLayerType(11, gopacket.LayerTypeMetadata{Name: "CiscoDiscovery", Decoder: gopacket.DecodeFunc(decodeCiscoDiscovery)})
+	LayerTypeEthernetCTP                  = gopacket.RegisterLayerType(12, gopacket.LayerTypeMetadata{Name: "EthernetCTP", Decoder: gopacket.DecodeFunc(decodeEthernetCTP)})
+	LayerTypeEthernetCTPForwardData       = gopacket.RegisterLayerType(13, gopacket.LayerTypeMetadata{Name: "EthernetCTPForwardData", Decoder: nil})
+	LayerTypeEthernetCTPReply             = gopacket.RegisterLayerType(14, gopacket.LayerTypeMetadata{Name: "EthernetCTPReply", Decoder: nil})
+	LayerTypeDot1Q                        = gopacket.RegisterLayerType(15, gopacket.LayerTypeMetadata{Name: "Dot1Q", Decoder: gopacket.DecodeFunc(decodeDot1Q)})
+	LayerTypeEtherIP                      = gopacket.RegisterLayerType(16, gopacket.LayerTypeMetadata{Name: "EtherIP", Decoder: gopacket.DecodeFunc(decodeEtherIP)})
+	LayerTypeEthernet                     = gopacket.RegisterLayerType(17, gopacket.LayerTypeMetadata{Name: "Ethernet", Decoder: gopacket.DecodeFunc(decodeEthernet)})
+	LayerTypeGRE                          = gopacket.RegisterLayerType(18, gopacket.LayerTypeMetadata{Name: "GRE", Decoder: gopacket.DecodeFunc(decodeGRE)})
+	LayerTypeICMPv4                       = gopacket.RegisterLayerType(19, gopacket.LayerTypeMetadata{Name: "ICMPv4", Decoder: gopacket.DecodeFunc(decodeICMPv4)})
+	LayerTypeIPv4                         = gopacket.RegisterLayerType(20, gopacket.LayerTypeMetadata{Name: "IPv4", Decoder: gopacket.DecodeFunc(decodeIPv4)})
+	LayerTypeIPv6                         = gopacket.RegisterLayerType(21, gopacket.LayerTypeMetadata{Name: "IPv6", Decoder: gopacket.DecodeFunc(decodeIPv6)})
+	LayerTypeLLC                          = gopacket.RegisterLayerType(22, gopacket.LayerTypeMetadata{Name: "LLC", Decoder: gopacket.DecodeFunc(decodeLLC)})
+	LayerTypeSNAP                         = gopacket.RegisterLayerType(23, gopacket.LayerTypeMetadata{Name: "SNAP", Decoder: gopacket.DecodeFunc(decodeSNAP)})
+	LayerTypeMPLS                         = gopacket.RegisterLayerType(24, gopacket.LayerTypeMetadata{Name: "MPLS", Decoder: gopacket.DecodeFunc(decodeMPLS)})
+	LayerTypePPP                          = gopacket.RegisterLayerType(25, gopacket.LayerTypeMetadata{Name: "PPP", Decoder: gopacket.DecodeFunc(decodePPP)})
+	LayerTypePPPoE                        = gopacket.RegisterLayerType(26, gopacket.LayerTypeMetadata{Name: "PPPoE", Decoder: gopacket.DecodeFunc(decodePPPoE)})
+	LayerTypeRUDP                         = gopacket.RegisterLayerType(27, gopacket.LayerTypeMetadata{Name: "RUDP", Decoder: gopacket.DecodeFunc(decodeRUDP)})
+	LayerTypeSCTP                         = gopacket.RegisterLayerType(28, gopacket.LayerTypeMetadata{Name: "SCTP", Decoder: gopacket.DecodeFunc(decodeSCTP)})
+	LayerTypeSCTPUnknownChunkType         = gopacket.RegisterLayerType(29, gopacket.LayerTypeMetadata{Name: "SCTPUnknownChunkType", Decoder: nil})
+	LayerTypeSCTPData                     = gopacket.RegisterLayerType(30, gopacket.LayerTypeMetadata{Name: "SCTPData", Decoder: nil})
+	LayerTypeSCTPInit                     = gopacket.RegisterLayerType(31, gopacket.LayerTypeMetadata{Name: "SCTPInit", Decoder: nil})
+	LayerTypeSCTPSack                     = gopacket.RegisterLayerType(32, gopacket.LayerTypeMetadata{Name: "SCTPSack", Decoder: nil})
+	LayerTypeSCTPHeartbeat                = gopacket.RegisterLayerType(33, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeat", Decoder: nil})
+	LayerTypeSCTPError                    = gopacket.RegisterLayerType(34, gopacket.LayerTypeMetadata{Name: "SCTPError", Decoder: nil})
+	LayerTypeSCTPShutdown                 = gopacket.RegisterLayerType(35, gopacket.LayerTypeMetadata{Name: "SCTPShutdown", Decoder: nil})
+	LayerTypeSCTPShutdownAck              = gopacket.RegisterLayerType(36, gopacket.LayerTypeMetadata{Name: "SCTPShutdownAck", Decoder: nil})
+	LayerTypeSCTPCookieEcho               = gopacket.RegisterLayerType(37, gopacket.LayerTypeMetadata{Name: "SCTPCookieEcho", Decoder: nil})
+	LayerTypeSCTPEmptyLayer               = gopacket.RegisterLayerType(38, gopacket.LayerTypeMetadata{Name: "SCTPEmptyLayer", Decoder: nil})
+	LayerTypeSCTPInitAck                  = gopacket.RegisterLayerType(39, gopacket.LayerTypeMetadata{Name: "SCTPInitAck", Decoder: nil})
+	LayerTypeSCTPHeartbeatAck             = gopacket.RegisterLayerType(40, gopacket.LayerTypeMetadata{Name: "SCTPHeartbeatAck", Decoder: nil})
+	LayerTypeSCTPAbort                    = gopacket.RegisterLayerType(41, gopacket.LayerTypeMetadata{Name: "SCTPAbort", Decoder: nil})
+	LayerTypeSCTPShutdownComplete         = gopacket.RegisterLayerType(42, gopacket.LayerTypeMetadata{Name: "SCTPShutdownComplete", Decoder: nil})
+	LayerTypeSCTPCookieAck                = gopacket.RegisterLayerType(43, gopacket.LayerTypeMetadata{Name: "SCTPCookieAck", Decoder: nil})
+	LayerTypeTCP                          = gopacket.RegisterLayerType(44, gopacket.LayerTypeMetadata{Name: "TCP", Decoder: gopacket.DecodeFunc(decodeTCP)})
+	LayerTypeUDP                          = gopacket.RegisterLayerType(45, gopacket.LayerTypeMetadata{Name: "UDP", Decoder: gopacket.DecodeFunc(decodeUDP)})
+	LayerTypeIPv6HopByHop                 = gopacket.RegisterLayerType(46, gopacket.LayerTypeMetadata{Name: "IPv6HopByHop", Decoder: gopacket.DecodeFunc(decodeIPv6HopByHop)})
+	LayerTypeIPv6Routing                  = gopacket.RegisterLayerType(47, gopacket.LayerTypeMetadata{Name: "IPv6Routing", Decoder: gopacket.DecodeFunc(decodeIPv6Routing)})
+	LayerTypeIPv6Fragment                 = gopacket.RegisterLayerType(48, gopacket.LayerTypeMetadata{Name: "IPv6Fragment", Decoder: gopacket.DecodeFunc(decodeIPv6Fragment)})
+	LayerTypeIPv6Destination              = gopacket.RegisterLayerType(49, gopacket.LayerTypeMetadata{Name: "IPv6Destination", Decoder: gopacket.DecodeFunc(decodeIPv6Destination)})
+	LayerTypeIPSecAH                      = gopacket.RegisterLayerType(50, gopacket.LayerTypeMetadata{Name: "IPSecAH", Decoder: gopacket.DecodeFunc(decodeIPSecAH)})
+	LayerTypeIPSecESP                     = gopacket.RegisterLayerType(51, gopacket.LayerTypeMetadata{Name: "IPSecESP", Decoder: gopacket.DecodeFunc(decodeIPSecESP)})
+	LayerTypeUDPLite                      = gopacket.RegisterLayerType(52, gopacket.LayerTypeMetadata{Name: "UDPLite", Decoder: gopacket.DecodeFunc(decodeUDPLite)})
+	LayerTypeFDDI                         = gopacket.RegisterLayerType(53, gopacket.LayerTypeMetadata{Name: "FDDI", Decoder: gopacket.DecodeFunc(decodeFDDI)})
+	LayerTypeLoopback                     = gopacket.RegisterLayerType(54, gopacket.LayerTypeMetadata{Name: "Loopback", Decoder: gopacket.DecodeFunc(decodeLoopback)})
+	LayerTypeEAP                          = gopacket.RegisterLayerType(55, gopacket.LayerTypeMetadata{Name: "EAP", Decoder: gopacket.DecodeFunc(decodeEAP)})
+	LayerTypeEAPOL                        = gopacket.RegisterLayerType(56, gopacket.LayerTypeMetadata{Name: "EAPOL", Decoder: gopacket.DecodeFunc(decodeEAPOL)})
+	LayerTypeICMPv6                       = gopacket.RegisterLayerType(57, gopacket.LayerTypeMetadata{Name: "ICMPv6", Decoder: gopacket.DecodeFunc(decodeICMPv6)})
+	LayerTypeLinkLayerDiscovery           = gopacket.RegisterLayerType(58, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscovery", Decoder: gopacket.DecodeFunc(decodeLinkLayerDiscovery)})
+	LayerTypeCiscoDiscoveryInfo           = gopacket.RegisterLayerType(59, gopacket.LayerTypeMetadata{Name: "CiscoDiscoveryInfo", Decoder: gopacket.DecodeFunc(decodeCiscoDiscoveryInfo)})
+	LayerTypeLinkLayerDiscoveryInfo       = gopacket.RegisterLayerType(60, gopacket.LayerTypeMetadata{Name: "LinkLayerDiscoveryInfo", Decoder: nil})
+	LayerTypeNortelDiscovery              = gopacket.RegisterLayerType(61, gopacket.LayerTypeMetadata{Name: "NortelDiscovery", Decoder: gopacket.DecodeFunc(decodeNortelDiscovery)})
+	LayerTypeIGMP                         = gopacket.RegisterLayerType(62, gopacket.LayerTypeMetadata{Name: "IGMP", Decoder: gopacket.DecodeFunc(decodeIGMP)})
+	LayerTypePFLog                        = gopacket.RegisterLayerType(63, gopacket.LayerTypeMetadata{Name: "PFLog", Decoder: gopacket.DecodeFunc(decodePFLog)})
+	LayerTypeRadioTap                     = gopacket.RegisterLayerType(64, gopacket.LayerTypeMetadata{Name: "RadioTap", Decoder: gopacket.DecodeFunc(decodeRadioTap)})
+	LayerTypeDot11                        = gopacket.RegisterLayerType(65, gopacket.LayerTypeMetadata{Name: "Dot11", Decoder: gopacket.DecodeFunc(decodeDot11)})
+	LayerTypeDot11Ctrl                    = gopacket.RegisterLayerType(66, gopacket.LayerTypeMetadata{Name: "Dot11Ctrl", Decoder: gopacket.DecodeFunc(decodeDot11Ctrl)})
+	LayerTypeDot11Data                    = gopacket.RegisterLayerType(67, gopacket.LayerTypeMetadata{Name: "Dot11Data", Decoder: gopacket.DecodeFunc(decodeDot11Data)})
+	LayerTypeDot11DataCFAck               = gopacket.RegisterLayerType(68, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAck)})
+	LayerTypeDot11DataCFPoll              = gopacket.RegisterLayerType(69, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFPoll)})
+	LayerTypeDot11DataCFAckPoll           = gopacket.RegisterLayerType(70, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAckPoll)})
+	LayerTypeDot11DataNull                = gopacket.RegisterLayerType(71, gopacket.LayerTypeMetadata{Name: "Dot11DataNull", Decoder: gopacket.DecodeFunc(decodeDot11DataNull)})
+	LayerTypeDot11DataCFAckNoData         = gopacket.RegisterLayerType(72, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAck)})
+	LayerTypeDot11DataCFPollNoData        = gopacket.RegisterLayerType(73, gopacket.LayerTypeMetadata{Name: "Dot11DataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFPoll)})
+	LayerTypeDot11DataCFAckPollNoData     = gopacket.RegisterLayerType(74, gopacket.LayerTypeMetadata{Name: "Dot11DataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataCFAckPoll)})
+	LayerTypeDot11DataQOSData             = gopacket.RegisterLayerType(75, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSData", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSData)})
+	LayerTypeDot11DataQOSDataCFAck        = gopacket.RegisterLayerType(76, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAck", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAck)})
+	LayerTypeDot11DataQOSDataCFPoll       = gopacket.RegisterLayerType(77, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFPoll)})
+	LayerTypeDot11DataQOSDataCFAckPoll    = gopacket.RegisterLayerType(78, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSDataCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSDataCFAckPoll)})
+	LayerTypeDot11DataQOSNull             = gopacket.RegisterLayerType(79, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSNull", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSNull)})
+	LayerTypeDot11DataQOSCFPollNoData     = gopacket.RegisterLayerType(80, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSCFPollNoData)})
+	LayerTypeDot11DataQOSCFAckPollNoData  = gopacket.RegisterLayerType(81, gopacket.LayerTypeMetadata{Name: "Dot11DataQOSCFAckPoll", Decoder: gopacket.DecodeFunc(decodeDot11DataQOSCFAckPollNoData)})
+	LayerTypeDot11InformationElement      = gopacket.RegisterLayerType(82, gopacket.LayerTypeMetadata{Name: "Dot11InformationElement", Decoder: gopacket.DecodeFunc(decodeDot11InformationElement)})
+	LayerTypeDot11CtrlCTS                 = gopacket.RegisterLayerType(83, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCTS", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCTS)})
+	LayerTypeDot11CtrlRTS                 = gopacket.RegisterLayerType(84, gopacket.LayerTypeMetadata{Name: "Dot11CtrlRTS", Decoder: gopacket.DecodeFunc(decodeDot11CtrlRTS)})
+	LayerTypeDot11CtrlBlockAckReq         = gopacket.RegisterLayerType(85, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAckReq", Decoder: gopacket.DecodeFunc(decodeDot11CtrlBlockAckReq)})
+	LayerTypeDot11CtrlBlockAck            = gopacket.RegisterLayerType(86, gopacket.LayerTypeMetadata{Name: "Dot11CtrlBlockAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlBlockAck)})
+	LayerTypeDot11CtrlPowersavePoll       = gopacket.RegisterLayerType(87, gopacket.LayerTypeMetadata{Name: "Dot11CtrlPowersavePoll", Decoder: gopacket.DecodeFunc(decodeDot11CtrlPowersavePoll)})
+	LayerTypeDot11CtrlAck                 = gopacket.RegisterLayerType(88, gopacket.LayerTypeMetadata{Name: "Dot11CtrlAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlAck)})
+	LayerTypeDot11CtrlCFEnd               = gopacket.RegisterLayerType(89, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEnd", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCFEnd)})
+	LayerTypeDot11CtrlCFEndAck            = gopacket.RegisterLayerType(90, gopacket.LayerTypeMetadata{Name: "Dot11CtrlCFEndAck", Decoder: gopacket.DecodeFunc(decodeDot11CtrlCFEndAck)})
+	LayerTypeDot11MgmtAssociationReq      = gopacket.RegisterLayerType(91, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAssociationReq)})
+	LayerTypeDot11MgmtAssociationResp     = gopacket.RegisterLayerType(92, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAssociationResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAssociationResp)})
+	LayerTypeDot11MgmtReassociationReq    = gopacket.RegisterLayerType(93, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtReassociationReq)})
+	LayerTypeDot11MgmtReassociationResp   = gopacket.RegisterLayerType(94, gopacket.LayerTypeMetadata{Name: "Dot11MgmtReassociationResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtReassociationResp)})
+	LayerTypeDot11MgmtProbeReq            = gopacket.RegisterLayerType(95, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeReq", Decoder: gopacket.DecodeFunc(decodeDot11MgmtProbeReq)})
+	LayerTypeDot11MgmtProbeResp           = gopacket.RegisterLayerType(96, gopacket.LayerTypeMetadata{Name: "Dot11MgmtProbeResp", Decoder: gopacket.DecodeFunc(decodeDot11MgmtProbeResp)})
+	LayerTypeDot11MgmtMeasurementPilot    = gopacket.RegisterLayerType(97, gopacket.LayerTypeMetadata{Name: "Dot11MgmtMeasurementPilot", Decoder: gopacket.DecodeFunc(decodeDot11MgmtMeasurementPilot)})
+	LayerTypeDot11MgmtBeacon              = gopacket.RegisterLayerType(98, gopacket.LayerTypeMetadata{Name: "Dot11MgmtBeacon", Decoder: gopacket.DecodeFunc(decodeDot11MgmtBeacon)})
+	LayerTypeDot11MgmtATIM                = gopacket.RegisterLayerType(99, gopacket.LayerTypeMetadata{Name: "Dot11MgmtATIM", Decoder: gopacket.DecodeFunc(decodeDot11MgmtATIM)})
+	LayerTypeDot11MgmtDisassociation      = gopacket.RegisterLayerType(100, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDisassociation", Decoder: gopacket.DecodeFunc(decodeDot11MgmtDisassociation)})
+	LayerTypeDot11MgmtAuthentication      = gopacket.RegisterLayerType(101, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAuthentication", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAuthentication)})
+	LayerTypeDot11MgmtDeauthentication    = gopacket.RegisterLayerType(102, gopacket.LayerTypeMetadata{Name: "Dot11MgmtDeauthentication", Decoder: gopacket.DecodeFunc(decodeDot11MgmtDeauthentication)})
+	LayerTypeDot11MgmtAction              = gopacket.RegisterLayerType(103, gopacket.LayerTypeMetadata{Name: "Dot11MgmtAction", Decoder: gopacket.DecodeFunc(decodeDot11MgmtAction)})
+	LayerTypeDot11MgmtActionNoAck         = gopacket.RegisterLayerType(104, gopacket.LayerTypeMetadata{Name: "Dot11MgmtActionNoAck", Decoder: gopacket.DecodeFunc(decodeDot11MgmtActionNoAck)})
+	LayerTypeDot11MgmtArubaWLAN           = gopacket.RegisterLayerType(105, gopacket.LayerTypeMetadata{Name: "Dot11MgmtArubaWLAN", Decoder: gopacket.DecodeFunc(decodeDot11MgmtArubaWLAN)})
+	LayerTypeDot11WEP                     = gopacket.RegisterLayerType(106, gopacket.LayerTypeMetadata{Name: "Dot11WEP", Decoder: gopacket.DecodeFunc(decodeDot11WEP)})
+	LayerTypeDNS                          = gopacket.RegisterLayerType(107, gopacket.LayerTypeMetadata{Name: "DNS", Decoder: gopacket.DecodeFunc(decodeDNS)})
+	LayerTypeUSB                          = gopacket.RegisterLayerType(108, gopacket.LayerTypeMetadata{Name: "USB", Decoder: gopacket.DecodeFunc(decodeUSB)})
+	LayerTypeUSBRequestBlockSetup         = gopacket.RegisterLayerType(109, gopacket.LayerTypeMetadata{Name: "USBRequestBlockSetup", Decoder: gopacket.DecodeFunc(decodeUSBRequestBlockSetup)})
+	LayerTypeUSBControl                   = gopacket.RegisterLayerType(110, gopacket.LayerTypeMetadata{Name: "USBControl", Decoder: gopacket.DecodeFunc(decodeUSBControl)})
+	LayerTypeUSBInterrupt                 = gopacket.RegisterLayerType(111, gopacket.LayerTypeMetadata{Name: "USBInterrupt", Decoder: gopacket.DecodeFunc(decodeUSBInterrupt)})
+	LayerTypeUSBBulk                      = gopacket.RegisterLayerType(112, gopacket.LayerTypeMetadata{Name: "USBBulk", Decoder: gopacket.DecodeFunc(decodeUSBBulk)})
+	LayerTypeLinuxSLL                     = gopacket.RegisterLayerType(113, gopacket.LayerTypeMetadata{Name: "Linux SLL", Decoder: gopacket.DecodeFunc(decodeLinuxSLL)})
+	LayerTypeSFlow                        = gopacket.RegisterLayerType(114, gopacket.LayerTypeMetadata{Name: "SFlow", Decoder: gopacket.DecodeFunc(decodeSFlow)})
+	LayerTypePrismHeader                  = gopacket.RegisterLayerType(115, gopacket.LayerTypeMetadata{Name: "Prism monitor mode header", Decoder: gopacket.DecodeFunc(decodePrismHeader)})
+	LayerTypeVXLAN                        = gopacket.RegisterLayerType(116, gopacket.LayerTypeMetadata{Name: "VXLAN", Decoder: gopacket.DecodeFunc(decodeVXLAN)})
+	LayerTypeNTP                          = gopacket.RegisterLayerType(117, gopacket.LayerTypeMetadata{Name: "NTP", Decoder: gopacket.DecodeFunc(decodeNTP)})
+	LayerTypeDHCPv4                       = gopacket.RegisterLayerType(118, gopacket.LayerTypeMetadata{Name: "DHCPv4", Decoder: gopacket.DecodeFunc(decodeDHCPv4)})
+	LayerTypeVRRP                         = gopacket.RegisterLayerType(119, gopacket.LayerTypeMetadata{Name: "VRRP", Decoder: gopacket.DecodeFunc(decodeVRRP)})
+	LayerTypeGeneve                       = gopacket.RegisterLayerType(120, gopacket.LayerTypeMetadata{Name: "Geneve", Decoder: gopacket.DecodeFunc(decodeGeneve)})
+	LayerTypeSTP                          = gopacket.RegisterLayerType(121, gopacket.LayerTypeMetadata{Name: "STP", Decoder: gopacket.DecodeFunc(decodeSTP)})
+	LayerTypeBFD                          = gopacket.RegisterLayerType(122, gopacket.LayerTypeMetadata{Name: "BFD", Decoder: gopacket.DecodeFunc(decodeBFD)})
+	LayerTypeOSPF                         = gopacket.RegisterLayerType(123, gopacket.LayerTypeMetadata{Name: "OSPF", Decoder: gopacket.DecodeFunc(decodeOSPF)})
+	LayerTypeICMPv6RouterSolicitation     = gopacket.RegisterLayerType(124, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterSolicitation", Decoder: gopacket.DecodeFunc(decodeICMPv6RouterSolicitation)})
+	LayerTypeICMPv6RouterAdvertisement    = gopacket.RegisterLayerType(125, gopacket.LayerTypeMetadata{Name: "ICMPv6RouterAdvertisement", Decoder: gopacket.DecodeFunc(decodeICMPv6RouterAdvertisement)})
+	LayerTypeICMPv6NeighborSolicitation   = gopacket.RegisterLayerType(126, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborSolicitation", Decoder: gopacket.DecodeFunc(decodeICMPv6NeighborSolicitation)})
+	LayerTypeICMPv6NeighborAdvertisement  = gopacket.RegisterLayerType(127, gopacket.LayerTypeMetadata{Name: "ICMPv6NeighborAdvertisement", Decoder: gopacket.DecodeFunc(decodeICMPv6NeighborAdvertisement)})
+	LayerTypeICMPv6Redirect               = gopacket.RegisterLayerType(128, gopacket.LayerTypeMetadata{Name: "ICMPv6Redirect", Decoder: gopacket.DecodeFunc(decodeICMPv6Redirect)})
+	LayerTypeGTPv1U                       = gopacket.RegisterLayerType(129, gopacket.LayerTypeMetadata{Name: "GTPv1U", Decoder: gopacket.DecodeFunc(decodeGTPv1u)})
+	LayerTypeEAPOLKey                     = gopacket.RegisterLayerType(130, gopacket.LayerTypeMetadata{Name: "EAPOLKey", Decoder: gopacket.DecodeFunc(decodeEAPOLKey)})
+	LayerTypeLCM                          = gopacket.RegisterLayerType(131, gopacket.LayerTypeMetadata{Name: "LCM", Decoder: gopacket.DecodeFunc(decodeLCM)})
+	LayerTypeICMPv6Echo                   = gopacket.RegisterLayerType(132, gopacket.LayerTypeMetadata{Name: "ICMPv6Echo", Decoder: gopacket.DecodeFunc(decodeICMPv6Echo)})
+	LayerTypeSIP                          = gopacket.RegisterLayerType(133, gopacket.LayerTypeMetadata{Name: "SIP", Decoder: gopacket.DecodeFunc(decodeSIP)})
+	LayerTypeDHCPv6                       = gopacket.RegisterLayerType(134, gopacket.LayerTypeMetadata{Name: "DHCPv6", Decoder: gopacket.DecodeFunc(decodeDHCPv6)})
+	LayerTypeMLDv1MulticastListenerReport = gopacket.RegisterLayerType(135, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerReport", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerReport)})
+	LayerTypeMLDv1MulticastListenerDone   = gopacket.RegisterLayerType(136, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerDone", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerDone)})
+	LayerTypeMLDv1MulticastListenerQuery  = gopacket.RegisterLayerType(137, gopacket.LayerTypeMetadata{Name: "MLDv1MulticastListenerQuery", Decoder: gopacket.DecodeFunc(decodeMLDv1MulticastListenerQuery)})
+	LayerTypeMLDv2MulticastListenerReport = gopacket.RegisterLayerType(138, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerReport", Decoder: gopacket.DecodeFunc(decodeMLDv2MulticastListenerReport)})
+	LayerTypeMLDv2MulticastListenerQuery  = gopacket.RegisterLayerType(139, gopacket.LayerTypeMetadata{Name: "MLDv2MulticastListenerQuery", Decoder: gopacket.DecodeFunc(decodeMLDv2MulticastListenerQuery)})
+	LayerTypeTLS                          = gopacket.RegisterLayerType(140, gopacket.LayerTypeMetadata{Name: "TLS", Decoder: gopacket.DecodeFunc(decodeTLS)})
+	LayerTypeModbusTCP                    = gopacket.RegisterLayerType(141, gopacket.LayerTypeMetadata{Name: "ModbusTCP", Decoder: gopacket.DecodeFunc(decodeModbusTCP)})
+	LayerTypeRMCP                         = gopacket.RegisterLayerType(142, gopacket.LayerTypeMetadata{Name: "RMCP", Decoder: gopacket.DecodeFunc(decodeRMCP)})
+	LayerTypeASF                          = gopacket.RegisterLayerType(143, gopacket.LayerTypeMetadata{Name: "ASF", Decoder: gopacket.DecodeFunc(decodeASF)})
+	LayerTypeASFPresencePong              = gopacket.RegisterLayerType(144, gopacket.LayerTypeMetadata{Name: "ASFPresencePong", Decoder: gopacket.DecodeFunc(decodeASFPresencePong)})
+	LayerTypeERSPANII                     = gopacket.RegisterLayerType(145, gopacket.LayerTypeMetadata{Name: "ERSPAN Type II", Decoder: gopacket.DecodeFunc(decodeERSPANII)})
+	LayerTypeRADIUS                       = gopacket.RegisterLayerType(146, gopacket.LayerTypeMetadata{Name: "RADIUS", Decoder: gopacket.DecodeFunc(decodeRADIUS)})
+)
+
+var (
+	// LayerClassIPNetwork contains TCP/IP network layer types.
+	LayerClassIPNetwork = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeIPv4,
+		LayerTypeIPv6,
+	})
+	// LayerClassIPTransport contains TCP/IP transport layer types.
+	LayerClassIPTransport = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeTCP,
+		LayerTypeUDP,
+		LayerTypeSCTP,
+	})
+	// LayerClassIPControl contains TCP/IP control protocols.
+	LayerClassIPControl = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeICMPv4,
+		LayerTypeICMPv6,
+	})
+	// LayerClassSCTPChunk contains SCTP chunk types (not the top-level SCTP
+	// layer).
+	LayerClassSCTPChunk = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeSCTPUnknownChunkType,
+		LayerTypeSCTPData,
+		LayerTypeSCTPInit,
+		LayerTypeSCTPSack,
+		LayerTypeSCTPHeartbeat,
+		LayerTypeSCTPError,
+		LayerTypeSCTPShutdown,
+		LayerTypeSCTPShutdownAck,
+		LayerTypeSCTPCookieEcho,
+		LayerTypeSCTPEmptyLayer,
+		LayerTypeSCTPInitAck,
+		LayerTypeSCTPHeartbeatAck,
+		LayerTypeSCTPAbort,
+		LayerTypeSCTPShutdownComplete,
+		LayerTypeSCTPCookieAck,
+	})
+	// LayerClassIPv6Extension contains IPv6 extension headers.
+	LayerClassIPv6Extension = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeIPv6HopByHop,
+		LayerTypeIPv6Routing,
+		LayerTypeIPv6Fragment,
+		LayerTypeIPv6Destination,
+	})
+	LayerClassIPSec = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeIPSecAH,
+		LayerTypeIPSecESP,
+	})
+	// LayerClassICMPv6NDP contains ICMPv6 neighbor discovery protocol
+	// messages.
+	LayerClassICMPv6NDP = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeICMPv6RouterSolicitation,
+		LayerTypeICMPv6RouterAdvertisement,
+		LayerTypeICMPv6NeighborSolicitation,
+		LayerTypeICMPv6NeighborAdvertisement,
+		LayerTypeICMPv6Redirect,
+	})
+	// LayerClassMLDv1 contains multicast listener discovery protocol v1 message types.
+	LayerClassMLDv1 = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeMLDv1MulticastListenerQuery,
+		LayerTypeMLDv1MulticastListenerReport,
+		LayerTypeMLDv1MulticastListenerDone,
+	})
+	// LayerClassMLDv2 contains multicast listener discovery protocol v2 message types.
+	LayerClassMLDv2 = gopacket.NewLayerClass([]gopacket.LayerType{
+		LayerTypeMLDv1MulticastListenerReport,
+		LayerTypeMLDv1MulticastListenerDone,
+		LayerTypeMLDv2MulticastListenerReport,
+		LayerTypeMLDv1MulticastListenerQuery,
+		LayerTypeMLDv2MulticastListenerQuery,
+	})
+)
diff --git a/vendor/github.com/google/gopacket/layers/lcm.go b/vendor/github.com/google/gopacket/layers/lcm.go
new file mode 100644
index 0000000..58a4b82
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/lcm.go
@@ -0,0 +1,218 @@
+// Copyright 2018 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	// LCMShortHeaderMagic is the LCM small message header magic number
+	LCMShortHeaderMagic uint32 = 0x4c433032
+	// LCMFragmentedHeaderMagic is the LCM fragmented message header magic number
+	LCMFragmentedHeaderMagic uint32 = 0x4c433033
+)
+
+// LCM (Lightweight Communications and Marshalling) is a set of libraries and
+// tools for message passing and data marshalling, targeted at real-time systems
+// where high bandwidth and low latency are critical. It provides a
+// publish/subscribe message passing model and automatic
+// marshalling/unmarshalling code generation with bindings for applications in a
+// variety of programming languages.
+//
+// References
+//   https://lcm-proj.github.io/
+//   https://github.com/lcm-proj/lcm
+type LCM struct {
+	// Common (short & fragmented header) fields
+	Magic          uint32
+	SequenceNumber uint32
+	// Fragmented header only fields
+	PayloadSize    uint32
+	FragmentOffset uint32
+	FragmentNumber uint16
+	TotalFragments uint16
+	// Common field
+	ChannelName string
+	// Gopacket helper fields
+	Fragmented  bool
+	fingerprint LCMFingerprint
+	contents    []byte
+	payload     []byte
+}
+
+// LCMFingerprint is the type of an LCM fingerprint.
+type LCMFingerprint uint64
+
+var (
+	// lcmLayerTypes contains a map of all LCM fingerprints that we support and
+	// their LayerType
+	lcmLayerTypes  = map[LCMFingerprint]gopacket.LayerType{}
+	layerTypeIndex = 1001
+)
+
+// RegisterLCMLayerType allows users to register decoders for the underlying
+// LCM payload. This is done based on the fingerprint that every LCM message
+// contains and which identifies it uniquely. If num is not the zero value it
+// will be used when registering the layer type with gopacket's RegisterLayerType;
+// otherwise an incremental value starting from 1001 will be used.
+func RegisterLCMLayerType(num int, name string, fingerprint LCMFingerprint,
+	decoder gopacket.Decoder) gopacket.LayerType {
+	metadata := gopacket.LayerTypeMetadata{Name: name, Decoder: decoder}
+
+	if num == 0 {
+		num = layerTypeIndex
+		layerTypeIndex++
+	}
+
+	lcmLayerTypes[fingerprint] = gopacket.RegisterLayerType(num, metadata)
+
+	return lcmLayerTypes[fingerprint]
+}
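+
+// Illustrative sketch (editor's addition, not upstream gopacket code): a user
+// of this package could register a decoder for a hypothetical LCM message
+// type so that GetLCMLayerType resolves its fingerprint instead of falling
+// back to gopacket.LayerTypePayload. The name, number and fingerprint below
+// are made up.
+//
+//	var LayerTypeExampleLCMMsg = RegisterLCMLayerType(0, "ExampleLCMMsg",
+//		LCMFingerprint(0x1122334455667788),
+//		gopacket.DecodeFunc(func(data []byte, p gopacket.PacketBuilder) error {
+//			// unmarshal the LCM-encoded struct from data here
+//			return nil
+//		}))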
+
+// SupportedLCMFingerprints returns a slice of all LCM fingerprints that have
+// been registered so far.
+func SupportedLCMFingerprints() []LCMFingerprint {
+	fingerprints := make([]LCMFingerprint, 0, len(lcmLayerTypes))
+	for fp := range lcmLayerTypes {
+		fingerprints = append(fingerprints, fp)
+	}
+	return fingerprints
+}
+
+// GetLCMLayerType returns the underlying LCM message's LayerType.
+// This LayerType has to be registered by using RegisterLCMLayerType.
+func GetLCMLayerType(fingerprint LCMFingerprint) gopacket.LayerType {
+	layerType, ok := lcmLayerTypes[fingerprint]
+	if !ok {
+		return gopacket.LayerTypePayload
+	}
+
+	return layerType
+}
+
+func decodeLCM(data []byte, p gopacket.PacketBuilder) error {
+	lcm := &LCM{}
+
+	err := lcm.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+
+	p.AddLayer(lcm)
+	p.SetApplicationLayer(lcm)
+
+	return p.NextDecoder(lcm.NextLayerType())
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (lcm *LCM) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		df.SetTruncated()
+		return errors.New("LCM < 8 bytes")
+	}
+	offset := 0
+
+	lcm.Magic = binary.BigEndian.Uint32(data[offset:4])
+	offset += 4
+
+	if lcm.Magic != LCMShortHeaderMagic && lcm.Magic != LCMFragmentedHeaderMagic {
+		return fmt.Errorf("Received LCM header magic %v does not match known "+
+			"LCM magic numbers. Dropping packet.", lcm.Magic)
+	}
+
+	lcm.SequenceNumber = binary.BigEndian.Uint32(data[offset:8])
+	offset += 4
+
+	if lcm.Magic == LCMFragmentedHeaderMagic {
+		lcm.Fragmented = true
+
+		lcm.PayloadSize = binary.BigEndian.Uint32(data[offset : offset+4])
+		offset += 4
+
+		lcm.FragmentOffset = binary.BigEndian.Uint32(data[offset : offset+4])
+		offset += 4
+
+		lcm.FragmentNumber = binary.BigEndian.Uint16(data[offset : offset+2])
+		offset += 2
+
+		lcm.TotalFragments = binary.BigEndian.Uint16(data[offset : offset+2])
+		offset += 2
+	} else {
+		lcm.Fragmented = false
+	}
+
+	if !lcm.Fragmented || (lcm.Fragmented && lcm.FragmentNumber == 0) {
+		buffer := make([]byte, 0)
+		for _, b := range data[offset:] {
+			offset++
+
+			if b == 0 {
+				break
+			}
+
+			buffer = append(buffer, b)
+		}
+
+		lcm.ChannelName = string(buffer)
+	}
+
+	lcm.fingerprint = LCMFingerprint(
+		binary.BigEndian.Uint64(data[offset : offset+8]))
+
+	lcm.contents = data[:offset]
+	lcm.payload = data[offset:]
+
+	return nil
+}
+
+// CanDecode returns a set of layers that LCM objects can decode.
+// As LCM objects can only decode the LCM layer, we just return that layer.
+func (lcm LCM) CanDecode() gopacket.LayerClass {
+	return LayerTypeLCM
+}
+
+// NextLayerType specifies the LCM payload layer type following this header.
+// As LCM packets are serialized structs with unique fingerprints for each unique
+// combination of data types, lookup of the correct layer type is based on that
+// fingerprint.
+func (lcm LCM) NextLayerType() gopacket.LayerType {
+	if !lcm.Fragmented || (lcm.Fragmented && lcm.FragmentNumber == 0) {
+		return GetLCMLayerType(lcm.fingerprint)
+	}
+
+	return gopacket.LayerTypeFragment
+}
+
+// LayerType returns LayerTypeLCM
+func (lcm LCM) LayerType() gopacket.LayerType {
+	return LayerTypeLCM
+}
+
+// LayerContents returns the contents of the LCM header.
+func (lcm LCM) LayerContents() []byte {
+	return lcm.contents
+}
+
+// LayerPayload returns the payload following this LCM header.
+func (lcm LCM) LayerPayload() []byte {
+	return lcm.payload
+}
+
+// Payload returns the payload following this LCM header.
+func (lcm LCM) Payload() []byte {
+	return lcm.LayerPayload()
+}
+
+// Fingerprint returns the LCM fingerprint of the underlying message.
+func (lcm LCM) Fingerprint() LCMFingerprint {
+	return lcm.fingerprint
+}
diff --git a/vendor/github.com/google/gopacket/layers/linux_sll.go b/vendor/github.com/google/gopacket/layers/linux_sll.go
new file mode 100644
index 0000000..85a4f8b
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/linux_sll.go
@@ -0,0 +1,98 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
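+// LinuxSLLPacketType is the packet type field of a Linux cooked (SLL)
+// capture header, indicating the packet's direction/destination class.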
+type LinuxSLLPacketType uint16
+
+const (
+	LinuxSLLPacketTypeHost      LinuxSLLPacketType = 0 // To us
+	LinuxSLLPacketTypeBroadcast LinuxSLLPacketType = 1 // To all
+	LinuxSLLPacketTypeMulticast LinuxSLLPacketType = 2 // To group
+	LinuxSLLPacketTypeOtherhost LinuxSLLPacketType = 3 // To someone else
+	LinuxSLLPacketTypeOutgoing  LinuxSLLPacketType = 4 // Outgoing of any type
+	// These ones are invisible at the user level
+	LinuxSLLPacketTypeLoopback  LinuxSLLPacketType = 5 // MC/BRD frame looped back
+	LinuxSLLPacketTypeFastroute LinuxSLLPacketType = 6 // Fastrouted frame
+)
+
+func (l LinuxSLLPacketType) String() string {
+	switch l {
+	case LinuxSLLPacketTypeHost:
+		return "host"
+	case LinuxSLLPacketTypeBroadcast:
+		return "broadcast"
+	case LinuxSLLPacketTypeMulticast:
+		return "multicast"
+	case LinuxSLLPacketTypeOtherhost:
+		return "otherhost"
+	case LinuxSLLPacketTypeOutgoing:
+		return "outgoing"
+	case LinuxSLLPacketTypeLoopback:
+		return "loopback"
+	case LinuxSLLPacketTypeFastroute:
+		return "fastroute"
+	}
+	return fmt.Sprintf("Unknown(%d)", int(l))
+}
+
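+// LinuxSLL is the Linux "cooked" capture (SLL) pseudo-header that libpcap
+// prepends when capturing on the "any" device.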
+type LinuxSLL struct {
+	BaseLayer
+	PacketType   LinuxSLLPacketType
+	AddrLen      uint16
+	Addr         net.HardwareAddr
+	EthernetType EthernetType
+	AddrType     uint16
+}
+
+// LayerType returns LayerTypeLinuxSLL.
+func (sll *LinuxSLL) LayerType() gopacket.LayerType { return LayerTypeLinuxSLL }
+
+func (sll *LinuxSLL) CanDecode() gopacket.LayerClass {
+	return LayerTypeLinuxSLL
+}
+
+func (sll *LinuxSLL) LinkFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointMAC, sll.Addr, nil)
+}
+
+func (sll *LinuxSLL) NextLayerType() gopacket.LayerType {
+	return sll.EthernetType.LayerType()
+}
+
+func (sll *LinuxSLL) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 16 {
+		return errors.New("Linux SLL packet too small")
+	}
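+	// The 16-byte SLL header: packet type (2 bytes), ARPHRD_ link type (2),
+	// link-layer address length (2), link-layer address (8, zero-padded),
+	// and protocol/EtherType (2).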
+	sll.PacketType = LinuxSLLPacketType(binary.BigEndian.Uint16(data[0:2]))
+	sll.AddrType = binary.BigEndian.Uint16(data[2:4])
+	sll.AddrLen = binary.BigEndian.Uint16(data[4:6])
+
+	sll.Addr = net.HardwareAddr(data[6 : sll.AddrLen+6])
+	sll.EthernetType = EthernetType(binary.BigEndian.Uint16(data[14:16]))
+	sll.BaseLayer = BaseLayer{data[:16], data[16:]}
+
+	return nil
+}
+
+func decodeLinuxSLL(data []byte, p gopacket.PacketBuilder) error {
+	sll := &LinuxSLL{}
+	if err := sll.DecodeFromBytes(data, p); err != nil {
+		return err
+	}
+	p.AddLayer(sll)
+	p.SetLinkLayer(sll)
+	return p.NextDecoder(sll.EthernetType)
+}
diff --git a/vendor/github.com/google/gopacket/layers/llc.go b/vendor/github.com/google/gopacket/layers/llc.go
new file mode 100644
index 0000000..cad6803
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/llc.go
@@ -0,0 +1,193 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// LLC is the layer used for 802.2 Logical Link Control headers.
+// See http://standards.ieee.org/getieee802/download/802.2-1998.pdf
+type LLC struct {
+	BaseLayer
+	DSAP    uint8
+	IG      bool // true means group, false means individual
+	SSAP    uint8
+	CR      bool // true means response, false means command
+	Control uint16
+}
+
+// LayerType returns gopacket.LayerTypeLLC.
+func (l *LLC) LayerType() gopacket.LayerType { return LayerTypeLLC }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (l *LLC) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 3 {
+		return errors.New("LLC header too small")
+	}
+	l.DSAP = data[0] & 0xFE
+	l.IG = data[0]&0x1 != 0
+	l.SSAP = data[1] & 0xFE
+	l.CR = data[1]&0x1 != 0
+	l.Control = uint16(data[2])
+
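+	// I-format frames (low bit 0) and S-format frames (low bits 01) carry a
+	// 16-bit control field; U-format frames carry only the single control byte.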
+	if l.Control&0x1 == 0 || l.Control&0x3 == 0x1 {
+		if len(data) < 4 {
+			return errors.New("LLC header too small")
+		}
+		l.Control = l.Control<<8 | uint16(data[3])
+		l.Contents = data[:4]
+		l.Payload = data[4:]
+	} else {
+		l.Contents = data[:3]
+		l.Payload = data[3:]
+	}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (l *LLC) CanDecode() gopacket.LayerClass {
+	return LayerTypeLLC
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (l *LLC) NextLayerType() gopacket.LayerType {
+	switch {
+	case l.DSAP == 0xAA && l.SSAP == 0xAA:
+		return LayerTypeSNAP
+	case l.DSAP == 0x42 && l.SSAP == 0x42:
+		return LayerTypeSTP
+	}
+	return gopacket.LayerTypeZero // Not implemented
+}
+
+// SNAP is used inside LLC.  See
+// http://standards.ieee.org/getieee802/download/802-2001.pdf.
+// From http://en.wikipedia.org/wiki/Subnetwork_Access_Protocol:
+//  "[T]he Subnetwork Access Protocol (SNAP) is a mechanism for multiplexing,
+//  on networks using IEEE 802.2 LLC, more protocols than can be distinguished
+//  by the 8-bit 802.2 Service Access Point (SAP) fields."
+type SNAP struct {
+	BaseLayer
+	OrganizationalCode []byte
+	Type               EthernetType
+}
+
+// LayerType returns gopacket.LayerTypeSNAP.
+func (s *SNAP) LayerType() gopacket.LayerType { return LayerTypeSNAP }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (s *SNAP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 5 {
+		return errors.New("SNAP header too small")
+	}
+	s.OrganizationalCode = data[:3]
+	s.Type = EthernetType(binary.BigEndian.Uint16(data[3:5]))
+	s.BaseLayer = BaseLayer{data[:5], data[5:]}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (s *SNAP) CanDecode() gopacket.LayerClass {
+	return LayerTypeSNAP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (s *SNAP) NextLayerType() gopacket.LayerType {
+	// See BUG(gconnel) in decodeSNAP
+	return s.Type.LayerType()
+}
+
+func decodeLLC(data []byte, p gopacket.PacketBuilder) error {
+	l := &LLC{}
+	err := l.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(l)
+	return p.NextDecoder(l.NextLayerType())
+}
+
+func decodeSNAP(data []byte, p gopacket.PacketBuilder) error {
+	s := &SNAP{}
+	err := s.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(s)
+	// BUG(gconnell):  When decoding SNAP, we treat the SNAP type as an Ethernet
+	// type.  This may not actually be an ethernet type in all cases,
+	// depending on the organizational code.  Right now, we don't check.
+	return p.NextDecoder(s.Type)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (l *LLC) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var igFlag, crFlag byte
+	var length int
+
+	if l.Control&0xFF00 != 0 {
+		length = 4
+	} else {
+		length = 3
+	}
+
+	if l.DSAP&0x1 != 0 {
+		return errors.New("DSAP value invalid, should not include IG flag bit")
+	}
+
+	if l.SSAP&0x1 != 0 {
+		return errors.New("SSAP value invalid, should not include CR flag bit")
+	}
+
+	buf, err := b.PrependBytes(length)
+	if err != nil {
+		return err
+	}
+
+	igFlag = 0
+	if l.IG {
+		igFlag = 0x1
+	}
+
+	crFlag = 0
+	if l.CR {
+		crFlag = 0x1
+	}
+
+	buf[0] = l.DSAP + igFlag
+	buf[1] = l.SSAP + crFlag
+
+	if length == 4 {
+		buf[2] = uint8(l.Control >> 8)
+		buf[3] = uint8(l.Control)
+	} else {
+		buf[2] = uint8(l.Control)
+	}
+
+	return nil
+}
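+
+// exampleSerializeLLC is an illustrative sketch added for documentation and is
+// not part of the upstream gopacket API. It shows how SerializeTo above emits a
+// 3-byte header for an 8-bit (U-format) control field; a Control value above
+// 0xFF would produce the 4-byte form instead. The SAP values are placeholders.
+func exampleSerializeLLC() ([]byte, error) {
+	llc := &LLC{DSAP: 0xAA, SSAP: 0xAA, Control: 0x03}
+	buf := gopacket.NewSerializeBuffer()
+	if err := llc.SerializeTo(buf, gopacket.SerializeOptions{}); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil // expected bytes: 0xAA 0xAA 0x03
+}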
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (s *SNAP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(5)
+	if err != nil {
+		return err
+	}
+	buf[0] = s.OrganizationalCode[0]
+	buf[1] = s.OrganizationalCode[1]
+	buf[2] = s.OrganizationalCode[2]
+	binary.BigEndian.PutUint16(buf[3:5], uint16(s.Type))
+
+	return nil
+}
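+
+// exampleSerializeLLCSNAP is an illustrative sketch added for documentation and
+// is not part of the upstream gopacket API. It assumes the standard SNAP SAP
+// value (0xAA) and a zero OUI, and shows how an LLC/SNAP header pair could be
+// written in front of an arbitrary payload with gopacket.SerializeLayers.
+func exampleSerializeLLCSNAP(payload []byte) ([]byte, error) {
+	llc := &LLC{DSAP: 0xAA, SSAP: 0xAA, Control: 0x03}
+	snap := &SNAP{OrganizationalCode: []byte{0, 0, 0}, Type: EthernetTypeIPv4}
+	buf := gopacket.NewSerializeBuffer()
+	if err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{},
+		llc, snap, gopacket.Payload(payload)); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}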
diff --git a/vendor/github.com/google/gopacket/layers/lldp.go b/vendor/github.com/google/gopacket/layers/lldp.go
new file mode 100644
index 0000000..16a5bba
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/lldp.go
@@ -0,0 +1,1603 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// LLDPTLVType is the type of each TLV value in a LinkLayerDiscovery packet.
+type LLDPTLVType byte
+
+const (
+	LLDPTLVEnd             LLDPTLVType = 0
+	LLDPTLVChassisID       LLDPTLVType = 1
+	LLDPTLVPortID          LLDPTLVType = 2
+	LLDPTLVTTL             LLDPTLVType = 3
+	LLDPTLVPortDescription LLDPTLVType = 4
+	LLDPTLVSysName         LLDPTLVType = 5
+	LLDPTLVSysDescription  LLDPTLVType = 6
+	LLDPTLVSysCapabilities LLDPTLVType = 7
+	LLDPTLVMgmtAddress     LLDPTLVType = 8
+	LLDPTLVOrgSpecific     LLDPTLVType = 127
+)
+
+// LinkLayerDiscoveryValue is a TLV value inside a LinkLayerDiscovery packet layer.
+type LinkLayerDiscoveryValue struct {
+	Type   LLDPTLVType
+	Length uint16
+	Value  []byte
+}
+
+func (c *LinkLayerDiscoveryValue) len() int {
+	return 0
+}
+
+// LLDPChassisIDSubType specifies the value type for a single LLDPChassisID.ID
+type LLDPChassisIDSubType byte
+
+// LLDP Chassis Types
+const (
+	LLDPChassisIDSubTypeReserved    LLDPChassisIDSubType = 0
+	LLDPChassisIDSubTypeChassisComp LLDPChassisIDSubType = 1
+	LLDPChassisIDSubtypeIfaceAlias  LLDPChassisIDSubType = 2
+	LLDPChassisIDSubTypePortComp    LLDPChassisIDSubType = 3
+	LLDPChassisIDSubTypeMACAddr     LLDPChassisIDSubType = 4
+	LLDPChassisIDSubTypeNetworkAddr LLDPChassisIDSubType = 5
+	LLDPChassisIDSubtypeIfaceName   LLDPChassisIDSubType = 6
+	LLDPChassisIDSubTypeLocal       LLDPChassisIDSubType = 7
+)
+
+type LLDPChassisID struct {
+	Subtype LLDPChassisIDSubType
+	ID      []byte
+}
+
+func (c *LLDPChassisID) serialize() []byte {
+	buf := make([]byte, c.serializedLen())
+	idLen := uint16(LLDPTLVChassisID)<<9 | uint16(len(c.ID)+1) // TLV type in the top 7 bits, length in the low 9 bits; +1 for the subtype byte
+	binary.BigEndian.PutUint16(buf[0:2], idLen)
+	buf[2] = byte(c.Subtype)
+	copy(buf[3:], c.ID)
+	return buf
+}
+
+func (c *LLDPChassisID) serializedLen() int {
+	return len(c.ID) + 3 // +2 for id and length, +1 for subtype
+}
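+
+// exampleChassisIDTLVHeader is an illustrative sketch added for documentation
+// and is not part of the upstream gopacket API. It shows the TLV header packing
+// used by serialize above: for a MAC-address chassis ID (6-byte MAC plus one
+// subtype byte) the 16-bit header is 1<<9 | 7 == 0x0207.
+func exampleChassisIDTLVHeader() uint16 {
+	c := LLDPChassisID{Subtype: LLDPChassisIDSubTypeMACAddr, ID: make([]byte, 6)}
+	return uint16(LLDPTLVChassisID)<<9 | uint16(len(c.ID)+1)
+}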
+
+// LLDPPortIDSubType specifies the value type for a single LLDPPortID.ID
+type LLDPPortIDSubType byte
+
+// LLDP PortID types
+const (
+	LLDPPortIDSubtypeReserved       LLDPPortIDSubType = 0
+	LLDPPortIDSubtypeIfaceAlias     LLDPPortIDSubType = 1
+	LLDPPortIDSubtypePortComp       LLDPPortIDSubType = 2
+	LLDPPortIDSubtypeMACAddr        LLDPPortIDSubType = 3
+	LLDPPortIDSubtypeNetworkAddr    LLDPPortIDSubType = 4
+	LLDPPortIDSubtypeIfaceName      LLDPPortIDSubType = 5
+	LLDPPortIDSubtypeAgentCircuitID LLDPPortIDSubType = 6
+	LLDPPortIDSubtypeLocal          LLDPPortIDSubType = 7
+)
+
+type LLDPPortID struct {
+	Subtype LLDPPortIDSubType
+	ID      []byte
+}
+
+func (c *LLDPPortID) serialize() []byte {
+	buf := make([]byte, c.serializedLen())
+	idLen := uint16(LLDPTLVPortID)<<9 | uint16(len(c.ID)+1) // TLV type in the top 7 bits, length in the low 9 bits; +1 for the subtype byte
+	binary.BigEndian.PutUint16(buf[0:2], idLen)
+	buf[2] = byte(c.Subtype)
+	copy(buf[3:], c.ID)
+	return buf
+}
+
+func (c *LLDPPortID) serializedLen() int {
+	return len(c.ID) + 3 // +2 for id and length, +1 for subtype
+}
+
+// LinkLayerDiscovery is a packet layer containing the Link Layer Discovery Protocol.
+// See http://standards.ieee.org/getieee802/download/802.1AB-2009.pdf.
+// ChassisID, PortID and TTL are mandatory TLVs. Other values can be decoded
+// with DecodeValues().
+type LinkLayerDiscovery struct {
+	BaseLayer
+	ChassisID LLDPChassisID
+	PortID    LLDPPortID
+	TTL       uint16
+	Values    []LinkLayerDiscoveryValue
+}
+
+type IEEEOUI uint32
+
+// http://standards.ieee.org/develop/regauth/oui/oui.txt
+const (
+	IEEEOUI8021     IEEEOUI = 0x0080c2
+	IEEEOUI8023     IEEEOUI = 0x00120f
+	IEEEOUI80211    IEEEOUI = 0x000fac
+	IEEEOUI8021Qbg  IEEEOUI = 0x0013BF
+	IEEEOUICisco2   IEEEOUI = 0x000142
+	IEEEOUIMedia    IEEEOUI = 0x0012bb // TR-41
+	IEEEOUIProfinet IEEEOUI = 0x000ecf
+	IEEEOUIDCBX     IEEEOUI = 0x001b21
+)
+
+// LLDPOrgSpecificTLV is an Organisation-specific TLV
+type LLDPOrgSpecificTLV struct {
+	OUI     IEEEOUI
+	SubType uint8
+	Info    []byte
+}
+
+// LLDPCapabilities Types
+const (
+	LLDPCapsOther       uint16 = 1 << 0
+	LLDPCapsRepeater    uint16 = 1 << 1
+	LLDPCapsBridge      uint16 = 1 << 2
+	LLDPCapsWLANAP      uint16 = 1 << 3
+	LLDPCapsRouter      uint16 = 1 << 4
+	LLDPCapsPhone       uint16 = 1 << 5
+	LLDPCapsDocSis      uint16 = 1 << 6
+	LLDPCapsStationOnly uint16 = 1 << 7
+	LLDPCapsCVLAN       uint16 = 1 << 8
+	LLDPCapsSVLAN       uint16 = 1 << 9
+	LLDPCapsTmpr        uint16 = 1 << 10
+)
+
+// LLDPCapabilities represents the capabilities of a device
+type LLDPCapabilities struct {
+	Other       bool
+	Repeater    bool
+	Bridge      bool
+	WLANAP      bool
+	Router      bool
+	Phone       bool
+	DocSis      bool
+	StationOnly bool
+	CVLAN       bool
+	SVLAN       bool
+	TMPR        bool
+}
+
+type LLDPSysCapabilities struct {
+	SystemCap  LLDPCapabilities
+	EnabledCap LLDPCapabilities
+}
+
+type IANAAddressFamily byte
+
+// LLDP Management Address Subtypes
+// http://www.iana.org/assignments/address-family-numbers/address-family-numbers.xml
+const (
+	IANAAddressFamilyReserved IANAAddressFamily = 0
+	IANAAddressFamilyIPV4     IANAAddressFamily = 1
+	IANAAddressFamilyIPV6     IANAAddressFamily = 2
+	IANAAddressFamilyNSAP     IANAAddressFamily = 3
+	IANAAddressFamilyHDLC     IANAAddressFamily = 4
+	IANAAddressFamilyBBN1822  IANAAddressFamily = 5
+	IANAAddressFamily802      IANAAddressFamily = 6
+	IANAAddressFamilyE163     IANAAddressFamily = 7
+	IANAAddressFamilyE164     IANAAddressFamily = 8
+	IANAAddressFamilyF69      IANAAddressFamily = 9
+	IANAAddressFamilyX121     IANAAddressFamily = 10
+	IANAAddressFamilyIPX      IANAAddressFamily = 11
+	IANAAddressFamilyAtalk    IANAAddressFamily = 12
+	IANAAddressFamilyDecnet   IANAAddressFamily = 13
+	IANAAddressFamilyBanyan   IANAAddressFamily = 14
+	IANAAddressFamilyE164NSAP IANAAddressFamily = 15
+	IANAAddressFamilyDNS      IANAAddressFamily = 16
+	IANAAddressFamilyDistname IANAAddressFamily = 17
+	IANAAddressFamilyASNumber IANAAddressFamily = 18
+	IANAAddressFamilyXTPIPV4  IANAAddressFamily = 19
+	IANAAddressFamilyXTPIPV6  IANAAddressFamily = 20
+	IANAAddressFamilyXTP      IANAAddressFamily = 21
+	IANAAddressFamilyFcWWPN   IANAAddressFamily = 22
+	IANAAddressFamilyFcWWNN   IANAAddressFamily = 23
+	IANAAddressFamilyGWID     IANAAddressFamily = 24
+	IANAAddressFamilyL2VPN    IANAAddressFamily = 25
+)
+
+type LLDPInterfaceSubtype byte
+
+// LLDP Interface Subtypes
+const (
+	LLDPInterfaceSubtypeUnknown LLDPInterfaceSubtype = 1
+	LLDPInterfaceSubtypeifIndex LLDPInterfaceSubtype = 2
+	LLDPInterfaceSubtypeSysPort LLDPInterfaceSubtype = 3
+)
+
+type LLDPMgmtAddress struct {
+	Subtype          IANAAddressFamily
+	Address          []byte
+	InterfaceSubtype LLDPInterfaceSubtype
+	InterfaceNumber  uint32
+	OID              string
+}
+
+// LinkLayerDiscoveryInfo represents the decoded details for a set of LinkLayerDiscoveryValues.
+// Organisation-specific TLVs can be decoded using the various Decode*() methods.
+type LinkLayerDiscoveryInfo struct {
+	BaseLayer
+	PortDescription string
+	SysName         string
+	SysDescription  string
+	SysCapabilities LLDPSysCapabilities
+	MgmtAddress     LLDPMgmtAddress
+	OrgTLVs         []LLDPOrgSpecificTLV      // Private TLVs
+	Unknown         []LinkLayerDiscoveryValue // undecoded TLVs
+}
+
+// IEEE 802.1 TLV Subtypes
+const (
+	LLDP8021SubtypePortVLANID       uint8 = 1
+	LLDP8021SubtypeProtocolVLANID   uint8 = 2
+	LLDP8021SubtypeVLANName         uint8 = 3
+	LLDP8021SubtypeProtocolIdentity uint8 = 4
+	LLDP8021SubtypeVDIUsageDigest   uint8 = 5
+	LLDP8021SubtypeManagementVID    uint8 = 6
+	LLDP8021SubtypeLinkAggregation  uint8 = 7
+)
+
+// VLAN Port Protocol ID options
+const (
+	LLDPProtocolVLANIDCapability byte = 1 << 1
+	LLDPProtocolVLANIDStatus     byte = 1 << 2
+)
+
+type PortProtocolVLANID struct {
+	Supported bool
+	Enabled   bool
+	ID        uint16
+}
+
+type VLANName struct {
+	ID   uint16
+	Name string
+}
+
+type ProtocolIdentity []byte
+
+// LACP options
+const (
+	LLDPAggregationCapability byte = 1 << 0
+	LLDPAggregationStatus     byte = 1 << 1
+)
+
+// IEEE 802 Link Aggregation parameters
+type LLDPLinkAggregation struct {
+	Supported bool
+	Enabled   bool
+	PortID    uint32
+}
+
+// LLDPInfo8021 represents the information carried in 802.1 Org-specific TLVs
+type LLDPInfo8021 struct {
+	PVID               uint16
+	PPVIDs             []PortProtocolVLANID
+	VLANNames          []VLANName
+	ProtocolIdentities []ProtocolIdentity
+	VIDUsageDigest     uint32
+	ManagementVID      uint16
+	LinkAggregation    LLDPLinkAggregation
+}
+
+// IEEE 802.3 TLV Subtypes
+const (
+	LLDP8023SubtypeMACPHY          uint8 = 1
+	LLDP8023SubtypeMDIPower        uint8 = 2
+	LLDP8023SubtypeLinkAggregation uint8 = 3
+	LLDP8023SubtypeMTU             uint8 = 4
+)
+
+// MACPHY options
+const (
+	LLDPMACPHYCapability byte = 1 << 0
+	LLDPMACPHYStatus     byte = 1 << 1
+)
+
+// From IANA-MAU-MIB (introduced by RFC 4836) - dot3MauType
+const (
+	LLDPMAUTypeUnknown         uint16 = 0
+	LLDPMAUTypeAUI             uint16 = 1
+	LLDPMAUType10Base5         uint16 = 2
+	LLDPMAUTypeFOIRL           uint16 = 3
+	LLDPMAUType10Base2         uint16 = 4
+	LLDPMAUType10BaseT         uint16 = 5
+	LLDPMAUType10BaseFP        uint16 = 6
+	LLDPMAUType10BaseFB        uint16 = 7
+	LLDPMAUType10BaseFL        uint16 = 8
+	LLDPMAUType10BROAD36       uint16 = 9
+	LLDPMAUType10BaseT_HD      uint16 = 10
+	LLDPMAUType10BaseT_FD      uint16 = 11
+	LLDPMAUType10BaseFL_HD     uint16 = 12
+	LLDPMAUType10BaseFL_FD     uint16 = 13
+	LLDPMAUType100BaseT4       uint16 = 14
+	LLDPMAUType100BaseTX_HD    uint16 = 15
+	LLDPMAUType100BaseTX_FD    uint16 = 16
+	LLDPMAUType100BaseFX_HD    uint16 = 17
+	LLDPMAUType100BaseFX_FD    uint16 = 18
+	LLDPMAUType100BaseT2_HD    uint16 = 19
+	LLDPMAUType100BaseT2_FD    uint16 = 20
+	LLDPMAUType1000BaseX_HD    uint16 = 21
+	LLDPMAUType1000BaseX_FD    uint16 = 22
+	LLDPMAUType1000BaseLX_HD   uint16 = 23
+	LLDPMAUType1000BaseLX_FD   uint16 = 24
+	LLDPMAUType1000BaseSX_HD   uint16 = 25
+	LLDPMAUType1000BaseSX_FD   uint16 = 26
+	LLDPMAUType1000BaseCX_HD   uint16 = 27
+	LLDPMAUType1000BaseCX_FD   uint16 = 28
+	LLDPMAUType1000BaseT_HD    uint16 = 29
+	LLDPMAUType1000BaseT_FD    uint16 = 30
+	LLDPMAUType10GBaseX        uint16 = 31
+	LLDPMAUType10GBaseLX4      uint16 = 32
+	LLDPMAUType10GBaseR        uint16 = 33
+	LLDPMAUType10GBaseER       uint16 = 34
+	LLDPMAUType10GBaseLR       uint16 = 35
+	LLDPMAUType10GBaseSR       uint16 = 36
+	LLDPMAUType10GBaseW        uint16 = 37
+	LLDPMAUType10GBaseEW       uint16 = 38
+	LLDPMAUType10GBaseLW       uint16 = 39
+	LLDPMAUType10GBaseSW       uint16 = 40
+	LLDPMAUType10GBaseCX4      uint16 = 41
+	LLDPMAUType2BaseTL         uint16 = 42
+	LLDPMAUType10PASS_TS       uint16 = 43
+	LLDPMAUType100BaseBX10D    uint16 = 44
+	LLDPMAUType100BaseBX10U    uint16 = 45
+	LLDPMAUType100BaseLX10     uint16 = 46
+	LLDPMAUType1000BaseBX10D   uint16 = 47
+	LLDPMAUType1000BaseBX10U   uint16 = 48
+	LLDPMAUType1000BaseLX10    uint16 = 49
+	LLDPMAUType1000BasePX10D   uint16 = 50
+	LLDPMAUType1000BasePX10U   uint16 = 51
+	LLDPMAUType1000BasePX20D   uint16 = 52
+	LLDPMAUType1000BasePX20U   uint16 = 53
+	LLDPMAUType10GBaseT        uint16 = 54
+	LLDPMAUType10GBaseLRM      uint16 = 55
+	LLDPMAUType1000BaseKX      uint16 = 56
+	LLDPMAUType10GBaseKX4      uint16 = 57
+	LLDPMAUType10GBaseKR       uint16 = 58
+	LLDPMAUType10_1GBasePRX_D1 uint16 = 59
+	LLDPMAUType10_1GBasePRX_D2 uint16 = 60
+	LLDPMAUType10_1GBasePRX_D3 uint16 = 61
+	LLDPMAUType10_1GBasePRX_U1 uint16 = 62
+	LLDPMAUType10_1GBasePRX_U2 uint16 = 63
+	LLDPMAUType10_1GBasePRX_U3 uint16 = 64
+	LLDPMAUType10GBasePR_D1    uint16 = 65
+	LLDPMAUType10GBasePR_D2    uint16 = 66
+	LLDPMAUType10GBasePR_D3    uint16 = 67
+	LLDPMAUType10GBasePR_U1    uint16 = 68
+	LLDPMAUType10GBasePR_U3    uint16 = 69
+)
+
+// From RFC 3636 - ifMauAutoNegCapAdvertisedBits
+const (
+	LLDPMAUPMDOther        uint16 = 1 << 15
+	LLDPMAUPMD10BaseT      uint16 = 1 << 14
+	LLDPMAUPMD10BaseT_FD   uint16 = 1 << 13
+	LLDPMAUPMD100BaseT4    uint16 = 1 << 12
+	LLDPMAUPMD100BaseTX    uint16 = 1 << 11
+	LLDPMAUPMD100BaseTX_FD uint16 = 1 << 10
+	LLDPMAUPMD100BaseT2    uint16 = 1 << 9
+	LLDPMAUPMD100BaseT2_FD uint16 = 1 << 8
+	LLDPMAUPMDFDXPAUSE     uint16 = 1 << 7
+	LLDPMAUPMDFDXAPAUSE    uint16 = 1 << 6
+	LLDPMAUPMDFDXSPAUSE    uint16 = 1 << 5
+	LLDPMAUPMDFDXBPAUSE    uint16 = 1 << 4
+	LLDPMAUPMD1000BaseX    uint16 = 1 << 3
+	LLDPMAUPMD1000BaseX_FD uint16 = 1 << 2
+	LLDPMAUPMD1000BaseT    uint16 = 1 << 1
+	LLDPMAUPMD1000BaseT_FD uint16 = 1 << 0
+)
+
+// Inverted ifMauAutoNegCapAdvertisedBits if required
+// (Some manufacturers misinterpreted the spec -
+// see https://bugs.wireshark.org/bugzilla/show_bug.cgi?id=1455)
+const (
+	LLDPMAUPMDOtherInv        uint16 = 1 << 0
+	LLDPMAUPMD10BaseTInv      uint16 = 1 << 1
+	LLDPMAUPMD10BaseT_FDInv   uint16 = 1 << 2
+	LLDPMAUPMD100BaseT4Inv    uint16 = 1 << 3
+	LLDPMAUPMD100BaseTXInv    uint16 = 1 << 4
+	LLDPMAUPMD100BaseTX_FDInv uint16 = 1 << 5
+	LLDPMAUPMD100BaseT2Inv    uint16 = 1 << 6
+	LLDPMAUPMD100BaseT2_FDInv uint16 = 1 << 7
+	LLDPMAUPMDFDXPAUSEInv     uint16 = 1 << 8
+	LLDPMAUPMDFDXAPAUSEInv    uint16 = 1 << 9
+	LLDPMAUPMDFDXSPAUSEInv    uint16 = 1 << 10
+	LLDPMAUPMDFDXBPAUSEInv    uint16 = 1 << 11
+	LLDPMAUPMD1000BaseXInv    uint16 = 1 << 12
+	LLDPMAUPMD1000BaseX_FDInv uint16 = 1 << 13
+	LLDPMAUPMD1000BaseTInv    uint16 = 1 << 14
+	LLDPMAUPMD1000BaseT_FDInv uint16 = 1 << 15
+)
+
+type LLDPMACPHYConfigStatus struct {
+	AutoNegSupported  bool
+	AutoNegEnabled    bool
+	AutoNegCapability uint16
+	MAUType           uint16
+}
+
+// MDI Power options
+const (
+	LLDPMDIPowerPortClass    byte = 1 << 0
+	LLDPMDIPowerCapability   byte = 1 << 1
+	LLDPMDIPowerStatus       byte = 1 << 2
+	LLDPMDIPowerPairsAbility byte = 1 << 3
+)
+
+type LLDPPowerType byte
+
+type LLDPPowerSource byte
+
+type LLDPPowerPriority byte
+
+const (
+	LLDPPowerPriorityUnknown LLDPPowerPriority = 0
+	LLDPPowerPriorityMedium  LLDPPowerPriority = 1
+	LLDPPowerPriorityHigh    LLDPPowerPriority = 2
+	LLDPPowerPriorityLow     LLDPPowerPriority = 3
+)
+
+type LLDPPowerViaMDI8023 struct {
+	PortClassPSE    bool // false = PD
+	PSESupported    bool
+	PSEEnabled      bool
+	PSEPairsAbility bool
+	PSEPowerPair    uint8
+	PSEClass        uint8
+	Type            LLDPPowerType
+	Source          LLDPPowerSource
+	Priority        LLDPPowerPriority
+	Requested       uint16 // 1-510 Watts
+	Allocated       uint16 // 1-510 Watts
+}
+
+// LLDPInfo8023 represents the information carried in 802.3 Org-specific TLVs
+type LLDPInfo8023 struct {
+	MACPHYConfigStatus LLDPMACPHYConfigStatus
+	PowerViaMDI        LLDPPowerViaMDI8023
+	LinkAggregation    LLDPLinkAggregation
+	MTU                uint16
+}
+
+// IEEE 802.1Qbg TLV Subtypes
+const (
+	LLDP8021QbgEVB   uint8 = 0
+	LLDP8021QbgCDCP  uint8 = 1
+	LLDP8021QbgVDP   uint8 = 2
+	LLDP8021QbgEVB22 uint8 = 13
+)
+
+// LLDPEVBCapabilities Types
+const (
+	LLDPEVBCapsSTD uint16 = 1 << 7
+	LLDPEVBCapsRR  uint16 = 1 << 6
+	LLDPEVBCapsRTE uint16 = 1 << 2
+	LLDPEVBCapsECP uint16 = 1 << 1
+	LLDPEVBCapsVDP uint16 = 1 << 0
+)
+
+// LLDPEVBCapabilities represents the EVB capabilities of a device
+type LLDPEVBCapabilities struct {
+	StandardBridging            bool
+	ReflectiveRelay             bool
+	RetransmissionTimerExponent bool
+	EdgeControlProtocol         bool
+	VSIDiscoveryProtocol        bool
+}
+
+type LLDPEVBSettings struct {
+	Supported      LLDPEVBCapabilities
+	Enabled        LLDPEVBCapabilities
+	SupportedVSIs  uint16
+	ConfiguredVSIs uint16
+	RTEExponent    uint8
+}
+
+// LLDPInfo8021Qbg represents the information carried in 802.1Qbg Org-specific TLVs
+type LLDPInfo8021Qbg struct {
+	EVBSettings LLDPEVBSettings
+}
+
+type LLDPMediaSubtype uint8
+
+// Media TLV Subtypes
+const (
+	LLDPMediaTypeCapabilities LLDPMediaSubtype = 1
+	LLDPMediaTypeNetwork      LLDPMediaSubtype = 2
+	LLDPMediaTypeLocation     LLDPMediaSubtype = 3
+	LLDPMediaTypePower        LLDPMediaSubtype = 4
+	LLDPMediaTypeHardware     LLDPMediaSubtype = 5
+	LLDPMediaTypeFirmware     LLDPMediaSubtype = 6
+	LLDPMediaTypeSoftware     LLDPMediaSubtype = 7
+	LLDPMediaTypeSerial       LLDPMediaSubtype = 8
+	LLDPMediaTypeManufacturer LLDPMediaSubtype = 9
+	LLDPMediaTypeModel        LLDPMediaSubtype = 10
+	LLDPMediaTypeAssetID      LLDPMediaSubtype = 11
+)
+
+type LLDPMediaClass uint8
+
+// Media Class Values
+const (
+	LLDPMediaClassUndefined   LLDPMediaClass = 0
+	LLDPMediaClassEndpointI   LLDPMediaClass = 1
+	LLDPMediaClassEndpointII  LLDPMediaClass = 2
+	LLDPMediaClassEndpointIII LLDPMediaClass = 3
+	LLDPMediaClassNetwork     LLDPMediaClass = 4
+)
+
+// LLDPMediaCapabilities Types
+const (
+	LLDPMediaCapsLLDP      uint16 = 1 << 0
+	LLDPMediaCapsNetwork   uint16 = 1 << 1
+	LLDPMediaCapsLocation  uint16 = 1 << 2
+	LLDPMediaCapsPowerPSE  uint16 = 1 << 3
+	LLDPMediaCapsPowerPD   uint16 = 1 << 4
+	LLDPMediaCapsInventory uint16 = 1 << 5
+)
+
+// LLDPMediaCapabilities represents the LLDP Media capabilities of a device
+type LLDPMediaCapabilities struct {
+	Capabilities  bool
+	NetworkPolicy bool
+	Location      bool
+	PowerPSE      bool
+	PowerPD       bool
+	Inventory     bool
+	Class         LLDPMediaClass
+}
+
+type LLDPApplicationType uint8
+
+const (
+	LLDPAppTypeReserved            LLDPApplicationType = 0
+	LLDPAppTypeVoice               LLDPApplicationType = 1
+	LLDPappTypeVoiceSignaling      LLDPApplicationType = 2
+	LLDPappTypeGuestVoice          LLDPApplicationType = 3
+	LLDPappTypeGuestVoiceSignaling LLDPApplicationType = 4
+	LLDPappTypeSoftphoneVoice      LLDPApplicationType = 5
+	LLDPappTypeVideoConferencing   LLDPApplicationType = 6
+	LLDPappTypeStreamingVideo      LLDPApplicationType = 7
+	LLDPappTypeVideoSignaling      LLDPApplicationType = 8
+)
+
+type LLDPNetworkPolicy struct {
+	ApplicationType LLDPApplicationType
+	Defined         bool
+	Tagged          bool
+	VLANId          uint16
+	L2Priority      uint16
+	DSCPValue       uint8
+}
+
+type LLDPLocationFormat uint8
+
+const (
+	LLDPLocationFormatInvalid    LLDPLocationFormat = 0
+	LLDPLocationFormatCoordinate LLDPLocationFormat = 1
+	LLDPLocationFormatAddress    LLDPLocationFormat = 2
+	LLDPLocationFormatECS        LLDPLocationFormat = 3
+)
+
+type LLDPLocationAddressWhat uint8
+
+const (
+	LLDPLocationAddressWhatDHCP    LLDPLocationAddressWhat = 0
+	LLDPLocationAddressWhatNetwork LLDPLocationAddressWhat = 1
+	LLDPLocationAddressWhatClient  LLDPLocationAddressWhat = 2
+)
+
+type LLDPLocationAddressType uint8
+
+const (
+	LLDPLocationAddressTypeLanguage       LLDPLocationAddressType = 0
+	LLDPLocationAddressTypeNational       LLDPLocationAddressType = 1
+	LLDPLocationAddressTypeCounty         LLDPLocationAddressType = 2
+	LLDPLocationAddressTypeCity           LLDPLocationAddressType = 3
+	LLDPLocationAddressTypeCityDivision   LLDPLocationAddressType = 4
+	LLDPLocationAddressTypeNeighborhood   LLDPLocationAddressType = 5
+	LLDPLocationAddressTypeStreet         LLDPLocationAddressType = 6
+	LLDPLocationAddressTypeLeadingStreet  LLDPLocationAddressType = 16
+	LLDPLocationAddressTypeTrailingStreet LLDPLocationAddressType = 17
+	LLDPLocationAddressTypeStreetSuffix   LLDPLocationAddressType = 18
+	LLDPLocationAddressTypeHouseNum       LLDPLocationAddressType = 19
+	LLDPLocationAddressTypeHouseSuffix    LLDPLocationAddressType = 20
+	LLDPLocationAddressTypeLandmark       LLDPLocationAddressType = 21
+	LLDPLocationAddressTypeAdditional     LLDPLocationAddressType = 22
+	LLDPLocationAddressTypeName           LLDPLocationAddressType = 23
+	LLDPLocationAddressTypePostal         LLDPLocationAddressType = 24
+	LLDPLocationAddressTypeBuilding       LLDPLocationAddressType = 25
+	LLDPLocationAddressTypeUnit           LLDPLocationAddressType = 26
+	LLDPLocationAddressTypeFloor          LLDPLocationAddressType = 27
+	LLDPLocationAddressTypeRoom           LLDPLocationAddressType = 28
+	LLDPLocationAddressTypePlace          LLDPLocationAddressType = 29
+	LLDPLocationAddressTypeScript         LLDPLocationAddressType = 128
+)
+
+type LLDPLocationCoordinate struct {
+	LatitudeResolution  uint8
+	Latitude            uint64
+	LongitudeResolution uint8
+	Longitude           uint64
+	AltitudeType        uint8
+	AltitudeResolution  uint16
+	Altitude            uint32
+	Datum               uint8
+}
+
+type LLDPLocationAddressLine struct {
+	Type  LLDPLocationAddressType
+	Value string
+}
+
+type LLDPLocationAddress struct {
+	What         LLDPLocationAddressWhat
+	CountryCode  string
+	AddressLines []LLDPLocationAddressLine
+}
+
+type LLDPLocationECS struct {
+	ELIN string
+}
+
+// LLDPLocation represents a physical location.
+// Only one of the embedded types will contain values, depending on Format.
+type LLDPLocation struct {
+	Format     LLDPLocationFormat
+	Coordinate LLDPLocationCoordinate
+	Address    LLDPLocationAddress
+	ECS        LLDPLocationECS
+}
+
+type LLDPPowerViaMDI struct {
+	Type     LLDPPowerType
+	Source   LLDPPowerSource
+	Priority LLDPPowerPriority
+	Value    uint16
+}
+
+// LLDPInfoMedia represents the information carried in TR-41 Org-specific TLVs
+type LLDPInfoMedia struct {
+	MediaCapabilities LLDPMediaCapabilities
+	NetworkPolicy     LLDPNetworkPolicy
+	Location          LLDPLocation
+	PowerViaMDI       LLDPPowerViaMDI
+	HardwareRevision  string
+	FirmwareRevision  string
+	SoftwareRevision  string
+	SerialNumber      string
+	Manufacturer      string
+	Model             string
+	AssetID           string
+}
+
+type LLDPCisco2Subtype uint8
+
+// Cisco2 TLV Subtypes
+const (
+	LLDPCisco2PowerViaMDI LLDPCisco2Subtype = 1
+)
+
+const (
+	LLDPCiscoPSESupport   uint8 = 1 << 0
+	LLDPCiscoArchShared   uint8 = 1 << 1
+	LLDPCiscoPDSparePair  uint8 = 1 << 2
+	LLDPCiscoPSESparePair uint8 = 1 << 3
+)
+
+// LLDPInfoCisco2 represents the information carried in Cisco Org-specific TLVs
+type LLDPInfoCisco2 struct {
+	PSEFourWirePoESupported       bool
+	PDSparePairArchitectureShared bool
+	PDRequestSparePairPoEOn       bool
+	PSESparePairPoEOn             bool
+}
+
+// Profinet Subtypes
+type LLDPProfinetSubtype uint8
+
+const (
+	LLDPProfinetPNIODelay         LLDPProfinetSubtype = 1
+	LLDPProfinetPNIOPortStatus    LLDPProfinetSubtype = 2
+	LLDPProfinetPNIOMRPPortStatus LLDPProfinetSubtype = 4
+	LLDPProfinetPNIOChassisMAC    LLDPProfinetSubtype = 5
+	LLDPProfinetPNIOPTCPStatus    LLDPProfinetSubtype = 6
+)
+
+type LLDPPNIODelay struct {
+	RXLocal    uint32
+	RXRemote   uint32
+	TXLocal    uint32
+	TXRemote   uint32
+	CableLocal uint32
+}
+
+type LLDPPNIOPortStatus struct {
+	Class2 uint16
+	Class3 uint16
+}
+
+type LLDPPNIOMRPPortStatus struct {
+	UUID   []byte
+	Status uint16
+}
+
+type LLDPPNIOPTCPStatus struct {
+	MasterAddress     []byte
+	SubdomainUUID     []byte
+	IRDataUUID        []byte
+	PeriodValid       bool
+	PeriodLength      uint32
+	RedPeriodValid    bool
+	RedPeriodBegin    uint32
+	OrangePeriodValid bool
+	OrangePeriodBegin uint32
+	GreenPeriodValid  bool
+	GreenPeriodBegin  uint32
+}
+
+// LLDPInfoProfinet represents the information carried in Profinet Org-specific TLVs
+type LLDPInfoProfinet struct {
+	PNIODelay         LLDPPNIODelay
+	PNIOPortStatus    LLDPPNIOPortStatus
+	PNIOMRPPortStatus LLDPPNIOMRPPortStatus
+	ChassisMAC        []byte
+	PNIOPTCPStatus    LLDPPNIOPTCPStatus
+}
+
+// LayerType returns gopacket.LayerTypeLinkLayerDiscovery.
+func (c *LinkLayerDiscovery) LayerType() gopacket.LayerType {
+	return LayerTypeLinkLayerDiscovery
+}
+
+// SerializeTo serializes the LLDP layer into wire format and writes it to the SerializeBuffer.
+func (c *LinkLayerDiscovery) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	chassIDLen := c.ChassisID.serializedLen()
+	portIDLen := c.PortID.serializedLen()
+	vb, err := b.AppendBytes(chassIDLen + portIDLen + 4) // +4 for TTL
+	if err != nil {
+		return err
+	}
+	copy(vb[:chassIDLen], c.ChassisID.serialize())
+	copy(vb[chassIDLen:], c.PortID.serialize())
+	ttlIDLen := uint16(LLDPTLVTTL)<<9 | uint16(2)
+	binary.BigEndian.PutUint16(vb[chassIDLen+portIDLen:], ttlIDLen)
+	binary.BigEndian.PutUint16(vb[chassIDLen+portIDLen+2:], c.TTL)
+
+	for _, v := range c.Values {
+		vb, err := b.AppendBytes(int(v.Length) + 2) // +2 for TLV type and length; 1 byte for subtype is included in v.Value
+		if err != nil {
+			return err
+		}
+		idLen := ((uint16(v.Type) << 9) | v.Length)
+		binary.BigEndian.PutUint16(vb[0:2], idLen)
+		copy(vb[2:], v.Value)
+	}
+
+	vb, err = b.AppendBytes(2) // End Tlv, 2 bytes
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(vb, uint16(0)) // end-of-LLDPDU TLV: type 0, length 0 (2 bytes, all zero)
+	return nil
+
+}
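+
+// exampleSerializeLLDP is an illustrative sketch added for documentation and is
+// not part of the upstream gopacket API. It shows how the mandatory Chassis ID,
+// Port ID and TTL TLVs handled by SerializeTo above could be emitted; the MAC
+// address, port name and TTL are placeholder values.
+func exampleSerializeLLDP() ([]byte, error) {
+	lldp := &LinkLayerDiscovery{
+		ChassisID: LLDPChassisID{Subtype: LLDPChassisIDSubTypeMACAddr, ID: []byte{0x00, 0x11, 0x22, 0x33, 0x44, 0x55}},
+		PortID:    LLDPPortID{Subtype: LLDPPortIDSubtypeIfaceName, ID: []byte("eth0")},
+		TTL:       120,
+	}
+	buf := gopacket.NewSerializeBuffer()
+	if err := lldp.SerializeTo(buf, gopacket.SerializeOptions{}); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}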
+
+func decodeLinkLayerDiscovery(data []byte, p gopacket.PacketBuilder) error {
+	var vals []LinkLayerDiscoveryValue
+	vData := data[0:]
+	for len(vData) > 0 {
+		if len(vData) < 2 {
+			p.SetTruncated()
+			return errors.New("LLDP vdata < 2 bytes")
+		}
+		nbit := vData[0] & 0x01
+		t := LLDPTLVType(vData[0] >> 1)
+		val := LinkLayerDiscoveryValue{Type: t, Length: uint16(nbit)<<8 + uint16(vData[1])}
+		if val.Length > 0 {
+			if len(vData) < int(val.Length+2) {
+				p.SetTruncated()
+				return fmt.Errorf("LLDP VData < %d bytes", val.Length+2)
+			}
+			val.Value = vData[2 : val.Length+2]
+		}
+		vals = append(vals, val)
+		if t == LLDPTLVEnd {
+			break
+		}
+		if len(vData) < int(2+val.Length) {
+			return errors.New("Malformed LinkLayerDiscovery Header")
+		}
+		vData = vData[2+val.Length:]
+	}
+	if len(vals) < 4 {
+		return errors.New("Missing mandatory LinkLayerDiscovery TLV")
+	}
+	c := &LinkLayerDiscovery{}
+	gotEnd := false
+	for _, v := range vals {
+		switch v.Type {
+		case LLDPTLVEnd:
+			gotEnd = true
+		case LLDPTLVChassisID:
+			if len(v.Value) < 2 {
+				return errors.New("Malformed LinkLayerDiscovery ChassisID TLV")
+			}
+			c.ChassisID.Subtype = LLDPChassisIDSubType(v.Value[0])
+			c.ChassisID.ID = v.Value[1:]
+		case LLDPTLVPortID:
+			if len(v.Value) < 2 {
+				return errors.New("Malformed LinkLayerDiscovery PortID TLV")
+			}
+			c.PortID.Subtype = LLDPPortIDSubType(v.Value[0])
+			c.PortID.ID = v.Value[1:]
+		case LLDPTLVTTL:
+			if len(v.Value) < 2 {
+				return errors.New("Malformed LinkLayerDiscovery TTL TLV")
+			}
+			c.TTL = binary.BigEndian.Uint16(v.Value[0:2])
+		default:
+			c.Values = append(c.Values, v)
+		}
+	}
+	if c.ChassisID.Subtype == 0 || c.PortID.Subtype == 0 || !gotEnd {
+		return errors.New("Missing mandatory LinkLayerDiscovery TLV")
+	}
+	c.Contents = data
+	p.AddLayer(c)
+
+	info := &LinkLayerDiscoveryInfo{}
+	p.AddLayer(info)
+	for _, v := range c.Values {
+		switch v.Type {
+		case LLDPTLVPortDescription:
+			info.PortDescription = string(v.Value)
+		case LLDPTLVSysName:
+			info.SysName = string(v.Value)
+		case LLDPTLVSysDescription:
+			info.SysDescription = string(v.Value)
+		case LLDPTLVSysCapabilities:
+			if err := checkLLDPTLVLen(v, 4); err != nil {
+				return err
+			}
+			info.SysCapabilities.SystemCap = getCapabilities(binary.BigEndian.Uint16(v.Value[0:2]))
+			info.SysCapabilities.EnabledCap = getCapabilities(binary.BigEndian.Uint16(v.Value[2:4]))
+		case LLDPTLVMgmtAddress:
+			if err := checkLLDPTLVLen(v, 9); err != nil {
+				return err
+			}
+			mlen := v.Value[0]
+			if err := checkLLDPTLVLen(v, int(mlen+7)); err != nil {
+				return err
+			}
+			info.MgmtAddress.Subtype = IANAAddressFamily(v.Value[1])
+			info.MgmtAddress.Address = v.Value[2 : mlen+1]
+			info.MgmtAddress.InterfaceSubtype = LLDPInterfaceSubtype(v.Value[mlen+1])
+			info.MgmtAddress.InterfaceNumber = binary.BigEndian.Uint32(v.Value[mlen+2 : mlen+6])
+			olen := v.Value[mlen+6]
+			if err := checkLLDPTLVLen(v, int(mlen+7+olen)); err != nil {
+				return err
+			}
+			info.MgmtAddress.OID = string(v.Value[mlen+7 : mlen+7+olen])
+		case LLDPTLVOrgSpecific:
+			if err := checkLLDPTLVLen(v, 4); err != nil {
+				return err
+			}
+			info.OrgTLVs = append(info.OrgTLVs, LLDPOrgSpecificTLV{IEEEOUI(binary.BigEndian.Uint32(append([]byte{byte(0)}, v.Value[0:3]...))), uint8(v.Value[3]), v.Value[4:]})
+		}
+	}
+	return nil
+}
+
+func (l *LinkLayerDiscoveryInfo) Decode8021() (info LLDPInfo8021, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUI8021 {
+			continue
+		}
+		switch o.SubType {
+		case LLDP8021SubtypePortVLANID:
+			if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+				return
+			}
+			info.PVID = binary.BigEndian.Uint16(o.Info[0:2])
+		case LLDP8021SubtypeProtocolVLANID:
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPProtocolVLANIDCapability > 0)
+			en := (o.Info[0]&LLDPProtocolVLANIDStatus > 0)
+			id := binary.BigEndian.Uint16(o.Info[1:3])
+			info.PPVIDs = append(info.PPVIDs, PortProtocolVLANID{sup, en, id})
+		case LLDP8021SubtypeVLANName:
+			if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+				return
+			}
+			id := binary.BigEndian.Uint16(o.Info[0:2])
+			info.VLANNames = append(info.VLANNames, VLANName{id, string(o.Info[3:])})
+		case LLDP8021SubtypeProtocolIdentity:
+			if err = checkLLDPOrgSpecificLen(o, 1); err != nil {
+				return
+			}
+			l := int(o.Info[0])
+			if l > 0 {
+				info.ProtocolIdentities = append(info.ProtocolIdentities, o.Info[1:1+l])
+			}
+		case LLDP8021SubtypeVDIUsageDigest:
+			if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+				return
+			}
+			info.VIDUsageDigest = binary.BigEndian.Uint32(o.Info[0:4])
+		case LLDP8021SubtypeManagementVID:
+			if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+				return
+			}
+			info.ManagementVID = binary.BigEndian.Uint16(o.Info[0:2])
+		case LLDP8021SubtypeLinkAggregation:
+			if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPAggregationCapability > 0)
+			en := (o.Info[0]&LLDPAggregationStatus > 0)
+			info.LinkAggregation = LLDPLinkAggregation{sup, en, binary.BigEndian.Uint32(o.Info[1:5])}
+		}
+	}
+	return
+}
+
+func (l *LinkLayerDiscoveryInfo) Decode8023() (info LLDPInfo8023, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUI8023 {
+			continue
+		}
+		switch o.SubType {
+		case LLDP8023SubtypeMACPHY:
+			if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPMACPHYCapability > 0)
+			en := (o.Info[0]&LLDPMACPHYStatus > 0)
+			ca := binary.BigEndian.Uint16(o.Info[1:3])
+			mau := binary.BigEndian.Uint16(o.Info[3:5])
+			info.MACPHYConfigStatus = LLDPMACPHYConfigStatus{sup, en, ca, mau}
+		case LLDP8023SubtypeMDIPower:
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			info.PowerViaMDI.PortClassPSE = (o.Info[0]&LLDPMDIPowerPortClass > 0)
+			info.PowerViaMDI.PSESupported = (o.Info[0]&LLDPMDIPowerCapability > 0)
+			info.PowerViaMDI.PSEEnabled = (o.Info[0]&LLDPMDIPowerStatus > 0)
+			info.PowerViaMDI.PSEPairsAbility = (o.Info[0]&LLDPMDIPowerPairsAbility > 0)
+			info.PowerViaMDI.PSEPowerPair = uint8(o.Info[1])
+			info.PowerViaMDI.PSEClass = uint8(o.Info[2])
+			if len(o.Info) >= 7 {
+				info.PowerViaMDI.Type = LLDPPowerType((o.Info[3] & 0xc0) >> 6)
+				info.PowerViaMDI.Source = LLDPPowerSource((o.Info[3] & 0x30) >> 4)
+				if info.PowerViaMDI.Type == 1 || info.PowerViaMDI.Type == 3 {
+					info.PowerViaMDI.Source += 128 // For Stringify purposes
+				}
+				info.PowerViaMDI.Priority = LLDPPowerPriority(o.Info[3] & 0x0f)
+				info.PowerViaMDI.Requested = binary.BigEndian.Uint16(o.Info[4:6])
+				info.PowerViaMDI.Allocated = binary.BigEndian.Uint16(o.Info[6:8])
+			}
+		case LLDP8023SubtypeLinkAggregation:
+			if err = checkLLDPOrgSpecificLen(o, 5); err != nil {
+				return
+			}
+			sup := (o.Info[0]&LLDPAggregationCapability > 0)
+			en := (o.Info[0]&LLDPAggregationStatus > 0)
+			info.LinkAggregation = LLDPLinkAggregation{sup, en, binary.BigEndian.Uint32(o.Info[1:5])}
+		case LLDP8023SubtypeMTU:
+			if err = checkLLDPOrgSpecificLen(o, 2); err != nil {
+				return
+			}
+			info.MTU = binary.BigEndian.Uint16(o.Info[0:2])
+		}
+	}
+	return
+}
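+
+// exampleDecodeLLDPOrgTLVs is an illustrative sketch added for documentation
+// and is not part of the upstream gopacket API. It assumes a packet that has
+// already been decoded down to the LinkLayerDiscoveryInfo layer and shows how
+// Decode8021 and Decode8023 above are typically combined to read the port VLAN
+// ID and MTU carried in organisation-specific TLVs.
+func exampleDecodeLLDPOrgTLVs(pkt gopacket.Packet) (pvid uint16, mtu uint16, err error) {
+	layer := pkt.Layer(LayerTypeLinkLayerDiscoveryInfo)
+	if layer == nil {
+		return 0, 0, errors.New("no LinkLayerDiscoveryInfo layer in packet")
+	}
+	info := layer.(*LinkLayerDiscoveryInfo)
+	i8021, err := info.Decode8021()
+	if err != nil {
+		return 0, 0, err
+	}
+	i8023, err := info.Decode8023()
+	if err != nil {
+		return 0, 0, err
+	}
+	return i8021.PVID, i8023.MTU, nil
+}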
+
+func (l *LinkLayerDiscoveryInfo) Decode8021Qbg() (info LLDPInfo8021Qbg, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUI8021Qbg {
+			continue
+		}
+		switch o.SubType {
+		case LLDP8021QbgEVB:
+			if err = checkLLDPOrgSpecificLen(o, 9); err != nil {
+				return
+			}
+			info.EVBSettings.Supported = getEVBCapabilities(binary.BigEndian.Uint16(o.Info[0:2]))
+			info.EVBSettings.Enabled = getEVBCapabilities(binary.BigEndian.Uint16(o.Info[2:4]))
+			info.EVBSettings.SupportedVSIs = binary.BigEndian.Uint16(o.Info[4:6])
+			info.EVBSettings.ConfiguredVSIs = binary.BigEndian.Uint16(o.Info[6:8])
+			info.EVBSettings.RTEExponent = uint8(o.Info[8])
+		}
+	}
+	return
+}
+
+func (l *LinkLayerDiscoveryInfo) DecodeMedia() (info LLDPInfoMedia, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUIMedia {
+			continue
+		}
+		switch LLDPMediaSubtype(o.SubType) {
+		case LLDPMediaTypeCapabilities:
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			b := binary.BigEndian.Uint16(o.Info[0:2])
+			info.MediaCapabilities.Capabilities = (b & LLDPMediaCapsLLDP) > 0
+			info.MediaCapabilities.NetworkPolicy = (b & LLDPMediaCapsNetwork) > 0
+			info.MediaCapabilities.Location = (b & LLDPMediaCapsLocation) > 0
+			info.MediaCapabilities.PowerPSE = (b & LLDPMediaCapsPowerPSE) > 0
+			info.MediaCapabilities.PowerPD = (b & LLDPMediaCapsPowerPD) > 0
+			info.MediaCapabilities.Inventory = (b & LLDPMediaCapsInventory) > 0
+			info.MediaCapabilities.Class = LLDPMediaClass(o.Info[2])
+		case LLDPMediaTypeNetwork:
+			if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+				return
+			}
+			info.NetworkPolicy.ApplicationType = LLDPApplicationType(o.Info[0])
+			b := binary.BigEndian.Uint16(o.Info[1:3])
+			info.NetworkPolicy.Defined = (b & 0x8000) == 0
+			info.NetworkPolicy.Tagged = (b & 0x4000) > 0
+			info.NetworkPolicy.VLANId = (b & 0x1ffe) >> 1
+			b = binary.BigEndian.Uint16(o.Info[2:4])
+			info.NetworkPolicy.L2Priority = (b & 0x01c0) >> 6
+			info.NetworkPolicy.DSCPValue = uint8(o.Info[3] & 0x3f)
+		case LLDPMediaTypeLocation:
+			if err = checkLLDPOrgSpecificLen(o, 1); err != nil {
+				return
+			}
+			info.Location.Format = LLDPLocationFormat(o.Info[0])
+			o.Info = o.Info[1:]
+			switch info.Location.Format {
+			case LLDPLocationFormatCoordinate:
+				if err = checkLLDPOrgSpecificLen(o, 16); err != nil {
+					return
+				}
+				info.Location.Coordinate.LatitudeResolution = uint8(o.Info[0]&0xfc) >> 2
+				b := binary.BigEndian.Uint64(o.Info[0:8])
+				info.Location.Coordinate.Latitude = (b & 0x03ffffffff000000) >> 24
+				info.Location.Coordinate.LongitudeResolution = uint8(o.Info[5]&0xfc) >> 2
+				b = binary.BigEndian.Uint64(o.Info[5:13])
+				info.Location.Coordinate.Longitude = (b & 0x03ffffffff000000) >> 24
+				info.Location.Coordinate.AltitudeType = uint8((o.Info[10] & 0x30) >> 4)
+				b1 := binary.BigEndian.Uint16(o.Info[10:12])
+				info.Location.Coordinate.AltitudeResolution = (b1 & 0xfc0) >> 6
+				b2 := binary.BigEndian.Uint32(o.Info[11:15])
+				info.Location.Coordinate.Altitude = b2 & 0x3fffffff
+				info.Location.Coordinate.Datum = uint8(o.Info[15])
+			case LLDPLocationFormatAddress:
+				if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+					return
+				}
+				//ll := uint8(o.Info[0])
+				info.Location.Address.What = LLDPLocationAddressWhat(o.Info[1])
+				info.Location.Address.CountryCode = string(o.Info[2:4])
+				data := o.Info[4:]
+				for len(data) > 1 {
+					aType := LLDPLocationAddressType(data[0])
+					aLen := int(data[1])
+					if len(data) >= aLen+2 {
+						info.Location.Address.AddressLines = append(info.Location.Address.AddressLines, LLDPLocationAddressLine{aType, string(data[2 : aLen+2])})
+						data = data[aLen+2:]
+					} else {
+						break
+					}
+				}
+			case LLDPLocationFormatECS:
+				info.Location.ECS.ELIN = string(o.Info)
+			}
+		case LLDPMediaTypePower:
+			if err = checkLLDPOrgSpecificLen(o, 3); err != nil {
+				return
+			}
+			info.PowerViaMDI.Type = LLDPPowerType((o.Info[0] & 0xc0) >> 6)
+			info.PowerViaMDI.Source = LLDPPowerSource((o.Info[0] & 0x30) >> 4)
+			if info.PowerViaMDI.Type == 1 || info.PowerViaMDI.Type == 3 {
+				info.PowerViaMDI.Source += 128 // For Stringify purposes
+			}
+			info.PowerViaMDI.Priority = LLDPPowerPriority(o.Info[0] & 0x0f)
+			info.PowerViaMDI.Value = binary.BigEndian.Uint16(o.Info[1:3]) * 100 // 0 to 102.3 w, 0.1W increments
+		case LLDPMediaTypeHardware:
+			info.HardwareRevision = string(o.Info)
+		case LLDPMediaTypeFirmware:
+			info.FirmwareRevision = string(o.Info)
+		case LLDPMediaTypeSoftware:
+			info.SoftwareRevision = string(o.Info)
+		case LLDPMediaTypeSerial:
+			info.SerialNumber = string(o.Info)
+		case LLDPMediaTypeManufacturer:
+			info.Manufacturer = string(o.Info)
+		case LLDPMediaTypeModel:
+			info.Model = string(o.Info)
+		case LLDPMediaTypeAssetID:
+			info.AssetID = string(o.Info)
+		}
+	}
+	return
+}
+
+func (l *LinkLayerDiscoveryInfo) DecodeCisco2() (info LLDPInfoCisco2, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUICisco2 {
+			continue
+		}
+		switch LLDPCisco2Subtype(o.SubType) {
+		case LLDPCisco2PowerViaMDI:
+			if err = checkLLDPOrgSpecificLen(o, 1); err != nil {
+				return
+			}
+			info.PSEFourWirePoESupported = (o.Info[0] & LLDPCiscoPSESupport) > 0
+			info.PDSparePairArchitectureShared = (o.Info[0] & LLDPCiscoArchShared) > 0
+			info.PDRequestSparePairPoEOn = (o.Info[0] & LLDPCiscoPDSparePair) > 0
+			info.PSESparePairPoEOn = (o.Info[0] & LLDPCiscoPSESparePair) > 0
+		}
+	}
+	return
+}
+
+func (l *LinkLayerDiscoveryInfo) DecodeProfinet() (info LLDPInfoProfinet, err error) {
+	for _, o := range l.OrgTLVs {
+		if o.OUI != IEEEOUIProfinet {
+			continue
+		}
+		switch LLDPProfinetSubtype(o.SubType) {
+		case LLDPProfinetPNIODelay:
+			if err = checkLLDPOrgSpecificLen(o, 20); err != nil {
+				return
+			}
+			info.PNIODelay.RXLocal = binary.BigEndian.Uint32(o.Info[0:4])
+			info.PNIODelay.RXRemote = binary.BigEndian.Uint32(o.Info[4:8])
+			info.PNIODelay.TXLocal = binary.BigEndian.Uint32(o.Info[8:12])
+			info.PNIODelay.TXRemote = binary.BigEndian.Uint32(o.Info[12:16])
+			info.PNIODelay.CableLocal = binary.BigEndian.Uint32(o.Info[16:20])
+		case LLDPProfinetPNIOPortStatus:
+			if err = checkLLDPOrgSpecificLen(o, 4); err != nil {
+				return
+			}
+			info.PNIOPortStatus.Class2 = binary.BigEndian.Uint16(o.Info[0:2])
+			info.PNIOPortStatus.Class3 = binary.BigEndian.Uint16(o.Info[2:4])
+		case LLDPProfinetPNIOMRPPortStatus:
+			if err = checkLLDPOrgSpecificLen(o, 18); err != nil {
+				return
+			}
+			info.PNIOMRPPortStatus.UUID = o.Info[0:16]
+			info.PNIOMRPPortStatus.Status = binary.BigEndian.Uint16(o.Info[16:18])
+		case LLDPProfinetPNIOChassisMAC:
+			if err = checkLLDPOrgSpecificLen(o, 6); err != nil {
+				return
+			}
+			info.ChassisMAC = o.Info[0:6]
+		case LLDPProfinetPNIOPTCPStatus:
+			if err = checkLLDPOrgSpecificLen(o, 54); err != nil {
+				return
+			}
+			info.PNIOPTCPStatus.MasterAddress = o.Info[0:6]
+			info.PNIOPTCPStatus.SubdomainUUID = o.Info[6:22]
+			info.PNIOPTCPStatus.IRDataUUID = o.Info[22:38]
+			b := binary.BigEndian.Uint32(o.Info[38:42])
+			info.PNIOPTCPStatus.PeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.PeriodLength = b & 0x7fffffff
+			b = binary.BigEndian.Uint32(o.Info[42:46])
+			info.PNIOPTCPStatus.RedPeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.RedPeriodBegin = b & 0x7fffffff
+			b = binary.BigEndian.Uint32(o.Info[46:50])
+			info.PNIOPTCPStatus.OrangePeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.OrangePeriodBegin = b & 0x7fffffff
+			b = binary.BigEndian.Uint32(o.Info[50:54])
+			info.PNIOPTCPStatus.GreenPeriodValid = (b & 0x80000000) > 0
+			info.PNIOPTCPStatus.GreenPeriodBegin = b & 0x7fffffff
+		}
+	}
+	return
+}
+
+// LayerType returns gopacket.LayerTypeLinkLayerDiscoveryInfo.
+func (c *LinkLayerDiscoveryInfo) LayerType() gopacket.LayerType {
+	return LayerTypeLinkLayerDiscoveryInfo
+}
+
+func getCapabilities(v uint16) (c LLDPCapabilities) {
+	c.Other = (v&LLDPCapsOther > 0)
+	c.Repeater = (v&LLDPCapsRepeater > 0)
+	c.Bridge = (v&LLDPCapsBridge > 0)
+	c.WLANAP = (v&LLDPCapsWLANAP > 0)
+	c.Router = (v&LLDPCapsRouter > 0)
+	c.Phone = (v&LLDPCapsPhone > 0)
+	c.DocSis = (v&LLDPCapsDocSis > 0)
+	c.StationOnly = (v&LLDPCapsStationOnly > 0)
+	c.CVLAN = (v&LLDPCapsCVLAN > 0)
+	c.SVLAN = (v&LLDPCapsSVLAN > 0)
+	c.TMPR = (v&LLDPCapsTmpr > 0)
+	return
+}
+
+func getEVBCapabilities(v uint16) (c LLDPEVBCapabilities) {
+	c.StandardBridging = (v & LLDPEVBCapsSTD) > 0
+	c.ReflectiveRelay = (v & LLDPEVBCapsRR) > 0
+	c.RetransmissionTimerExponent = (v & LLDPEVBCapsRTE) > 0
+	c.EdgeControlProtocol = (v & LLDPEVBCapsECP) > 0
+	c.VSIDiscoveryProtocol = (v & LLDPEVBCapsVDP) > 0
+	return
+}
+
+func (t LLDPTLVType) String() (s string) {
+	switch t {
+	case LLDPTLVEnd:
+		s = "TLV End"
+	case LLDPTLVChassisID:
+		s = "Chassis ID"
+	case LLDPTLVPortID:
+		s = "Port ID"
+	case LLDPTLVTTL:
+		s = "TTL"
+	case LLDPTLVPortDescription:
+		s = "Port Description"
+	case LLDPTLVSysName:
+		s = "System Name"
+	case LLDPTLVSysDescription:
+		s = "System Description"
+	case LLDPTLVSysCapabilities:
+		s = "System Capabilities"
+	case LLDPTLVMgmtAddress:
+		s = "Management Address"
+	case LLDPTLVOrgSpecific:
+		s = "Organisation Specific"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPChassisIDSubType) String() (s string) {
+	switch t {
+	case LLDPChassisIDSubTypeReserved:
+		s = "Reserved"
+	case LLDPChassisIDSubTypeChassisComp:
+		s = "Chassis Component"
+	case LLDPChassisIDSubtypeIfaceAlias:
+		s = "Interface Alias"
+	case LLDPChassisIDSubTypePortComp:
+		s = "Port Component"
+	case LLDPChassisIDSubTypeMACAddr:
+		s = "MAC Address"
+	case LLDPChassisIDSubTypeNetworkAddr:
+		s = "Network Address"
+	case LLDPChassisIDSubtypeIfaceName:
+		s = "Interface Name"
+	case LLDPChassisIDSubTypeLocal:
+		s = "Local"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPPortIDSubType) String() (s string) {
+	switch t {
+	case LLDPPortIDSubtypeReserved:
+		s = "Reserved"
+	case LLDPPortIDSubtypeIfaceAlias:
+		s = "Interface Alias"
+	case LLDPPortIDSubtypePortComp:
+		s = "Port Component"
+	case LLDPPortIDSubtypeMACAddr:
+		s = "MAC Address"
+	case LLDPPortIDSubtypeNetworkAddr:
+		s = "Network Address"
+	case LLDPPortIDSubtypeIfaceName:
+		s = "Interface Name"
+	case LLDPPortIDSubtypeAgentCircuitID:
+		s = "Agent Circuit ID"
+	case LLDPPortIDSubtypeLocal:
+		s = "Local"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t IANAAddressFamily) String() (s string) {
+	switch t {
+	case IANAAddressFamilyReserved:
+		s = "Reserved"
+	case IANAAddressFamilyIPV4:
+		s = "IPv4"
+	case IANAAddressFamilyIPV6:
+		s = "IPv6"
+	case IANAAddressFamilyNSAP:
+		s = "NSAP"
+	case IANAAddressFamilyHDLC:
+		s = "HDLC"
+	case IANAAddressFamilyBBN1822:
+		s = "BBN 1822"
+	case IANAAddressFamily802:
+		s = "802 media plus Ethernet 'canonical format'"
+	case IANAAddressFamilyE163:
+		s = "E.163"
+	case IANAAddressFamilyE164:
+		s = "E.164 (SMDS, Frame Relay, ATM)"
+	case IANAAddressFamilyF69:
+		s = "F.69 (Telex)"
+	case IANAAddressFamilyX121:
+		s = "X.121, X.25, Frame Relay"
+	case IANAAddressFamilyIPX:
+		s = "IPX"
+	case IANAAddressFamilyAtalk:
+		s = "Appletalk"
+	case IANAAddressFamilyDecnet:
+		s = "Decnet IV"
+	case IANAAddressFamilyBanyan:
+		s = "Banyan Vines"
+	case IANAAddressFamilyE164NSAP:
+		s = "E.164 with NSAP format subaddress"
+	case IANAAddressFamilyDNS:
+		s = "DNS"
+	case IANAAddressFamilyDistname:
+		s = "Distinguished Name"
+	case IANAAddressFamilyASNumber:
+		s = "AS Number"
+	case IANAAddressFamilyXTPIPV4:
+		s = "XTP over IP version 4"
+	case IANAAddressFamilyXTPIPV6:
+		s = "XTP over IP version 6"
+	case IANAAddressFamilyXTP:
+		s = "XTP native mode XTP"
+	case IANAAddressFamilyFcWWPN:
+		s = "Fibre Channel World-Wide Port Name"
+	case IANAAddressFamilyFcWWNN:
+		s = "Fibre Channel World-Wide Node Name"
+	case IANAAddressFamilyGWID:
+		s = "GWID"
+	case IANAAddressFamilyL2VPN:
+		s = "AFI for Layer 2 VPN"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPInterfaceSubtype) String() (s string) {
+	switch t {
+	case LLDPInterfaceSubtypeUnknown:
+		s = "Unknown"
+	case LLDPInterfaceSubtypeifIndex:
+		s = "IfIndex"
+	case LLDPInterfaceSubtypeSysPort:
+		s = "System Port Number"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPPowerType) String() (s string) {
+	switch t {
+	case 0:
+		s = "Type 2 PSE Device"
+	case 1:
+		s = "Type 2 PD Device"
+	case 2:
+		s = "Type 1 PSE Device"
+	case 3:
+		s = "Type 1 PD Device"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPPowerSource) String() (s string) {
+	switch t {
+	// PD Device
+	case 0:
+		s = "Unknown"
+	case 1:
+		s = "PSE"
+	case 2:
+		s = "Local"
+	case 3:
+		s = "PSE and Local"
+	// PSE Device  (Actual value  + 128)
+	case 128:
+		s = "Unknown"
+	case 129:
+		s = "Primary Power Source"
+	case 130:
+		s = "Backup Power Source"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPPowerPriority) String() (s string) {
+	switch t {
+	case 0:
+		s = "Unknown"
+	case 1:
+		s = "Critical"
+	case 2:
+		s = "High"
+	case 3:
+		s = "Low"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPMediaSubtype) String() (s string) {
+	switch t {
+	case LLDPMediaTypeCapabilities:
+		s = "Media Capabilities "
+	case LLDPMediaTypeNetwork:
+		s = "Network Policy"
+	case LLDPMediaTypeLocation:
+		s = "Location Identification"
+	case LLDPMediaTypePower:
+		s = "Extended Power-via-MDI"
+	case LLDPMediaTypeHardware:
+		s = "Hardware Revision"
+	case LLDPMediaTypeFirmware:
+		s = "Firmware Revision"
+	case LLDPMediaTypeSoftware:
+		s = "Software Revision"
+	case LLDPMediaTypeSerial:
+		s = "Serial Number"
+	case LLDPMediaTypeManufacturer:
+		s = "Manufacturer"
+	case LLDPMediaTypeModel:
+		s = "Model"
+	case LLDPMediaTypeAssetID:
+		s = "Asset ID"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPMediaClass) String() (s string) {
+	switch t {
+	case LLDPMediaClassUndefined:
+		s = "Undefined"
+	case LLDPMediaClassEndpointI:
+		s = "Endpoint Class I"
+	case LLDPMediaClassEndpointII:
+		s = "Endpoint Class II"
+	case LLDPMediaClassEndpointIII:
+		s = "Endpoint Class III"
+	case LLDPMediaClassNetwork:
+		s = "Network connectivity "
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPApplicationType) String() (s string) {
+	switch t {
+	case LLDPAppTypeReserved:
+		s = "Reserved"
+	case LLDPAppTypeVoice:
+		s = "Voice"
+	case LLDPappTypeVoiceSignaling:
+		s = "Voice Signaling"
+	case LLDPappTypeGuestVoice:
+		s = "Guest Voice"
+	case LLDPappTypeGuestVoiceSignaling:
+		s = "Guest Voice Signaling"
+	case LLDPappTypeSoftphoneVoice:
+		s = "Softphone Voice"
+	case LLDPappTypeVideoConferencing:
+		s = "Video Conferencing"
+	case LLDPappTypeStreamingVideo:
+		s = "Streaming Video"
+	case LLDPappTypeVideoSignaling:
+		s = "Video Signaling"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPLocationFormat) String() (s string) {
+	switch t {
+	case LLDPLocationFormatInvalid:
+		s = "Invalid"
+	case LLDPLocationFormatCoordinate:
+		s = "Coordinate-based LCI"
+	case LLDPLocationFormatAddress:
+		s = "Address-based LCI"
+	case LLDPLocationFormatECS:
+		s = "ECS ELIN"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t LLDPLocationAddressType) String() (s string) {
+	switch t {
+	case LLDPLocationAddressTypeLanguage:
+		s = "Language"
+	case LLDPLocationAddressTypeNational:
+		s = "National subdivisions (province, state, etc)"
+	case LLDPLocationAddressTypeCounty:
+		s = "County, parish, district"
+	case LLDPLocationAddressTypeCity:
+		s = "City, township"
+	case LLDPLocationAddressTypeCityDivision:
+		s = "City division, borough, ward"
+	case LLDPLocationAddressTypeNeighborhood:
+		s = "Neighborhood, block"
+	case LLDPLocationAddressTypeStreet:
+		s = "Street"
+	case LLDPLocationAddressTypeLeadingStreet:
+		s = "Leading street direction"
+	case LLDPLocationAddressTypeTrailingStreet:
+		s = "Trailing street suffix"
+	case LLDPLocationAddressTypeStreetSuffix:
+		s = "Street suffix"
+	case LLDPLocationAddressTypeHouseNum:
+		s = "House number"
+	case LLDPLocationAddressTypeHouseSuffix:
+		s = "House number suffix"
+	case LLDPLocationAddressTypeLandmark:
+		s = "Landmark or vanity address"
+	case LLDPLocationAddressTypeAdditional:
+		s = "Additional location information"
+	case LLDPLocationAddressTypeName:
+		s = "Name"
+	case LLDPLocationAddressTypePostal:
+		s = "Postal/ZIP code"
+	case LLDPLocationAddressTypeBuilding:
+		s = "Building"
+	case LLDPLocationAddressTypeUnit:
+		s = "Unit"
+	case LLDPLocationAddressTypeFloor:
+		s = "Floor"
+	case LLDPLocationAddressTypeRoom:
+		s = "Room number"
+	case LLDPLocationAddressTypePlace:
+		s = "Place type"
+	case LLDPLocationAddressTypeScript:
+		s = "Script"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func checkLLDPTLVLen(v LinkLayerDiscoveryValue, l int) (err error) {
+	if len(v.Value) < l {
+		err = fmt.Errorf("Invalid TLV %v length %d (wanted minimum %v)", v.Type, len(v.Value), l)
+	}
+	return
+}
+
+func checkLLDPOrgSpecificLen(o LLDPOrgSpecificTLV, l int) (err error) {
+	if len(o.Info) < l {
+		err = fmt.Errorf("Invalid Org Specific TLV %v length %d (wanted minimum %v)", o.SubType, len(o.Info), l)
+	}
+	return
+}
diff --git a/vendor/github.com/google/gopacket/layers/loopback.go b/vendor/github.com/google/gopacket/layers/loopback.go
new file mode 100644
index 0000000..839f760
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/loopback.go
@@ -0,0 +1,80 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// Loopback contains the header for loopback encapsulation.  This header is
+// used by both BSD and OpenBSD style loopback decoding (pcap's DLT_NULL
+// and DLT_LOOP, respectively).
+type Loopback struct {
+	BaseLayer
+	Family ProtocolFamily
+}
+
+// LayerType returns LayerTypeLoopback.
+func (l *Loopback) LayerType() gopacket.LayerType { return LayerTypeLoopback }
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (l *Loopback) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		return errors.New("Loopback packet too small")
+	}
+
+	// The protocol could be either big-endian or little-endian, we're
+	// not sure.  But we're PRETTY sure that the value is less than
+	// 256, so we can check the first two bytes.
+	var prot uint32
+	if data[0] == 0 && data[1] == 0 {
+		prot = binary.BigEndian.Uint32(data[:4])
+	} else {
+		prot = binary.LittleEndian.Uint32(data[:4])
+	}
+	if prot > 0xFF {
+		return fmt.Errorf("Invalid loopback protocol %q", data[:4])
+	}
+
+	l.Family = ProtocolFamily(prot)
+	l.BaseLayer = BaseLayer{data[:4], data[4:]}
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (l *Loopback) CanDecode() gopacket.LayerClass {
+	return LayerTypeLoopback
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (l *Loopback) NextLayerType() gopacket.LayerType {
+	return l.Family.LayerType()
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+func (l *Loopback) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	binary.LittleEndian.PutUint32(bytes, uint32(l.Family))
+	return nil
+}
+
+func decodeLoopback(data []byte, p gopacket.PacketBuilder) error {
+	l := Loopback{}
+	if err := l.DecodeFromBytes(data, gopacket.NilDecodeFeedback); err != nil {
+		return err
+	}
+	p.AddLayer(&l)
+	return p.NextDecoder(l.Family)
+}
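+
+// exampleSerializeLoopback is an illustrative sketch added for documentation
+// and is not part of the upstream gopacket API. It shows how SerializeTo above
+// prepends the 4-byte little-endian family word in front of a payload; the
+// family value 2 is assumed here as the common AF_INET (IPv4) value.
+func exampleSerializeLoopback(payload []byte) ([]byte, error) {
+	buf := gopacket.NewSerializeBuffer()
+	if err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{},
+		&Loopback{Family: ProtocolFamily(2)},
+		gopacket.Payload(payload)); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}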
diff --git a/vendor/github.com/google/gopacket/layers/mldv1.go b/vendor/github.com/google/gopacket/layers/mldv1.go
new file mode 100644
index 0000000..e1bb1dc
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/mldv1.go
@@ -0,0 +1,182 @@
+// Copyright 2018 GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"net"
+	"time"
+
+	"github.com/google/gopacket"
+)
+
+// MLDv1Message represents the common structure of all MLDv1 messages
+type MLDv1Message struct {
+	BaseLayer
+	// 3.4. Maximum Response Delay
+	MaximumResponseDelay time.Duration
+	// 3.6. Multicast Address
+	// Zero in general query
+	// Specific IPv6 multicast address otherwise
+	MulticastAddress net.IP
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv1Message) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 20 {
+		df.SetTruncated()
+		return errors.New("MLDv1 message layer less than 20 bytes")
+	}
+
+	m.MaximumResponseDelay = time.Duration(binary.BigEndian.Uint16(data[0:2])) * time.Millisecond
+	// data[2:4] is reserved and not used in mldv1
+	m.MulticastAddress = data[4:20]
+
+	return nil
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (*MLDv1Message) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MLDv1Message) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf, err := b.PrependBytes(20)
+	if err != nil {
+		return err
+	}
+
+	if m.MaximumResponseDelay < 0 {
+		return errors.New("maximum response delay must not be negative")
+	}
+	dms := m.MaximumResponseDelay / time.Millisecond
+	if dms > math.MaxUint16 {
+		return fmt.Errorf("maximum response delay %dms is more than the allowed 65535ms", dms)
+	}
+	binary.BigEndian.PutUint16(buf[0:2], uint16(dms))
+
+	copy(buf[2:4], []byte{0x0, 0x0})
+
+	ma16 := m.MulticastAddress.To16()
+	if ma16 == nil {
+		return fmt.Errorf("invalid multicast address '%s'", m.MulticastAddress)
+	}
+	copy(buf[4:20], ma16)
+
+	return nil
+}
+
+// String returns a nicely formatted one-line summary of this layer.
+func (m *MLDv1Message) String() string {
+	return fmt.Sprintf(
+		"Maximum Response Delay: %dms, Multicast Address: %s",
+		m.MaximumResponseDelay/time.Millisecond,
+		m.MulticastAddress)
+}
+
+// MLDv1MulticastListenerQueryMessage are sent by the router to determine
+// whether there are multicast listeners on the link.
+// https://tools.ietf.org/html/rfc2710 Page 5
+type MLDv1MulticastListenerQueryMessage struct {
+	MLDv1Message
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv1MulticastListenerQueryMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	err := m.MLDv1Message.DecodeFromBytes(data, df)
+	if err != nil {
+		return err
+	}
+
+	if len(data) > 20 {
+		m.Payload = data[20:]
+	}
+
+	return nil
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerQuery.
+func (*MLDv1MulticastListenerQueryMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv1MulticastListenerQuery
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerQueryMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv1MulticastListenerQuery
+}
+
+// IsGeneralQuery is true when this is a general query.
+// In a Query message, the Multicast Address field is set to zero when
+// sending a General Query.
+// https://tools.ietf.org/html/rfc2710#section-3.6
+func (m *MLDv1MulticastListenerQueryMessage) IsGeneralQuery() bool {
+	return net.IPv6zero.Equal(m.MulticastAddress)
+}
+
+// IsSpecificQuery is true when this is not a general query.
+// In a Query message, the Multicast Address field is set to a specific
+// IPv6 multicast address when sending a Multicast-Address-Specific Query.
+// https://tools.ietf.org/html/rfc2710#section-3.6
+func (m *MLDv1MulticastListenerQueryMessage) IsSpecificQuery() bool {
+	return !m.IsGeneralQuery()
+}
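+
+// A minimal usage sketch (illustrative only, not part of the API): build a
+// general query and serialize it with the usual gopacket buffer flow. The
+// 10 second delay is an arbitrary example value.
+//
+//	q := &MLDv1MulticastListenerQueryMessage{}
+//	q.MaximumResponseDelay = 10 * time.Second
+//	q.MulticastAddress = net.IPv6zero // zero address => general query
+//	fmt.Println(q.IsGeneralQuery()) // true
+//	buf := gopacket.NewSerializeBuffer()
+//	if err := q.SerializeTo(buf, gopacket.SerializeOptions{}); err != nil {
+//		// handle the error
+//	}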
+
+// MLDv1MulticastListenerReportMessage is sent by a client listening on
+// a specific multicast address to indicate that it is (still) listening
+// on the specific multicast address.
+// https://tools.ietf.org/html/rfc2710 Page 6
+type MLDv1MulticastListenerReportMessage struct {
+	MLDv1Message
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerReport.
+func (*MLDv1MulticastListenerReportMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv1MulticastListenerReport
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerReportMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv1MulticastListenerReport
+}
+
+// MLDv1MulticastListenerDoneMessage should be sent by a client when it ceases
+// to listen to a multicast address on an interface.
+// https://tools.ietf.org/html/rfc2710 Page 7
+type MLDv1MulticastListenerDoneMessage struct {
+	MLDv1Message
+}
+
+// LayerType returns LayerTypeMLDv1MulticastListenerDone.
+func (*MLDv1MulticastListenerDoneMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv1MulticastListenerDone
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv1MulticastListenerDoneMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv1MulticastListenerDone
+}
+
+func decodeMLDv1MulticastListenerReport(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv1MulticastListenerReportMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
+
+func decodeMLDv1MulticastListenerQuery(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv1MulticastListenerQueryMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
+
+func decodeMLDv1MulticastListenerDone(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv1MulticastListenerDoneMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/mldv2.go b/vendor/github.com/google/gopacket/layers/mldv2.go
new file mode 100644
index 0000000..05100a5
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/mldv2.go
@@ -0,0 +1,619 @@
+// Copyright 2018 GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"math"
+	"net"
+	"time"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	// S Flag bit is 1
+	mldv2STrue uint8 = 0x8
+
+	// S Flag value mask
+	//   mldv2STrue & mldv2SMask == mldv2STrue  // true
+	//          0x1 & mldv2SMask == mldv2STrue  // true
+	//          0x0 & mldv2SMask == mldv2STrue  // false
+	mldv2SMask uint8 = 0x8
+
+	// QRV value mask
+	mldv2QRVMask uint8 = 0x7
+)
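+
+// For example (illustrative value): a byte 20 of 0x0A has the S flag set
+// (0x0A & mldv2SMask == mldv2STrue) and a QRV of 2 (0x0A & mldv2QRVMask == 0x2).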
+
+// MLDv2MulticastListenerQueryMessage is sent by multicast routers to query the
+// multicast listening state of neighboring interfaces.
+// https://tools.ietf.org/html/rfc3810#section-5.1
+//
+// Some information, such as the Maximum Response Code and the Multicast
+// Address, is in the previous layer, LayerTypeMLDv1MulticastListenerQuery.
+type MLDv2MulticastListenerQueryMessage struct {
+	BaseLayer
+	// 5.1.3. Maximum Response Code
+	MaximumResponseCode uint16
+	// 5.1.5. Multicast Address
+	// Zero in general query
+	// Specific IPv6 multicast address otherwise
+	MulticastAddress net.IP
+	// 5.1.7. S Flag (Suppress Router-Side Processing)
+	SuppressRoutersideProcessing bool
+	// 5.1.8. QRV (Querier's Robustness Variable)
+	QueriersRobustnessVariable uint8
+	// 5.1.9. QQIC (Querier's Query Interval Code)
+	QueriersQueryIntervalCode uint8
+	// 5.1.10. Number of Sources (N)
+	NumberOfSources uint16
+	// 5.1.11 Source Address [i]
+	SourceAddresses []net.IP
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv2MulticastListenerQueryMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 24 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 24 bytes for Multicast Listener Query Message V2")
+	}
+
+	m.MaximumResponseCode = binary.BigEndian.Uint16(data[0:2])
+	// ignore data[2:4] as per https://tools.ietf.org/html/rfc3810#section-5.1.4
+	m.MulticastAddress = data[4:20]
+	m.SuppressRoutersideProcessing = (data[20] & mldv2SMask) == mldv2STrue
+	m.QueriersRobustnessVariable = data[20] & mldv2QRVMask
+	m.QueriersQueryIntervalCode = data[21]
+
+	m.NumberOfSources = binary.BigEndian.Uint16(data[22:24])
+
+	var end int
+	for i := uint16(0); i < m.NumberOfSources; i++ {
+		begin := 24 + (int(i) * 16)
+		end = begin + 16
+
+		if end > len(data) {
+			df.SetTruncated()
+			return fmt.Errorf("ICMP layer less than %d bytes for Multicast Listener Query Message V2", end)
+		}
+
+		m.SourceAddresses = append(m.SourceAddresses, data[begin:end])
+	}
+
+	return nil
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (*MLDv2MulticastListenerQueryMessage) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MLDv2MulticastListenerQueryMessage) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := m.serializeSourceAddressesTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(24)
+	if err != nil {
+		return err
+	}
+
+	binary.BigEndian.PutUint16(buf[0:2], m.MaximumResponseCode)
+	copy(buf[2:4], []byte{0x00, 0x00}) // set reserved bytes to zero
+
+	ma16 := m.MulticastAddress.To16()
+	if ma16 == nil {
+		return fmt.Errorf("invalid MulticastAddress '%s'", m.MulticastAddress)
+	}
+	copy(buf[4:20], ma16)
+
+	byte20 := m.QueriersRobustnessVariable & mldv2QRVMask
+	if m.SuppressRoutersideProcessing {
+		byte20 |= mldv2STrue
+	} else {
+		byte20 &= ^mldv2STrue // the complement of mldv2STrue
+	}
+	byte20 &= 0x0F // set reserved bits to zero
+	buf[20] = byte20
+
+	binary.BigEndian.PutUint16(buf[22:24], m.NumberOfSources)
+	buf[21] = m.QueriersQueryIntervalCode
+
+	return nil
+}
+
+// writes each source address to the buffer preserving the order
+func (m *MLDv2MulticastListenerQueryMessage) serializeSourceAddressesTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	numberOfSourceAddresses := len(m.SourceAddresses)
+	if numberOfSourceAddresses > math.MaxUint16 {
+		return fmt.Errorf(
+			"there are more than %d source addresses, but 65535 is the maximum number of supported addresses",
+			numberOfSourceAddresses)
+	}
+
+	if opts.FixLengths {
+		m.NumberOfSources = uint16(numberOfSourceAddresses)
+	}
+
+	lastSAIdx := numberOfSourceAddresses - 1
+	for k := range m.SourceAddresses {
+		i := lastSAIdx - k // reverse order
+
+		buf, err := b.PrependBytes(16)
+		if err != nil {
+			return err
+		}
+
+		sa16 := m.SourceAddresses[i].To16()
+		if sa16 == nil {
+			return fmt.Errorf("invalid source address [%d] '%s'", i, m.SourceAddresses[i])
+		}
+		copy(buf[0:16], sa16)
+	}
+
+	return nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv2MulticastListenerQueryMessage) String() string {
+	return fmt.Sprintf(
+		"Maximum Response Code: %#x (%dms), Multicast Address: %s, Suppress Routerside Processing: %t, QRV: %#x, QQIC: %#x (%ds), Number of Source Addresses: %d (actual: %d), Source Addresses: %s",
+		m.MaximumResponseCode,
+		m.MaximumResponseDelay()/time.Millisecond,
+		m.MulticastAddress,
+		m.SuppressRoutersideProcessing,
+		m.QueriersRobustnessVariable,
+		m.QueriersQueryIntervalCode,
+		m.QQI()/time.Second,
+		m.NumberOfSources,
+		len(m.SourceAddresses),
+		m.SourceAddresses)
+}
+
+// LayerType returns LayerTypeMLDv2MulticastListenerQuery.
+func (*MLDv2MulticastListenerQueryMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv2MulticastListenerQuery
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv2MulticastListenerQueryMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv2MulticastListenerQuery
+}
+
+// QQI calculates the Querier's Query Interval based on the QQIC
+// according to https://tools.ietf.org/html/rfc3810#section-5.1.9
+func (m *MLDv2MulticastListenerQueryMessage) QQI() time.Duration {
+	data := m.QueriersQueryIntervalCode
+	if data < 128 {
+		return time.Second * time.Duration(data)
+	}
+
+	exp := uint16(data) & 0x70 >> 4
+	mant := uint16(data) & 0x0F
+	// QQI = (mant | 0x10) << (exp + 3) seconds, per RFC 3810 section 5.1.9
+	return time.Second * time.Duration((mant|0x10)<<(exp+3))
+}
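+
+// Worked example of the RFC 3810 section 5.1.9 encoding: a QQIC of 0x8F has
+// exp = 0 and mant = 0xF, so QQI = (0xF | 0x10) << (0 + 3) = 248 seconds.
+// Any QQIC below 128 is taken literally, e.g. a QQIC of 125 means 125 seconds.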
+
+// SetQQI calculates and updates the Querier's Query Interval Code (QQIC)
+// according to https://tools.ietf.org/html/rfc3810#section-5.1.9
+func (m *MLDv2MulticastListenerQueryMessage) SetQQI(d time.Duration) error {
+	if d < 0 {
+		m.QueriersQueryIntervalCode = 0
+		return errors.New("QQI duration is negative")
+	}
+
+	if d == 0 {
+		m.QueriersQueryIntervalCode = 0
+		return nil
+	}
+
+	dms := d / time.Second
+	if dms < 128 {
+		// durations below 128s are carried literally in the QQIC field
+		m.QueriersQueryIntervalCode = uint8(dms)
+		return nil
+	}
+
+	if dms > 31744 { // mant=0xF, exp=0x7
+		m.QueriersQueryIntervalCode = 0xFF
+		return fmt.Errorf("QQI duration %ds is too long, maximum allowed is 31744s", dms)
+	}
+
+	value := uint16(dms) // ok, because 31744 < math.MaxUint16
+	exp := uint8(7)
+	for mask := uint16(0x4000); exp > 0; exp-- {
+		if mask&value != 0 {
+			break
+		}
+
+		mask >>= 1
+	}
+
+	mant := uint8(0x000F & (value >> (exp + 3)))
+	sig := uint8(0x10)
+	m.QueriersQueryIntervalCode = sig | exp<<4 | mant
+
+	return nil
+}
+
+// MaximumResponseDelay returns the Maximum Response Delay based on the
+// Maximum Response Code according to
+// https://tools.ietf.org/html/rfc3810#section-5.1.3
+func (m *MLDv2MulticastListenerQueryMessage) MaximumResponseDelay() time.Duration {
+	if m.MaximumResponseCode < 0x8000 {
+		// codes below 32768 are the delay in milliseconds
+		return time.Millisecond * time.Duration(m.MaximumResponseCode)
+	}
+
+	exp := m.MaximumResponseCode & 0x7000 >> 12
+	mant := m.MaximumResponseCode & 0x0FFF
+
+	// delay = (mant | 0x1000) << (exp + 3) milliseconds, per RFC 3810 section 5.1.3
+	return time.Millisecond * (time.Duration(mant|0x1000) << (exp + 3))
+}
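+
+// Worked example of the RFC 3810 section 5.1.3 encoding: a Maximum Response
+// Code of 0x8FFF has exp = 0 and mant = 0xFFF, so the delay is
+// (0xFFF | 0x1000) << (0 + 3) = 65528 milliseconds. Codes below 32768 are
+// taken literally as milliseconds.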
+
+// SetMLDv2MaximumResponseDelay updates the Maximum Response Code according to
+// https://tools.ietf.org/html/rfc3810#section-5.1.3
+func (m *MLDv2MulticastListenerQueryMessage) SetMLDv2MaximumResponseDelay(d time.Duration) error {
+	if d == 0 {
+		m.MaximumResponseCode = 0
+		return nil
+	}
+
+	if d < 0 {
+		return errors.New("maximum response delay must not be negative")
+	}
+
+	dms := d / time.Millisecond
+
+	if dms < 32768 {
+		// delays below 32768ms are carried literally in the Maximum Response Code
+		m.MaximumResponseCode = uint16(dms)
+		return nil
+	}
+
+	if dms > 4193280 { // mant=0xFFF, exp=0x7
+		return fmt.Errorf("maximum response delay %dms is bigger than the maximum of 4193280ms", dms)
+	}
+
+	value := uint32(dms) // ok, because 4193280 < math.MaxUint32
+	exp := uint8(7)
+	for mask := uint32(0x40000000); exp > 0; exp-- {
+		if mask&value != 0 {
+			break
+		}
+
+		mask >>= 1
+	}
+
+	mant := uint16(0x00000FFF & (value >> (exp + 3)))
+	sig := uint16(0x1000)
+	m.MaximumResponseCode = sig | uint16(exp)<<12 | mant
+	return nil
+}
+
+// MLDv2MulticastListenerReportMessage is sent by an IP node to report the
+// current multicast listening state, or changes therein.
+// https://tools.ietf.org/html/rfc3810#section-5.2
+type MLDv2MulticastListenerReportMessage struct {
+	BaseLayer
+	// 5.2.3. Nr of Mcast Address Records
+	NumberOfMulticastAddressRecords uint16
+	// 5.2.4. Multicast Address Record [i]
+	MulticastAddressRecords []MLDv2MulticastAddressRecord
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (m *MLDv2MulticastListenerReportMessage) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return errors.New("ICMP layer less than 4 bytes for Multicast Listener Report Message V2")
+	}
+
+	// ignore data[0:2] as per RFC
+	// https://tools.ietf.org/html/rfc3810#section-5.2.1
+	m.NumberOfMulticastAddressRecords = binary.BigEndian.Uint16(data[2:4])
+
+	begin := 4
+	for i := uint16(0); i < m.NumberOfMulticastAddressRecords; i++ {
+		mar := MLDv2MulticastAddressRecord{}
+		read, err := mar.decode(data[begin:], df)
+		if err != nil {
+			return err
+		}
+
+		m.MulticastAddressRecords = append(m.MulticastAddressRecords, mar)
+
+		begin += read
+	}
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MLDv2MulticastListenerReportMessage) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	lastItemIdx := len(m.MulticastAddressRecords) - 1
+	for k := range m.MulticastAddressRecords {
+		i := lastItemIdx - k // reverse order
+
+		err := m.MulticastAddressRecords[i].serializeTo(b, opts)
+		if err != nil {
+			return err
+		}
+	}
+
+	if opts.FixLengths {
+		numberOfMAR := len(m.MulticastAddressRecords)
+		if numberOfMAR > math.MaxUint16 {
+			return fmt.Errorf(
+				"%d multicast address records added, but the maximum is 65535",
+				numberOfMAR)
+		}
+
+		m.NumberOfMulticastAddressRecords = uint16(numberOfMAR)
+	}
+
+	buf, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+
+	copy(buf[0:2], []byte{0x0, 0x0})
+	binary.BigEndian.PutUint16(buf[2:4], m.NumberOfMulticastAddressRecords)
+	return nil
+}
+
+// String sums this layer up nicely formatted
+func (m *MLDv2MulticastListenerReportMessage) String() string {
+	return fmt.Sprintf(
+		"Number of Mcast Addr Records: %d (actual %d), Multicast Address Records: %+v",
+		m.NumberOfMulticastAddressRecords,
+		len(m.MulticastAddressRecords),
+		m.MulticastAddressRecords)
+}
+
+// LayerType returns LayerTypeMLDv2MulticastListenerReport.
+func (*MLDv2MulticastListenerReportMessage) LayerType() gopacket.LayerType {
+	return LayerTypeMLDv2MulticastListenerReport
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (*MLDv2MulticastListenerReportMessage) CanDecode() gopacket.LayerClass {
+	return LayerTypeMLDv2MulticastListenerReport
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (*MLDv2MulticastListenerReportMessage) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// MLDv2MulticastAddressRecordType holds the type of a
+// Multicast Address Record, according to
+// https://tools.ietf.org/html/rfc3810#section-5.2.5 and
+// https://tools.ietf.org/html/rfc3810#section-5.2.12
+type MLDv2MulticastAddressRecordType uint8
+
+const (
+	// MLDv2MulticastAddressRecordTypeModeIsIncluded stands for
+	// MODE_IS_INCLUDE - indicates that the interface has a filter
+	// mode of INCLUDE for the specified multicast address.
+	MLDv2MulticastAddressRecordTypeModeIsIncluded MLDv2MulticastAddressRecordType = 1
+	// MLDv2MulticastAddressRecordTypeModeIsExcluded stands for
+	// MODE_IS_EXCLUDE - indicates that the interface has a filter
+	// mode of EXCLUDE for the specified multicast address.
+	MLDv2MulticastAddressRecordTypeModeIsExcluded MLDv2MulticastAddressRecordType = 2
+	// MLDv2MulticastAddressRecordTypeChangeToIncludeMode stands for
+	// CHANGE_TO_INCLUDE_MODE - indicates that the interface has
+	// changed to INCLUDE filter mode for the specified multicast
+	// address.
+	MLDv2MulticastAddressRecordTypeChangeToIncludeMode MLDv2MulticastAddressRecordType = 3
+	// MLDv2MulticastAddressRecordTypeChangeToExcludeMode stands for
+	// CHANGE_TO_EXCLUDE_MODE - indicates that the interface has
+	// changed to EXCLUDE filter mode for the specified multicast
+	// address
+	MLDv2MulticastAddressRecordTypeChangeToExcludeMode MLDv2MulticastAddressRecordType = 4
+	// MLDv2MulticastAddressRecordTypeAllowNewSources stands for
+	// ALLOW_NEW_SOURCES - indicates that the Source Address [i]
+	// fields in this Multicast Address Record contain a list of
+	// the additional sources that the node wishes to listen to,
+	// for packets sent to the specified multicast address.
+	MLDv2MulticastAddressRecordTypeAllowNewSources MLDv2MulticastAddressRecordType = 5
+	// MLDv2MulticastAddressRecordTypeBlockOldSources stands for
+	// BLOCK_OLD_SOURCES - indicates that the Source Address [i]
+	// fields in this Multicast Address Record contain a list of
+	// the sources that the node no longer wishes to listen to,
+	// for packets sent to the specified multicast address.
+	MLDv2MulticastAddressRecordTypeBlockOldSources MLDv2MulticastAddressRecordType = 6
+)
+
+// String returns the human-readable name of the record type.
+// Naming follows https://tools.ietf.org/html/rfc3810#section-5.2.12
+func (m MLDv2MulticastAddressRecordType) String() string {
+	switch m {
+	case MLDv2MulticastAddressRecordTypeModeIsIncluded:
+		return "MODE_IS_INCLUDE"
+	case MLDv2MulticastAddressRecordTypeModeIsExcluded:
+		return "MODE_IS_EXCLUDE"
+	case MLDv2MulticastAddressRecordTypeChangeToIncludeMode:
+		return "CHANGE_TO_INCLUDE_MODE"
+	case MLDv2MulticastAddressRecordTypeChangeToExcludeMode:
+		return "CHANGE_TO_EXCLUDE_MODE"
+	case MLDv2MulticastAddressRecordTypeAllowNewSources:
+		return "ALLOW_NEW_SOURCES"
+	case MLDv2MulticastAddressRecordTypeBlockOldSources:
+		return "BLOCK_OLD_SOURCES"
+	default:
+		return fmt.Sprintf("UNKNOWN(%d)", m)
+	}
+}
+
+// MLDv2MulticastAddressRecord contains information on the sender listening to a
+// single multicast address on the interface the report is sent from.
+// https://tools.ietf.org/html/rfc3810#section-5.2.4
+type MLDv2MulticastAddressRecord struct {
+	// 5.2.5. Record Type
+	RecordType MLDv2MulticastAddressRecordType
+	// 5.2.6. Auxiliary Data Length (number of 32-bit words)
+	AuxDataLen uint8
+	// 5.2.7. Number Of Sources (N)
+	N uint16
+	// 5.2.8. Multicast Address
+	MulticastAddress net.IP
+	// 5.2.9 Source Address [i]
+	SourceAddresses []net.IP
+	// 5.2.10 Auxiliary Data
+	AuxiliaryData []byte
+}
+
+// decodes a multicast address record from bytes
+func (m *MLDv2MulticastAddressRecord) decode(data []byte, df gopacket.DecodeFeedback) (int, error) {
+	if len(data) < 20 {
+		df.SetTruncated()
+		return 0, errors.New(
+			"Multicast Listener Report Message V2 layer less than 20 bytes for Multicast Address Record")
+	}
+
+	m.RecordType = MLDv2MulticastAddressRecordType(data[0])
+	m.AuxDataLen = data[1]
+	m.N = binary.BigEndian.Uint16(data[2:4])
+	m.MulticastAddress = data[4:20]
+
+	for i := uint16(0); i < m.N; i++ {
+		begin := 20 + (int(i) * 16)
+		end := begin + 16
+
+		if len(data) < end {
+			df.SetTruncated()
+			return begin, fmt.Errorf(
+				"Multicast Listener Report Message V2 layer less than %d bytes for Multicast Address Record", end)
+		}
+
+		m.SourceAddresses = append(m.SourceAddresses, data[begin:end])
+	}
+
+	expectedLengthWithoutAuxData := 20 + (int(m.N) * 16)
+	expectedTotalLength := (int(m.AuxDataLen) * 4) + expectedLengthWithoutAuxData // *4 because AuxDataLen counts 32-bit words
+	if len(data) < expectedTotalLength {
+		return expectedLengthWithoutAuxData, fmt.Errorf(
+			"Multicast Listener Report Message V2 layer less than %d bytes for Multicast Address Record",
+			expectedTotalLength)
+	}
+
+	m.AuxiliaryData = data[expectedLengthWithoutAuxData:expectedTotalLength]
+
+	return expectedTotalLength, nil
+}
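+
+// For example (illustrative sizes): a record with N = 2 source addresses and
+// AuxDataLen = 1 occupies 20 + 2*16 + 1*4 = 56 bytes on the wire; decode
+// returns that total so the caller can advance to the next record.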
+
+// String sums this layer up nicely formatted
+func (m *MLDv2MulticastAddressRecord) String() string {
+	return fmt.Sprintf(
+		"RecordType: %d (%s), AuxDataLen: %d [32-bit words], N: %d, Multicast Address: %s, SourceAddresses: %s, Auxiliary Data: %#x",
+		m.RecordType,
+		m.RecordType.String(),
+		m.AuxDataLen,
+		m.N,
+		m.MulticastAddress.To16(),
+		m.SourceAddresses,
+		m.AuxiliaryData)
+}
+
+// serializes a multicast address record
+func (m *MLDv2MulticastAddressRecord) serializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if err := m.serializeAuxiliaryDataTo(b, opts); err != nil {
+		return err
+	}
+
+	if err := m.serializeSourceAddressesTo(b, opts); err != nil {
+		return err
+	}
+
+	buf, err := b.PrependBytes(20)
+	if err != nil {
+		return err
+	}
+
+	buf[0] = uint8(m.RecordType)
+	buf[1] = m.AuxDataLen
+	binary.BigEndian.PutUint16(buf[2:4], m.N)
+
+	ma16 := m.MulticastAddress.To16()
+	if ma16 == nil {
+		return fmt.Errorf("invalid multicast address '%s'", m.MulticastAddress)
+	}
+	copy(buf[4:20], ma16)
+
+	return nil
+}
+
+// serializes the auxiliary data of a multicast address record
+func (m *MLDv2MulticastAddressRecord) serializeAuxiliaryDataTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if remainder := len(m.AuxiliaryData) % 4; remainder != 0 {
+		// pad the auxiliary data to a whole number of 32-bit words
+		zeroWord := []byte{0x0, 0x0, 0x0, 0x0}
+		m.AuxiliaryData = append(m.AuxiliaryData, zeroWord[:4-remainder]...)
+	}
+
+	if opts.FixLengths {
+		auxDataLen := len(m.AuxiliaryData) / 4
+
+		if auxDataLen > math.MaxUint8 {
+			return fmt.Errorf("auxiliary data is %d 32-bit words, but the maximum is 255 32-bit words", auxDataLen)
+		}
+
+		m.AuxDataLen = uint8(auxDataLen)
+	}
+
+	buf, err := b.PrependBytes(len(m.AuxiliaryData))
+	if err != nil {
+		return err
+	}
+
+	copy(buf, m.AuxiliaryData)
+	return nil
+}
+
+// serializes the source addresses of a multicast address record preserving the order
+func (m *MLDv2MulticastAddressRecord) serializeSourceAddressesTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if opts.FixLengths {
+		numberOfSourceAddresses := len(m.SourceAddresses)
+
+		if numberOfSourceAddresses > math.MaxUint16 {
+			return fmt.Errorf(
+				"%d source addresses added, but the maximum is 65535",
+				numberOfSourceAddresses)
+		}
+
+		m.N = uint16(numberOfSourceAddresses)
+	}
+
+	lastItemIdx := len(m.SourceAddresses) - 1
+	for k := range m.SourceAddresses {
+		i := lastItemIdx - k // reverse order
+
+		buf, err := b.PrependBytes(16)
+		if err != nil {
+			return err
+		}
+
+		sa16 := m.SourceAddresses[i].To16()
+		if sa16 == nil {
+			return fmt.Errorf("invalid source address [%d] '%s'", i, m.SourceAddresses[i])
+		}
+		copy(buf, sa16)
+	}
+
+	return nil
+}
+
+func decodeMLDv2MulticastListenerReport(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv2MulticastListenerReportMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
+
+func decodeMLDv2MulticastListenerQuery(data []byte, p gopacket.PacketBuilder) error {
+	m := &MLDv2MulticastListenerQueryMessage{}
+	return decodingLayerDecoder(m, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/modbustcp.go b/vendor/github.com/google/gopacket/layers/modbustcp.go
new file mode 100644
index 0000000..5a481b0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/modbustcp.go
@@ -0,0 +1,156 @@
+// Copyright 2018, The GoPacket Authors, All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+//******************************************************************************
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+//******************************************************************************
+//
+// ModbusTCP Decoding Layer
+// ------------------------------------------
+// This file provides a GoPacket decoding layer for ModbusTCP.
+//
+//******************************************************************************
+
+const mbapRecordSizeInBytes int = 7
+const modbusPDUMinimumRecordSizeInBytes int = 2
+const modbusPDUMaximumRecordSizeInBytes int = 253
+
+// ModbusProtocol type
+type ModbusProtocol uint16
+
+// ModbusProtocol known values.
+const (
+	ModbusProtocolModbus ModbusProtocol = 0
+)
+
+func (mp ModbusProtocol) String() string {
+	switch mp {
+	default:
+		return "Unknown"
+	case ModbusProtocolModbus:
+		return "Modbus"
+	}
+}
+
+//******************************************************************************
+
+// ModbusTCP Type
+// --------
+// Type ModbusTCP implements the DecodingLayer interface. Each ModbusTCP object
+// represents, in structured form, the MODBUS Application Protocol header (MBAP)
+// record carried as the TCP payload of a ModbusTCP packet.
+//
+type ModbusTCP struct {
+	BaseLayer // Stores the packet bytes and payload (Modbus PDU) bytes.
+
+	TransactionIdentifier uint16         // Identification of a MODBUS Request/Response transaction
+	ProtocolIdentifier    ModbusProtocol // It is used for intra-system multiplexing
+	Length                uint16         // Number of following bytes (includes 1 byte for UnitIdentifier + Modbus data length)
+	UnitIdentifier        uint8          // Identification of a remote slave connected on a serial line or on other buses
+}
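+
+// For example (illustrative bytes), the MBAP header 00 01 00 00 00 06 11
+// decodes to TransactionIdentifier 1, ProtocolIdentifier 0 (Modbus),
+// Length 6 and UnitIdentifier 0x11, and must be followed by a 5-byte PDU
+// so that Length equals the PDU size plus one.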
+
+//******************************************************************************
+
+// LayerType returns the layer type of the ModbusTCP object, which is LayerTypeModbusTCP.
+func (d *ModbusTCP) LayerType() gopacket.LayerType {
+	return LayerTypeModbusTCP
+}
+
+//******************************************************************************
+
+// decodeModbusTCP analyses a byte slice and attempts to decode it as a ModbusTCP
+// record of a TCP packet.
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns an error (non-nil).
+//
+// This function is employed in layertypes.go to register the ModbusTCP layer.
+func decodeModbusTCP(data []byte, p gopacket.PacketBuilder) error {
+
+	// Attempt to decode the byte slice.
+	d := &ModbusTCP{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	// If the decoding worked, add the layer to the packet and set it
+	// as the application layer too, if there isn't already one.
+	p.AddLayer(d)
+	p.SetApplicationLayer(d)
+
+	return p.NextDecoder(d.NextLayerType())
+
+}
+
+//******************************************************************************
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as a ModbusTCP
+// record of a TCP packet.
+//
+// Upon success, it loads the ModbusTCP object with information about the packet
+// and returns nil.
+// Upon failure, it returns an error (non-nil).
+func (d *ModbusTCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	// If the data block is too short to be a MBAP record, then return an error.
+	if len(data) < mbapRecordSizeInBytes+modbusPDUMinimumRecordSizeInBytes {
+		df.SetTruncated()
+		return errors.New("ModbusTCP packet too short")
+	}
+
+	if len(data) > mbapRecordSizeInBytes+modbusPDUMaximumRecordSizeInBytes {
+		df.SetTruncated()
+		return errors.New("ModbusTCP packet too long")
+	}
+
+	// ModbusTCP type embeds type BaseLayer which contains two fields:
+	//    Contents is supposed to contain the bytes of the data at this level (MBAP).
+	//    Payload is supposed to contain the payload of this level (PDU).
+	d.BaseLayer = BaseLayer{Contents: data[:mbapRecordSizeInBytes], Payload: data[mbapRecordSizeInBytes:]}
+
+	// Extract the fields from the block of bytes.
+	// The fields can just be copied in big endian order.
+	d.TransactionIdentifier = binary.BigEndian.Uint16(data[:2])
+	d.ProtocolIdentifier = ModbusProtocol(binary.BigEndian.Uint16(data[2:4]))
+	d.Length = binary.BigEndian.Uint16(data[4:6])
+
+	// Length should have the size of the payload plus one byte (size of UnitIdentifier)
+	if d.Length != uint16(len(d.BaseLayer.Payload)+1) {
+		df.SetTruncated()
+		return errors.New("ModbusTCP packet with wrong field value (Length)")
+	}
+	d.UnitIdentifier = uint8(data[6])
+
+	return nil
+}
+
+//******************************************************************************
+
+// NextLayerType returns the layer type of the ModbusTCP payload, which is LayerTypePayload.
+func (d *ModbusTCP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+//******************************************************************************
+
+// Payload returns the Modbus Protocol Data Unit (PDU), composed of the Function
+// Code and Data, that is carried within ModbusTCP packets.
+func (d *ModbusTCP) Payload() []byte {
+	return d.BaseLayer.Payload
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (d *ModbusTCP) CanDecode() gopacket.LayerClass {
+	return LayerTypeModbusTCP
+}
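+
+// A minimal decoding sketch (illustrative only; "raw" is a hypothetical byte
+// slice assumed to hold a complete MBAP header plus PDU):
+//
+//	pkt := gopacket.NewPacket(raw, LayerTypeModbusTCP, gopacket.Default)
+//	if layer := pkt.Layer(LayerTypeModbusTCP); layer != nil {
+//		mb := layer.(*ModbusTCP)
+//		_ = mb.TransactionIdentifier
+//	}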
diff --git a/vendor/github.com/google/gopacket/layers/mpls.go b/vendor/github.com/google/gopacket/layers/mpls.go
new file mode 100644
index 0000000..83079a0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/mpls.go
@@ -0,0 +1,87 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"github.com/google/gopacket"
+)
+
+// MPLS is the MPLS packet header.
+type MPLS struct {
+	BaseLayer
+	Label        uint32
+	TrafficClass uint8
+	StackBottom  bool
+	TTL          uint8
+}
+
+// LayerType returns gopacket.LayerTypeMPLS.
+func (m *MPLS) LayerType() gopacket.LayerType { return LayerTypeMPLS }
+
+// ProtocolGuessingDecoder attempts to guess the protocol of the bytes it's
+// given, then decode the packet accordingly.  Its algorithm for guessing is:
+//  If the packet starts with byte 0x45-0x4F: IPv4
+//  If the packet starts with byte 0x60-0x6F: IPv6
+//  Otherwise:  Error
+// See draft-hsmit-isis-aal5mux-00.txt for more detail on this approach.
+type ProtocolGuessingDecoder struct{}
+
+func (ProtocolGuessingDecoder) Decode(data []byte, p gopacket.PacketBuilder) error {
+	switch data[0] {
+	// 0x40 | header_len, where header_len is at least 5.
+	case 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f:
+		return decodeIPv4(data, p)
+		// IPv6 can start with any byte whose first 4 bits are 0x6.
+	case 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f:
+		return decodeIPv6(data, p)
+	}
+	return errors.New("Unable to guess protocol of packet data")
+}
+
+// MPLSPayloadDecoder is the decoder used to decode data encapsulated by each MPLS
+// layer.  MPLS contains no type information, so we have to explicitly decide
+// which decoder to use.  This is initially set to ProtocolGuessingDecoder, our
+// simple attempt at guessing protocols based on the first few bytes of data
+// available to us.  However, if you know that in your environment MPLS always
+// encapsulates a specific protocol, you may reset this.
+var MPLSPayloadDecoder gopacket.Decoder = ProtocolGuessingDecoder{}
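+
+// For example (illustrative), a program importing this package whose captures
+// always carry IPv4 inside MPLS could skip the guessing step by assigning:
+//
+//	layers.MPLSPayloadDecoder = layers.LayerTypeIPv4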
+
+func decodeMPLS(data []byte, p gopacket.PacketBuilder) error {
+	decoded := binary.BigEndian.Uint32(data[:4])
+	mpls := &MPLS{
+		Label:        decoded >> 12,
+		TrafficClass: uint8(decoded>>9) & 0x7,
+		StackBottom:  decoded&0x100 != 0,
+		TTL:          uint8(decoded),
+		BaseLayer:    BaseLayer{data[:4], data[4:]},
+	}
+	p.AddLayer(mpls)
+	if mpls.StackBottom {
+		return p.NextDecoder(MPLSPayloadDecoder)
+	}
+	return p.NextDecoder(gopacket.DecodeFunc(decodeMPLS))
+}
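+
+// Worked example (illustrative bytes): the label stack entry 00 01 f1 40
+// decodes to Label 31, TrafficClass 0, StackBottom true and TTL 64, so the
+// remaining bytes are handed to MPLSPayloadDecoder.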
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (m *MPLS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	encoded := m.Label << 12
+	encoded |= uint32(m.TrafficClass) << 9
+	encoded |= uint32(m.TTL)
+	if m.StackBottom {
+		encoded |= 0x100
+	}
+	binary.BigEndian.PutUint32(bytes, encoded)
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/ndp.go b/vendor/github.com/google/gopacket/layers/ndp.go
new file mode 100644
index 0000000..f7ca1b2
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ndp.go
@@ -0,0 +1,611 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// Enum types courtesy of...
+// http://anonsvn.wireshark.org/wireshark/trunk/epan/dissectors/packet-ndp.c
+
+package layers
+
+import (
+	"fmt"
+	"github.com/google/gopacket"
+	"net"
+)
+
+// NDPChassisType is the type of chassis reported in a Nortel Discovery
+// Protocol message.
+type NDPChassisType uint8
+
+// Nortel Chassis Types
+const (
+	NDPChassisother                                       NDPChassisType = 1
+	NDPChassis3000                                        NDPChassisType = 2
+	NDPChassis3030                                        NDPChassisType = 3
+	NDPChassis2310                                        NDPChassisType = 4
+	NDPChassis2810                                        NDPChassisType = 5
+	NDPChassis2912                                        NDPChassisType = 6
+	NDPChassis2914                                        NDPChassisType = 7
+	NDPChassis271x                                        NDPChassisType = 8
+	NDPChassis2813                                        NDPChassisType = 9
+	NDPChassis2814                                        NDPChassisType = 10
+	NDPChassis2915                                        NDPChassisType = 11
+	NDPChassis5000                                        NDPChassisType = 12
+	NDPChassis2813SA                                      NDPChassisType = 13
+	NDPChassis2814SA                                      NDPChassisType = 14
+	NDPChassis810M                                        NDPChassisType = 15
+	NDPChassisEthercell                                   NDPChassisType = 16
+	NDPChassis5005                                        NDPChassisType = 17
+	NDPChassisAlcatelEWC                                  NDPChassisType = 18
+	NDPChassis2715SA                                      NDPChassisType = 20
+	NDPChassis2486                                        NDPChassisType = 21
+	NDPChassis28000series                                 NDPChassisType = 22
+	NDPChassis23000series                                 NDPChassisType = 23
+	NDPChassis5DN00xseries                                NDPChassisType = 24
+	NDPChassisBayStackEthernet                            NDPChassisType = 25
+	NDPChassis23100series                                 NDPChassisType = 26
+	NDPChassis100BaseTHub                                 NDPChassisType = 27
+	NDPChassis3000FastEthernet                            NDPChassisType = 28
+	NDPChassisOrionSwitch                                 NDPChassisType = 29
+	NDPChassisDDS                                         NDPChassisType = 31
+	NDPChassisCentillion6slot                             NDPChassisType = 32
+	NDPChassisCentillion12slot                            NDPChassisType = 33
+	NDPChassisCentillion1slot                             NDPChassisType = 34
+	NDPChassisBayStack301                                 NDPChassisType = 35
+	NDPChassisBayStackTokenRingHub                        NDPChassisType = 36
+	NDPChassisFVCMultimediaSwitch                         NDPChassisType = 37
+	NDPChassisSwitchNode                                  NDPChassisType = 38
+	NDPChassisBayStack302Switch                           NDPChassisType = 39
+	NDPChassisBayStack350Switch                           NDPChassisType = 40
+	NDPChassisBayStack150EthernetHub                      NDPChassisType = 41
+	NDPChassisCentillion50NSwitch                         NDPChassisType = 42
+	NDPChassisCentillion50TSwitch                         NDPChassisType = 43
+	NDPChassisBayStack303304Switches                      NDPChassisType = 44
+	NDPChassisBayStack200EthernetHub                      NDPChassisType = 45
+	NDPChassisBayStack25010100EthernetHub                 NDPChassisType = 46
+	NDPChassisBayStack450101001000Switches                NDPChassisType = 48
+	NDPChassisBayStack41010100Switches                    NDPChassisType = 49
+	NDPChassisPassport1200L3Switch                        NDPChassisType = 50
+	NDPChassisPassport1250L3Switch                        NDPChassisType = 51
+	NDPChassisPassport1100L3Switch                        NDPChassisType = 52
+	NDPChassisPassport1150L3Switch                        NDPChassisType = 53
+	NDPChassisPassport1050L3Switch                        NDPChassisType = 54
+	NDPChassisPassport1051L3Switch                        NDPChassisType = 55
+	NDPChassisPassport8610L3Switch                        NDPChassisType = 56
+	NDPChassisPassport8606L3Switch                        NDPChassisType = 57
+	NDPChassisPassport8010                                NDPChassisType = 58
+	NDPChassisPassport8006                                NDPChassisType = 59
+	NDPChassisBayStack670wirelessaccesspoint              NDPChassisType = 60
+	NDPChassisPassport740                                 NDPChassisType = 61
+	NDPChassisPassport750                                 NDPChassisType = 62
+	NDPChassisPassport790                                 NDPChassisType = 63
+	NDPChassisBusinessPolicySwitch200010100Switches       NDPChassisType = 64
+	NDPChassisPassport8110L2Switch                        NDPChassisType = 65
+	NDPChassisPassport8106L2Switch                        NDPChassisType = 66
+	NDPChassisBayStack3580GigSwitch                       NDPChassisType = 67
+	NDPChassisBayStack10PowerSupplyUnit                   NDPChassisType = 68
+	NDPChassisBayStack42010100Switch                      NDPChassisType = 69
+	NDPChassisOPTeraMetro1200EthernetServiceModule        NDPChassisType = 70
+	NDPChassisOPTera8010co                                NDPChassisType = 71
+	NDPChassisOPTera8610coL3Switch                        NDPChassisType = 72
+	NDPChassisOPTera8110coL2Switch                        NDPChassisType = 73
+	NDPChassisOPTera8003                                  NDPChassisType = 74
+	NDPChassisOPTera8603L3Switch                          NDPChassisType = 75
+	NDPChassisOPTera8103L2Switch                          NDPChassisType = 76
+	NDPChassisBayStack380101001000Switch                  NDPChassisType = 77
+	NDPChassisEthernetSwitch47048T                        NDPChassisType = 78
+	NDPChassisOPTeraMetro1450EthernetServiceModule        NDPChassisType = 79
+	NDPChassisOPTeraMetro1400EthernetServiceModule        NDPChassisType = 80
+	NDPChassisAlteonSwitchFamily                          NDPChassisType = 81
+	NDPChassisEthernetSwitch46024TPWR                     NDPChassisType = 82
+	NDPChassisOPTeraMetro8010OPML2Switch                  NDPChassisType = 83
+	NDPChassisOPTeraMetro8010coOPML2Switch                NDPChassisType = 84
+	NDPChassisOPTeraMetro8006OPML2Switch                  NDPChassisType = 85
+	NDPChassisOPTeraMetro8003OPML2Switch                  NDPChassisType = 86
+	NDPChassisAlteon180e                                  NDPChassisType = 87
+	NDPChassisAlteonAD3                                   NDPChassisType = 88
+	NDPChassisAlteon184                                   NDPChassisType = 89
+	NDPChassisAlteonAD4                                   NDPChassisType = 90
+	NDPChassisPassport1424L3Switch                        NDPChassisType = 91
+	NDPChassisPassport1648L3Switch                        NDPChassisType = 92
+	NDPChassisPassport1612L3Switch                        NDPChassisType = 93
+	NDPChassisPassport1624L3Switch                        NDPChassisType = 94
+	NDPChassisBayStack38024FFiber1000Switch               NDPChassisType = 95
+	NDPChassisEthernetRoutingSwitch551024T                NDPChassisType = 96
+	NDPChassisEthernetRoutingSwitch551048T                NDPChassisType = 97
+	NDPChassisEthernetSwitch47024T                        NDPChassisType = 98
+	NDPChassisNortelNetworksWirelessLANAccessPoint2220    NDPChassisType = 99
+	NDPChassisPassportRBS2402L3Switch                     NDPChassisType = 100
+	NDPChassisAlteonApplicationSwitch2424                 NDPChassisType = 101
+	NDPChassisAlteonApplicationSwitch2224                 NDPChassisType = 102
+	NDPChassisAlteonApplicationSwitch2208                 NDPChassisType = 103
+	NDPChassisAlteonApplicationSwitch2216                 NDPChassisType = 104
+	NDPChassisAlteonApplicationSwitch3408                 NDPChassisType = 105
+	NDPChassisAlteonApplicationSwitch3416                 NDPChassisType = 106
+	NDPChassisNortelNetworksWirelessLANSecuritySwitch2250 NDPChassisType = 107
+	NDPChassisEthernetSwitch42548T                        NDPChassisType = 108
+	NDPChassisEthernetSwitch42524T                        NDPChassisType = 109
+	NDPChassisNortelNetworksWirelessLANAccessPoint2221    NDPChassisType = 110
+	NDPChassisNortelMetroEthernetServiceUnit24TSPFswitch  NDPChassisType = 111
+	NDPChassisNortelMetroEthernetServiceUnit24TLXDCswitch NDPChassisType = 112
+	NDPChassisPassport830010slotchassis                   NDPChassisType = 113
+	NDPChassisPassport83006slotchassis                    NDPChassisType = 114
+	NDPChassisEthernetRoutingSwitch552024TPWR             NDPChassisType = 115
+	NDPChassisEthernetRoutingSwitch552048TPWR             NDPChassisType = 116
+	NDPChassisNortelNetworksVPNGateway3050                NDPChassisType = 117
+	NDPChassisAlteonSSL31010100                           NDPChassisType = 118
+	NDPChassisAlteonSSL31010100Fiber                      NDPChassisType = 119
+	NDPChassisAlteonSSL31010100FIPS                       NDPChassisType = 120
+	NDPChassisAlteonSSL410101001000                       NDPChassisType = 121
+	NDPChassisAlteonSSL410101001000Fiber                  NDPChassisType = 122
+	NDPChassisAlteonApplicationSwitch2424SSL              NDPChassisType = 123
+	NDPChassisEthernetSwitch32524T                        NDPChassisType = 124
+	NDPChassisEthernetSwitch32524G                        NDPChassisType = 125
+	NDPChassisNortelNetworksWirelessLANAccessPoint2225    NDPChassisType = 126
+	NDPChassisNortelNetworksWirelessLANSecuritySwitch2270 NDPChassisType = 127
+	NDPChassis24portEthernetSwitch47024TPWR               NDPChassisType = 128
+	NDPChassis48portEthernetSwitch47048TPWR               NDPChassisType = 129
+	NDPChassisEthernetRoutingSwitch553024TFD              NDPChassisType = 130
+	NDPChassisEthernetSwitch351024T                       NDPChassisType = 131
+	NDPChassisNortelMetroEthernetServiceUnit12GACL3Switch NDPChassisType = 132
+	NDPChassisNortelMetroEthernetServiceUnit12GDCL3Switch NDPChassisType = 133
+	NDPChassisNortelSecureAccessSwitch                    NDPChassisType = 134
+	NDPChassisNortelNetworksVPNGateway3070                NDPChassisType = 135
+	NDPChassisOPTeraMetro3500                             NDPChassisType = 136
+	NDPChassisSMBBES101024T                               NDPChassisType = 137
+	NDPChassisSMBBES101048T                               NDPChassisType = 138
+	NDPChassisSMBBES102024TPWR                            NDPChassisType = 139
+	NDPChassisSMBBES102048TPWR                            NDPChassisType = 140
+	NDPChassisSMBBES201024T                               NDPChassisType = 141
+	NDPChassisSMBBES201048T                               NDPChassisType = 142
+	NDPChassisSMBBES202024TPWR                            NDPChassisType = 143
+	NDPChassisSMBBES202048TPWR                            NDPChassisType = 144
+	NDPChassisSMBBES11024T                                NDPChassisType = 145
+	NDPChassisSMBBES11048T                                NDPChassisType = 146
+	NDPChassisSMBBES12024TPWR                             NDPChassisType = 147
+	NDPChassisSMBBES12048TPWR                             NDPChassisType = 148
+	NDPChassisSMBBES21024T                                NDPChassisType = 149
+	NDPChassisSMBBES21048T                                NDPChassisType = 150
+	NDPChassisSMBBES22024TPWR                             NDPChassisType = 151
+	NDPChassisSMBBES22048TPWR                             NDPChassisType = 152
+	NDPChassisOME6500                                     NDPChassisType = 153
+	NDPChassisEthernetRoutingSwitch4548GT                 NDPChassisType = 154
+	NDPChassisEthernetRoutingSwitch4548GTPWR              NDPChassisType = 155
+	NDPChassisEthernetRoutingSwitch4550T                  NDPChassisType = 156
+	NDPChassisEthernetRoutingSwitch4550TPWR               NDPChassisType = 157
+	NDPChassisEthernetRoutingSwitch4526FX                 NDPChassisType = 158
+	NDPChassisEthernetRoutingSwitch250026T                NDPChassisType = 159
+	NDPChassisEthernetRoutingSwitch250026TPWR             NDPChassisType = 160
+	NDPChassisEthernetRoutingSwitch250050T                NDPChassisType = 161
+	NDPChassisEthernetRoutingSwitch250050TPWR             NDPChassisType = 162
+)
+
+// NDPBackplaneType is the backplane type reported in a Nortel Discovery
+// Protocol message.
+type NDPBackplaneType uint8
+
+// Nortel Backplane Types
+const (
+	NDPBackplaneOther                                       NDPBackplaneType = 1
+	NDPBackplaneEthernet                                    NDPBackplaneType = 2
+	NDPBackplaneEthernetTokenring                           NDPBackplaneType = 3
+	NDPBackplaneEthernetFDDI                                NDPBackplaneType = 4
+	NDPBackplaneEthernetTokenringFDDI                       NDPBackplaneType = 5
+	NDPBackplaneEthernetTokenringRedundantPower             NDPBackplaneType = 6
+	NDPBackplaneEthernetTokenringFDDIRedundantPower         NDPBackplaneType = 7
+	NDPBackplaneTokenRing                                   NDPBackplaneType = 8
+	NDPBackplaneEthernetTokenringFastEthernet               NDPBackplaneType = 9
+	NDPBackplaneEthernetFastEthernet                        NDPBackplaneType = 10
+	NDPBackplaneEthernetTokenringFastEthernetRedundantPower NDPBackplaneType = 11
+	NDPBackplaneEthernetFastEthernetGigabitEthernet         NDPBackplaneType = 12
+)
+
+// NDPState is the device state reported in a Nortel Discovery Protocol message.
+type NDPState uint8
+
+// Device State
+const (
+	NDPStateTopology  NDPState = 1
+	NDPStateHeartbeat NDPState = 2
+	NDPStateNew       NDPState = 3
+)
+
+// NortelDiscovery is a packet layer containing the Nortel Discovery Protocol.
+type NortelDiscovery struct {
+	BaseLayer
+	IPAddress net.IP
+	SegmentID []byte
+	Chassis   NDPChassisType
+	Backplane NDPBackplaneType
+	State     NDPState
+	NumLinks  uint8
+}
+
+// LayerType returns gopacket.LayerTypeNortelDiscovery.
+func (c *NortelDiscovery) LayerType() gopacket.LayerType {
+	return LayerTypeNortelDiscovery
+}
+
+func decodeNortelDiscovery(data []byte, p gopacket.PacketBuilder) error {
+	c := &NortelDiscovery{}
+	if len(data) < 11 {
+		return fmt.Errorf("Invalid NortelDiscovery packet length %d", len(data))
+	}
+	c.IPAddress = data[0:4]
+	c.SegmentID = data[4:7]
+	c.Chassis = NDPChassisType(data[7])
+	c.Backplane = NDPBackplaneType(data[8])
+	c.State = NDPState(data[9])
+	c.NumLinks = uint8(data[10])
+	p.AddLayer(c)
+	return nil
+}
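+
+// The fixed 11-byte layout decoded above (illustrative summary): IP address
+// (4 bytes), segment ID (3 bytes), chassis type, backplane type, state and
+// number of links (1 byte each).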
+
+func (t NDPChassisType) String() (s string) {
+	switch t {
+	case NDPChassisother:
+		s = "other"
+	case NDPChassis3000:
+		s = "3000"
+	case NDPChassis3030:
+		s = "3030"
+	case NDPChassis2310:
+		s = "2310"
+	case NDPChassis2810:
+		s = "2810"
+	case NDPChassis2912:
+		s = "2912"
+	case NDPChassis2914:
+		s = "2914"
+	case NDPChassis271x:
+		s = "271x"
+	case NDPChassis2813:
+		s = "2813"
+	case NDPChassis2814:
+		s = "2814"
+	case NDPChassis2915:
+		s = "2915"
+	case NDPChassis5000:
+		s = "5000"
+	case NDPChassis2813SA:
+		s = "2813SA"
+	case NDPChassis2814SA:
+		s = "2814SA"
+	case NDPChassis810M:
+		s = "810M"
+	case NDPChassisEthercell:
+		s = "Ethercell"
+	case NDPChassis5005:
+		s = "5005"
+	case NDPChassisAlcatelEWC:
+		s = "Alcatel Ethernet workgroup conc."
+	case NDPChassis2715SA:
+		s = "2715SA"
+	case NDPChassis2486:
+		s = "2486"
+	case NDPChassis28000series:
+		s = "28000 series"
+	case NDPChassis23000series:
+		s = "23000 series"
+	case NDPChassis5DN00xseries:
+		s = "5DN00x series"
+	case NDPChassisBayStackEthernet:
+		s = "BayStack Ethernet"
+	case NDPChassis23100series:
+		s = "23100 series"
+	case NDPChassis100BaseTHub:
+		s = "100Base-T Hub"
+	case NDPChassis3000FastEthernet:
+		s = "3000 Fast Ethernet"
+	case NDPChassisOrionSwitch:
+		s = "Orion switch"
+	case NDPChassisDDS:
+		s = "DDS"
+	case NDPChassisCentillion6slot:
+		s = "Centillion (6 slot)"
+	case NDPChassisCentillion12slot:
+		s = "Centillion (12 slot)"
+	case NDPChassisCentillion1slot:
+		s = "Centillion (1 slot)"
+	case NDPChassisBayStack301:
+		s = "BayStack 301"
+	case NDPChassisBayStackTokenRingHub:
+		s = "BayStack TokenRing Hub"
+	case NDPChassisFVCMultimediaSwitch:
+		s = "FVC Multimedia Switch"
+	case NDPChassisSwitchNode:
+		s = "Switch Node"
+	case NDPChassisBayStack302Switch:
+		s = "BayStack 302 Switch"
+	case NDPChassisBayStack350Switch:
+		s = "BayStack 350 Switch"
+	case NDPChassisBayStack150EthernetHub:
+		s = "BayStack 150 Ethernet Hub"
+	case NDPChassisCentillion50NSwitch:
+		s = "Centillion 50N switch"
+	case NDPChassisCentillion50TSwitch:
+		s = "Centillion 50T switch"
+	case NDPChassisBayStack303304Switches:
+		s = "BayStack 303 and 304 Switches"
+	case NDPChassisBayStack200EthernetHub:
+		s = "BayStack 200 Ethernet Hub"
+	case NDPChassisBayStack25010100EthernetHub:
+		s = "BayStack 250 10/100 Ethernet Hub"
+	case NDPChassisBayStack450101001000Switches:
+		s = "BayStack 450 10/100/1000 Switches"
+	case NDPChassisBayStack41010100Switches:
+		s = "BayStack 410 10/100 Switches"
+	case NDPChassisPassport1200L3Switch:
+		s = "Passport 1200 L3 Switch"
+	case NDPChassisPassport1250L3Switch:
+		s = "Passport 1250 L3 Switch"
+	case NDPChassisPassport1100L3Switch:
+		s = "Passport 1100 L3 Switch"
+	case NDPChassisPassport1150L3Switch:
+		s = "Passport 1150 L3 Switch"
+	case NDPChassisPassport1050L3Switch:
+		s = "Passport 1050 L3 Switch"
+	case NDPChassisPassport1051L3Switch:
+		s = "Passport 1051 L3 Switch"
+	case NDPChassisPassport8610L3Switch:
+		s = "Passport 8610 L3 Switch"
+	case NDPChassisPassport8606L3Switch:
+		s = "Passport 8606 L3 Switch"
+	case NDPChassisPassport8010:
+		s = "Passport 8010"
+	case NDPChassisPassport8006:
+		s = "Passport 8006"
+	case NDPChassisBayStack670wirelessaccesspoint:
+		s = "BayStack 670 wireless access point"
+	case NDPChassisPassport740:
+		s = "Passport 740"
+	case NDPChassisPassport750:
+		s = "Passport 750"
+	case NDPChassisPassport790:
+		s = "Passport 790"
+	case NDPChassisBusinessPolicySwitch200010100Switches:
+		s = "Business Policy Switch 2000 10/100 Switches"
+	case NDPChassisPassport8110L2Switch:
+		s = "Passport 8110 L2 Switch"
+	case NDPChassisPassport8106L2Switch:
+		s = "Passport 8106 L2 Switch"
+	case NDPChassisBayStack3580GigSwitch:
+		s = "BayStack 3580 Gig Switch"
+	case NDPChassisBayStack10PowerSupplyUnit:
+		s = "BayStack 10 Power Supply Unit"
+	case NDPChassisBayStack42010100Switch:
+		s = "BayStack 420 10/100 Switch"
+	case NDPChassisOPTeraMetro1200EthernetServiceModule:
+		s = "OPTera Metro 1200 Ethernet Service Module"
+	case NDPChassisOPTera8010co:
+		s = "OPTera 8010co"
+	case NDPChassisOPTera8610coL3Switch:
+		s = "OPTera 8610co L3 switch"
+	case NDPChassisOPTera8110coL2Switch:
+		s = "OPTera 8110co L2 switch"
+	case NDPChassisOPTera8003:
+		s = "OPTera 8003"
+	case NDPChassisOPTera8603L3Switch:
+		s = "OPTera 8603 L3 switch"
+	case NDPChassisOPTera8103L2Switch:
+		s = "OPTera 8103 L2 switch"
+	case NDPChassisBayStack380101001000Switch:
+		s = "BayStack 380 10/100/1000 Switch"
+	case NDPChassisEthernetSwitch47048T:
+		s = "Ethernet Switch 470-48T"
+	case NDPChassisOPTeraMetro1450EthernetServiceModule:
+		s = "OPTera Metro 1450 Ethernet Service Module"
+	case NDPChassisOPTeraMetro1400EthernetServiceModule:
+		s = "OPTera Metro 1400 Ethernet Service Module"
+	case NDPChassisAlteonSwitchFamily:
+		s = "Alteon Switch Family"
+	case NDPChassisEthernetSwitch46024TPWR:
+		s = "Ethernet Switch 460-24T-PWR"
+	case NDPChassisOPTeraMetro8010OPML2Switch:
+		s = "OPTera Metro 8010 OPM L2 Switch"
+	case NDPChassisOPTeraMetro8010coOPML2Switch:
+		s = "OPTera Metro 8010co OPM L2 Switch"
+	case NDPChassisOPTeraMetro8006OPML2Switch:
+		s = "OPTera Metro 8006 OPM L2 Switch"
+	case NDPChassisOPTeraMetro8003OPML2Switch:
+		s = "OPTera Metro 8003 OPM L2 Switch"
+	case NDPChassisAlteon180e:
+		s = "Alteon 180e"
+	case NDPChassisAlteonAD3:
+		s = "Alteon AD3"
+	case NDPChassisAlteon184:
+		s = "Alteon 184"
+	case NDPChassisAlteonAD4:
+		s = "Alteon AD4"
+	case NDPChassisPassport1424L3Switch:
+		s = "Passport 1424 L3 switch"
+	case NDPChassisPassport1648L3Switch:
+		s = "Passport 1648 L3 switch"
+	case NDPChassisPassport1612L3Switch:
+		s = "Passport 1612 L3 switch"
+	case NDPChassisPassport1624L3Switch:
+		s = "Passport 1624 L3 switch"
+	case NDPChassisBayStack38024FFiber1000Switch:
+		s = "BayStack 380-24F Fiber 1000 Switch"
+	case NDPChassisEthernetRoutingSwitch551024T:
+		s = "Ethernet Routing Switch 5510-24T"
+	case NDPChassisEthernetRoutingSwitch551048T:
+		s = "Ethernet Routing Switch 5510-48T"
+	case NDPChassisEthernetSwitch47024T:
+		s = "Ethernet Switch 470-24T"
+	case NDPChassisNortelNetworksWirelessLANAccessPoint2220:
+		s = "Nortel Networks Wireless LAN Access Point 2220"
+	case NDPChassisPassportRBS2402L3Switch:
+		s = "Passport RBS 2402 L3 switch"
+	case NDPChassisAlteonApplicationSwitch2424:
+		s = "Alteon Application Switch 2424"
+	case NDPChassisAlteonApplicationSwitch2224:
+		s = "Alteon Application Switch 2224"
+	case NDPChassisAlteonApplicationSwitch2208:
+		s = "Alteon Application Switch 2208"
+	case NDPChassisAlteonApplicationSwitch2216:
+		s = "Alteon Application Switch 2216"
+	case NDPChassisAlteonApplicationSwitch3408:
+		s = "Alteon Application Switch 3408"
+	case NDPChassisAlteonApplicationSwitch3416:
+		s = "Alteon Application Switch 3416"
+	case NDPChassisNortelNetworksWirelessLANSecuritySwitch2250:
+		s = "Nortel Networks Wireless LAN SecuritySwitch 2250"
+	case NDPChassisEthernetSwitch42548T:
+		s = "Ethernet Switch 425-48T"
+	case NDPChassisEthernetSwitch42524T:
+		s = "Ethernet Switch 425-24T"
+	case NDPChassisNortelNetworksWirelessLANAccessPoint2221:
+		s = "Nortel Networks Wireless LAN Access Point 2221"
+	case NDPChassisNortelMetroEthernetServiceUnit24TSPFswitch:
+		s = "Nortel Metro Ethernet Service Unit 24-T SPF switch"
+	case NDPChassisNortelMetroEthernetServiceUnit24TLXDCswitch:
+		s = "Nortel Metro Ethernet Service Unit 24-T LX DC switch"
+	case NDPChassisPassport830010slotchassis:
+		s = "Passport 8300 10-slot chassis"
+	case NDPChassisPassport83006slotchassis:
+		s = "Passport 8300 6-slot chassis"
+	case NDPChassisEthernetRoutingSwitch552024TPWR:
+		s = "Ethernet Routing Switch 5520-24T-PWR"
+	case NDPChassisEthernetRoutingSwitch552048TPWR:
+		s = "Ethernet Routing Switch 5520-48T-PWR"
+	case NDPChassisNortelNetworksVPNGateway3050:
+		s = "Nortel Networks VPN Gateway 3050"
+	case NDPChassisAlteonSSL31010100:
+		s = "Alteon SSL 310 10/100"
+	case NDPChassisAlteonSSL31010100Fiber:
+		s = "Alteon SSL 310 10/100 Fiber"
+	case NDPChassisAlteonSSL31010100FIPS:
+		s = "Alteon SSL 310 10/100 FIPS"
+	case NDPChassisAlteonSSL410101001000:
+		s = "Alteon SSL 410 10/100/1000"
+	case NDPChassisAlteonSSL410101001000Fiber:
+		s = "Alteon SSL 410 10/100/1000 Fiber"
+	case NDPChassisAlteonApplicationSwitch2424SSL:
+		s = "Alteon Application Switch 2424-SSL"
+	case NDPChassisEthernetSwitch32524T:
+		s = "Ethernet Switch 325-24T"
+	case NDPChassisEthernetSwitch32524G:
+		s = "Ethernet Switch 325-24G"
+	case NDPChassisNortelNetworksWirelessLANAccessPoint2225:
+		s = "Nortel Networks Wireless LAN Access Point 2225"
+	case NDPChassisNortelNetworksWirelessLANSecuritySwitch2270:
+		s = "Nortel Networks Wireless LAN SecuritySwitch 2270"
+	case NDPChassis24portEthernetSwitch47024TPWR:
+		s = "24-port Ethernet Switch 470-24T-PWR"
+	case NDPChassis48portEthernetSwitch47048TPWR:
+		s = "48-port Ethernet Switch 470-48T-PWR"
+	case NDPChassisEthernetRoutingSwitch553024TFD:
+		s = "Ethernet Routing Switch 5530-24TFD"
+	case NDPChassisEthernetSwitch351024T:
+		s = "Ethernet Switch 3510-24T"
+	case NDPChassisNortelMetroEthernetServiceUnit12GACL3Switch:
+		s = "Nortel Metro Ethernet Service Unit 12G AC L3 switch"
+	case NDPChassisNortelMetroEthernetServiceUnit12GDCL3Switch:
+		s = "Nortel Metro Ethernet Service Unit 12G DC L3 switch"
+	case NDPChassisNortelSecureAccessSwitch:
+		s = "Nortel Secure Access Switch"
+	case NDPChassisNortelNetworksVPNGateway3070:
+		s = "Nortel Networks VPN Gateway 3070"
+	case NDPChassisOPTeraMetro3500:
+		s = "OPTera Metro 3500"
+	case NDPChassisSMBBES101024T:
+		s = "SMB BES 1010 24T"
+	case NDPChassisSMBBES101048T:
+		s = "SMB BES 1010 48T"
+	case NDPChassisSMBBES102024TPWR:
+		s = "SMB BES 1020 24T PWR"
+	case NDPChassisSMBBES102048TPWR:
+		s = "SMB BES 1020 48T PWR"
+	case NDPChassisSMBBES201024T:
+		s = "SMB BES 2010 24T"
+	case NDPChassisSMBBES201048T:
+		s = "SMB BES 2010 48T"
+	case NDPChassisSMBBES202024TPWR:
+		s = "SMB BES 2020 24T PWR"
+	case NDPChassisSMBBES202048TPWR:
+		s = "SMB BES 2020 48T PWR"
+	case NDPChassisSMBBES11024T:
+		s = "SMB BES 110 24T"
+	case NDPChassisSMBBES11048T:
+		s = "SMB BES 110 48T"
+	case NDPChassisSMBBES12024TPWR:
+		s = "SMB BES 120 24T PWR"
+	case NDPChassisSMBBES12048TPWR:
+		s = "SMB BES 120 48T PWR"
+	case NDPChassisSMBBES21024T:
+		s = "SMB BES 210 24T"
+	case NDPChassisSMBBES21048T:
+		s = "SMB BES 210 48T"
+	case NDPChassisSMBBES22024TPWR:
+		s = "SMB BES 220 24T PWR"
+	case NDPChassisSMBBES22048TPWR:
+		s = "SMB BES 220 48T PWR"
+	case NDPChassisOME6500:
+		s = "OME 6500"
+	case NDPChassisEthernetRoutingSwitch4548GT:
+		s = "Ethernet Routing Switch 4548GT"
+	case NDPChassisEthernetRoutingSwitch4548GTPWR:
+		s = "Ethernet Routing Switch 4548GT-PWR"
+	case NDPChassisEthernetRoutingSwitch4550T:
+		s = "Ethernet Routing Switch 4550T"
+	case NDPChassisEthernetRoutingSwitch4550TPWR:
+		s = "Ethernet Routing Switch 4550T-PWR"
+	case NDPChassisEthernetRoutingSwitch4526FX:
+		s = "Ethernet Routing Switch 4526FX"
+	case NDPChassisEthernetRoutingSwitch250026T:
+		s = "Ethernet Routing Switch 2500-26T"
+	case NDPChassisEthernetRoutingSwitch250026TPWR:
+		s = "Ethernet Routing Switch 2500-26T-PWR"
+	case NDPChassisEthernetRoutingSwitch250050T:
+		s = "Ethernet Routing Switch 2500-50T"
+	case NDPChassisEthernetRoutingSwitch250050TPWR:
+		s = "Ethernet Routing Switch 2500-50T-PWR"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t NDPBackplaneType) String() (s string) {
+	switch t {
+	case NDPBackplaneOther:
+		s = "Other"
+	case NDPBackplaneEthernet:
+		s = "Ethernet"
+	case NDPBackplaneEthernetTokenring:
+		s = "Ethernet and Tokenring"
+	case NDPBackplaneEthernetFDDI:
+		s = "Ethernet and FDDI"
+	case NDPBackplaneEthernetTokenringFDDI:
+		s = "Ethernet, Tokenring and FDDI"
+	case NDPBackplaneEthernetTokenringRedundantPower:
+		s = "Ethernet and Tokenring with redundant power"
+	case NDPBackplaneEthernetTokenringFDDIRedundantPower:
+		s = "Ethernet, Tokenring, FDDI with redundant power"
+	case NDPBackplaneTokenRing:
+		s = "Token Ring"
+	case NDPBackplaneEthernetTokenringFastEthernet:
+		s = "Ethernet, Tokenring and Fast Ethernet"
+	case NDPBackplaneEthernetFastEthernet:
+		s = "Ethernet and Fast Ethernet"
+	case NDPBackplaneEthernetTokenringFastEthernetRedundantPower:
+		s = "Ethernet, Tokenring, Fast Ethernet with redundant power"
+	case NDPBackplaneEthernetFastEthernetGigabitEthernet:
+		s = "Ethernet, Fast Ethernet and Gigabit Ethernet"
+	default:
+		s = "Unknown"
+	}
+	return
+}
+
+func (t NDPState) String() (s string) {
+	switch t {
+	case NDPStateTopology:
+		s = "Topology Change"
+	case NDPStateHeartbeat:
+		s = "Heartbeat"
+	case NDPStateNew:
+		s = "New"
+	default:
+		s = "Unknown"
+	}
+	return
+}
diff --git a/vendor/github.com/google/gopacket/layers/ntp.go b/vendor/github.com/google/gopacket/layers/ntp.go
new file mode 100644
index 0000000..33c15b3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ntp.go
@@ -0,0 +1,416 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+//
+//******************************************************************************
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+//******************************************************************************
+//
+// Network Time Protocol (NTP) Decoding Layer
+// ------------------------------------------
+// This file provides a GoPacket decoding layer for NTP.
+//
+//******************************************************************************
+//
+// About The Network Time Protocol (NTP)
+// -------------------------------------
+// NTP is a protocol that enables computers on the internet to set their
+// clocks to the correct time (or to a time that is acceptably close to the
+// correct time). NTP runs on top of UDP.
+//
+// There have been several versions of the NTP protocol. The latest
+// version is V4, specified in RFC 5905:
+//     http://www.ietf.org/rfc/rfc5905.txt
+//
+//******************************************************************************
+//
+// References
+// ----------
+//
+// Wikipedia's NTP entry:
+//     https://en.wikipedia.org/wiki/Network_Time_Protocol
+//     This is the best place to get an overview of NTP.
+//
+// Network Time Protocol Home Website:
+//     http://www.ntp.org/
+//     This appears to be the official website of NTP.
+//
+// List of current NTP Protocol RFCs:
+//     http://www.ntp.org/rfc.html
+//
+// RFC 958: "Network Time Protocol (NTP)" (1985)
+//     https://tools.ietf.org/html/rfc958
+//     This is the original NTP specification.
+//
+// RFC 1305: "Network Time Protocol (Version 3) Specification, Implementation and Analysis" (1992)
+//     https://tools.ietf.org/html/rfc1305
+//     The protocol was updated in 1992 yielding NTP V3.
+//
+// RFC 5905: "Network Time Protocol Version 4: Protocol and Algorithms Specification" (2010)
+//     https://www.ietf.org/rfc/rfc5905.txt
+//     The protocol was updated in 2010 yielding NTP V4.
+//     V4 is backwards compatible with all previous versions of NTP.
+//
+// RFC 5906: "Network Time Protocol Version 4: Autokey Specification"
+//     https://tools.ietf.org/html/rfc5906
+//     This document addresses the security of the NTP protocol
+//     and is probably not relevant to this package.
+//
+// RFC 5907: "Definitions of Managed Objects for Network Time Protocol Version 4 (NTPv4)"
+//     https://tools.ietf.org/html/rfc5907
+//     This document addresses the management of NTP servers and
+//     is probably not relevant to this package.
+//
+// RFC 5908: "Network Time Protocol (NTP) Server Option for DHCPv6"
+//     https://tools.ietf.org/html/rfc5908
+//     This document addresses the use of NTP in DHCPv6 and is
+//     probably not relevant to this package.
+//
+// "Let's make a NTP Client in C"
+//     https://lettier.github.io/posts/2016-04-26-lets-make-a-ntp-client-in-c.html
+//     This web page contains useful information about the details of NTP,
+//     including an NTP record structure in C, and C code.
+//
+// "NTP Packet Header (NTP Reference Implementation) (Computer Network Time Synchronization)"
+//     http://what-when-how.com/computer-network-time-synchronization/
+//        ntp-packet-header-ntp-reference-implementation-computer-network-time-synchronization/
+//     This web page contains useful information on the details of NTP.
+//
+// "Technical information - NTP Data Packet"
+//     https://www.meinbergglobal.com/english/info/ntp-packet.htm
+//     This page has a helpful diagram of an NTP V4 packet.
+//
+//******************************************************************************
+//
+// Obsolete References
+// -------------------
+//
+// RFC 1119: "Network Time Protocol (Version 2) Specification and Implementation" (1989)
+//     https://tools.ietf.org/html/rfc1119
+//     Version 2 was drafted in 1989.
+//     It is unclear whether V2 was ever implemented or whether the
+//     ideas ended up in V3 (which was implemented in 1992).
+//
+// RFC 1361: "Simple Network Time Protocol (SNTP)"
+//     https://tools.ietf.org/html/rfc1361
+//     This document is obsoleted by RFC 1769 and is included only for completeness.
+//
+// RFC 1769: "Simple Network Time Protocol (SNTP)"
+//     https://tools.ietf.org/html/rfc1769
+//     This document is obsoleted by RFC 2030 and RFC 4330 and is included only for completeness.
+//
+// RFC 2030: "Simple Network Time Protocol (SNTP) Version 4 for IPv4, IPv6 and OSI"
+//     https://tools.ietf.org/html/rfc2030
+//     This document is obsoleted by RFC 4330 and is included only for completeness.
+//
+// RFC 4330: "Simple Network Time Protocol (SNTP) Version 4 for IPv4, IPv6 and OSI"
+//     https://tools.ietf.org/html/rfc4330
+//     This document is obsoleted by RFC 5905 and is included only for completeness.
+//
+//******************************************************************************
+//
+// Endian And Bit Numbering Issues
+// -------------------------------
+//
+// Endian and bit numbering issues can be confusing. Here is some
+// clarification:
+//
+//    ENDIAN: Values are sent big endian.
+//    https://en.wikipedia.org/wiki/Endianness
+//
+//    BIT NUMBERING: Bits are numbered 0 upwards from the most significant
+//    bit to the least significant bit. This means that if there is a 32-bit
+//    value, the most significant bit is called bit 0 and the least
+//    significant bit is called bit 31.
+//
+// See RFC 791 Appendix B for more discussion.
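+//
+//    For example, under this numbering the 2-bit Leap Indicator field
+//    occupies bits 0 and 1 of the first header byte, so it is extracted
+//    as (b & 0xC0) >> 6, exactly as DecodeFromBytes does below.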
+//
+//******************************************************************************
+//
+// NTP V3 and V4 Packet Format
+// ---------------------------
+// NTP packets are UDP packets whose payload contains an NTP record.
+//
+// The NTP RFC defines the format of the NTP record.
+//
+// There have been four versions of the protocol:
+//
+//    V1 in 1985
+//    V2 in 1989
+//    V3 in 1992
+//    V4 in 2010
+//
+// It is clear that V1 and V2 are obsolete, and there is no need to
+// cater for these formats.
+//
+// V3 and V4 essentially use the same format, with V4 adding some optional
+// fields on the end. So this package supports the V3 and V4 formats.
+//
+// The RFC for the current version of NTP (V4, RFC 5905) contains the
+// following diagram of the NTP record format:
+
+//      0                   1                   2                   3
+//      0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |LI | VN  |Mode |    Stratum    |     Poll      |   Precision   |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                         Root Delay                            |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                         Root Dispersion                       |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                          Reference ID                         |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     +                     Reference Timestamp (64)                  +
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     +                      Origin Timestamp (64)                    +
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     +                      Receive Timestamp (64)                   +
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     +                      Transmit Timestamp (64)                  +
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     .                                                               .
+//     .                    Extension Field 1 (variable)               .
+//     .                                                               .
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     .                                                               .
+//     .                    Extension Field 2 (variable)               .
+//     .                                                               .
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                          Key Identifier                       |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     |                                                               |
+//     |                            dgst (128)                         |
+//     |                                                               |
+//     +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//     From http://www.ietf.org/rfc/rfc5905.txt
+//
+// The fields "Extension Field 1 (variable)" and later are optional fields,
+// and so we can set a minimum NTP record size of 48 bytes.
+//
+const ntpMinimumRecordSizeInBytes int = 48
+
+//******************************************************************************
+
+// NTP Type
+// --------
+// Type NTP implements the DecodingLayer interface. Each NTP object
+// represents, in structured form, the NTP record carried as the UDP
+// payload of an NTP packet.
+//
+
+type NTPLeapIndicator uint8
+type NTPVersion uint8
+type NTPMode uint8
+type NTPStratum uint8
+type NTPLog2Seconds int8
+type NTPFixed16Seconds uint32
+type NTPReferenceID uint32
+type NTPTimestamp uint64
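+
+// An NTP timestamp, per RFC 5905, packs 32 bits of seconds since
+// 1 January 1900 UTC in its high word and 32 bits of fractional seconds
+// in its low word.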
+
+type NTP struct {
+	BaseLayer // Stores the packet bytes and payload bytes.
+
+	LeapIndicator      NTPLeapIndicator  // [0,3]. Indicates whether leap second(s) is to be added.
+	Version            NTPVersion        // [0,7]. Version of the NTP protocol.
+	Mode               NTPMode           // [0,7]. Mode.
+	Stratum            NTPStratum        // [0,255]. Stratum of time server in the server tree.
+	Poll               NTPLog2Seconds    // [-128,127]. The maximum interval between successive messages, in log2 seconds.
+	Precision          NTPLog2Seconds    // [-128,127]. The precision of the system clock, in log2 seconds.
+	RootDelay          NTPFixed16Seconds // [0,2^32-1]. Total round trip delay to the reference clock in seconds times 2^16.
+	RootDispersion     NTPFixed16Seconds // [0,2^32-1]. Total dispersion to the reference clock, in seconds times 2^16.
+	ReferenceID        NTPReferenceID    // ID code of reference clock [0,2^32-1].
+	ReferenceTimestamp NTPTimestamp      // Most recent timestamp from the reference clock.
+	OriginTimestamp    NTPTimestamp      // Local time when request was sent from local host.
+	ReceiveTimestamp   NTPTimestamp      // Local time (on server) that request arrived at server host.
+	TransmitTimestamp  NTPTimestamp      // Local time (on server) that request departed server host.
+
+	// FIX: This package should analyse the extension fields and represent them in structured form too.
+	ExtensionBytes []byte // Just put extensions in a byte slice.
+}
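+
+// Illustrative usage (a sketch; the variable names are placeholders): once
+// gopacket has decoded a captured packet, the NTP layer can be retrieved
+// and its fields inspected.
+//
+//	pkt := gopacket.NewPacket(raw, LayerTypeEthernet, gopacket.Default)
+//	if l := pkt.Layer(LayerTypeNTP); l != nil {
+//		ntp := l.(*NTP)
+//		fmt.Println(ntp.Version, ntp.Stratum, ntp.TransmitTimestamp)
+//	}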
+
+//******************************************************************************
+
+// LayerType returns the layer type of the NTP object, which is LayerTypeNTP.
+func (d *NTP) LayerType() gopacket.LayerType {
+	return LayerTypeNTP
+}
+
+//******************************************************************************
+
+// decodeNTP analyses a byte slice and attempts to decode it as an NTP
+// record of a UDP packet.
+//
+// If it succeeds, it loads p with information about the packet and returns nil.
+// If it fails, it returns a non-nil error.
+//
+// This function is employed in layertypes.go to register the NTP layer.
+func decodeNTP(data []byte, p gopacket.PacketBuilder) error {
+
+	// Attempt to decode the byte slice.
+	d := &NTP{}
+	err := d.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+
+	// If the decoding worked, add the layer to the packet and set it
+	// as the application layer too, if there isn't already one.
+	p.AddLayer(d)
+	p.SetApplicationLayer(d)
+
+	return nil
+}
+
+//******************************************************************************
+
+// DecodeFromBytes analyses a byte slice and attempts to decode it as an NTP
+// record of a UDP packet.
+//
+// Upon success, it loads the NTP object with information about the packet
+// and returns nil.
+// Upon failure, it returns a non-nil error.
+func (d *NTP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	// If the data block is too short to be a NTP record, then return an error.
+	if len(data) < ntpMinimumRecordSizeInBytes {
+		df.SetTruncated()
+		return errors.New("NTP packet too short")
+	}
+
+	// RFC 5905 does not appear to define a maximum NTP record length.
+	// The protocol allows "extension fields" to be included in the record,
+	// and states about these fields:
+	//
+	//     "While the minimum field length containing required fields is
+	//      four words (16 octets), a maximum field length remains to be
+	//      established."
+	//
+	// For this reason, the packet length is not checked here for being too long.
+
+	// NTP type embeds type BaseLayer which contains two fields:
+	//    Contents is supposed to contain the bytes of the data at this level.
+	//    Payload is supposed to contain the payload of this level.
+	// Here we set the baselayer to be the bytes of the NTP record.
+	d.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+
+	// Extract the fields from the block of bytes.
+	// To make sense of this, refer to the packet diagram
+	// above and the section on endian conventions.
+
+	// The first few fields are all packed into the first 32 bits. Unpack them.
+	f := data[0]
+	d.LeapIndicator = NTPLeapIndicator((f & 0xC0) >> 6)
+	d.Version = NTPVersion((f & 0x38) >> 3)
+	d.Mode = NTPMode(f & 0x07)
+	d.Stratum = NTPStratum(data[1])
+	d.Poll = NTPLog2Seconds(data[2])
+	d.Precision = NTPLog2Seconds(data[3])
+
+	// The remaining fields can just be copied in big endian order.
+	d.RootDelay = NTPFixed16Seconds(binary.BigEndian.Uint32(data[4:8]))
+	d.RootDispersion = NTPFixed16Seconds(binary.BigEndian.Uint32(data[8:12]))
+	d.ReferenceID = NTPReferenceID(binary.BigEndian.Uint32(data[12:16]))
+	d.ReferenceTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[16:24]))
+	d.OriginTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[24:32]))
+	d.ReceiveTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[32:40]))
+	d.TransmitTimestamp = NTPTimestamp(binary.BigEndian.Uint64(data[40:48]))
+
+	// This layer does not attempt to analyse the extension bytes.
+	// But if there are any, we'd like the user to know. So we just
+	// place them all in an ExtensionBytes field.
+	d.ExtensionBytes = data[48:]
+
+	// Return no error.
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (d *NTP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	data, err := b.PrependBytes(ntpMinimumRecordSizeInBytes)
+	if err != nil {
+		return err
+	}
+
+	// Pack the first few fields into the first 32 bits.
+	h := uint8(0)
+	h |= (uint8(d.LeapIndicator) << 6) & 0xC0
+	h |= (uint8(d.Version) << 3) & 0x38
+	h |= (uint8(d.Mode)) & 0x07
+	data[0] = byte(h)
+	data[1] = byte(d.Stratum)
+	data[2] = byte(d.Poll)
+	data[3] = byte(d.Precision)
+
+	// The remaining fields can just be copied in big endian order.
+	binary.BigEndian.PutUint32(data[4:8], uint32(d.RootDelay))
+	binary.BigEndian.PutUint32(data[8:12], uint32(d.RootDispersion))
+	binary.BigEndian.PutUint32(data[12:16], uint32(d.ReferenceID))
+	binary.BigEndian.PutUint64(data[16:24], uint64(d.ReferenceTimestamp))
+	binary.BigEndian.PutUint64(data[24:32], uint64(d.OriginTimestamp))
+	binary.BigEndian.PutUint64(data[32:40], uint64(d.ReceiveTimestamp))
+	binary.BigEndian.PutUint64(data[40:48], uint64(d.TransmitTimestamp))
+
+	ex, err := b.AppendBytes(len(d.ExtensionBytes))
+	if err != nil {
+		return err
+	}
+	copy(ex, d.ExtensionBytes)
+
+	return nil
+}
+
+//******************************************************************************
+
+// CanDecode returns a set of layers that NTP objects can decode.
+// As NTP objects can only decode the NTP layer, we can return just that layer.
+// A single layer type implements the LayerClass interface, so it can be returned directly.
+func (d *NTP) CanDecode() gopacket.LayerClass {
+	return LayerTypeNTP
+}
+
+//******************************************************************************
+
+// NextLayerType specifies the next layer that GoPacket should attempt to
+// analyse after this (NTP) layer. As NTP packets do not contain any payload
+// bytes, there are no further layers to analyse.
+func (d *NTP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+//******************************************************************************
+
+// Payload returns nil; NTP packets do not carry any data payload.
+// In Go, a nil slice is functionally identical to an empty slice, so we
+// return nil to avoid a heap allocation.
+func (d *NTP) Payload() []byte {
+	return nil
+}
+
+//******************************************************************************
+//*                            End Of NTP File                                 *
+//******************************************************************************
diff --git a/vendor/github.com/google/gopacket/layers/ospf.go b/vendor/github.com/google/gopacket/layers/ospf.go
new file mode 100644
index 0000000..4f5473d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ospf.go
@@ -0,0 +1,715 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// OSPFType denotes what kind of OSPF type it is
+type OSPFType uint8
+
+// Potential values for OSPF.Type.
+const (
+	OSPFHello                   OSPFType = 1
+	OSPFDatabaseDescription     OSPFType = 2
+	OSPFLinkStateRequest        OSPFType = 3
+	OSPFLinkStateUpdate         OSPFType = 4
+	OSPFLinkStateAcknowledgment OSPFType = 5
+)
+
+// LSA Function Codes for LSAheader.LSType
+const (
+	RouterLSAtypeV2         = 0x1
+	RouterLSAtype           = 0x2001
+	NetworkLSAtypeV2        = 0x2
+	NetworkLSAtype          = 0x2002
+	SummaryLSANetworktypeV2 = 0x3
+	InterAreaPrefixLSAtype  = 0x2003
+	SummaryLSAASBRtypeV2    = 0x4
+	InterAreaRouterLSAtype  = 0x2004
+	ASExternalLSAtypeV2     = 0x5
+	ASExternalLSAtype       = 0x4005
+	NSSALSAtype             = 0x2007
+	NSSALSAtypeV2           = 0x7
+	LinkLSAtype             = 0x0008
+	IntraAreaPrefixLSAtype  = 0x2009
+)
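+
+// The *V2 constants are the OSPFv2 LS types from RFC 2328; the remaining
+// constants are the OSPFv3 LS types from RFC 5340, which also encode the
+// flooding scope in their upper bits (for example, 0x2001 is the area-scoped
+// Router-LSA and 0x4005 the AS-scoped AS-External-LSA).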
+
+// String conversions for OSPFType
+func (i OSPFType) String() string {
+	switch i {
+	case OSPFHello:
+		return "Hello"
+	case OSPFDatabaseDescription:
+		return "Database Description"
+	case OSPFLinkStateRequest:
+		return "Link State Request"
+	case OSPFLinkStateUpdate:
+		return "Link State Update"
+	case OSPFLinkStateAcknowledgment:
+		return "Link State Acknowledgment"
+	default:
+		return ""
+	}
+}
+
+// Prefix extends IntraAreaPrefixLSA
+type Prefix struct {
+	PrefixLength  uint8
+	PrefixOptions uint8
+	Metric        uint16
+	AddressPrefix []byte
+}
+
+// IntraAreaPrefixLSA is the struct from RFC 5340  A.4.10.
+type IntraAreaPrefixLSA struct {
+	NumOfPrefixes  uint16
+	RefLSType      uint16
+	RefLinkStateID uint32
+	RefAdvRouter   uint32
+	Prefixes       []Prefix
+}
+
+// LinkLSA is the struct from RFC 5340  A.4.9.
+type LinkLSA struct {
+	RtrPriority      uint8
+	Options          uint32
+	LinkLocalAddress []byte
+	NumOfPrefixes    uint32
+	Prefixes         []Prefix
+}
+
+// ASExternalLSAV2 is the struct from RFC 2328  A.4.5.
+type ASExternalLSAV2 struct {
+	NetworkMask       uint32
+	ExternalBit       uint8
+	Metric            uint32
+	ForwardingAddress uint32
+	ExternalRouteTag  uint32
+}
+
+// ASExternalLSA is the struct from RFC 5340  A.4.7.
+type ASExternalLSA struct {
+	Flags             uint8
+	Metric            uint32
+	PrefixLength      uint8
+	PrefixOptions     uint8
+	RefLSType         uint16
+	AddressPrefix     []byte
+	ForwardingAddress []byte
+	ExternalRouteTag  uint32
+	RefLinkStateID    uint32
+}
+
+// InterAreaRouterLSA is the struct from RFC 5340  A.4.6.
+type InterAreaRouterLSA struct {
+	Options             uint32
+	Metric              uint32
+	DestinationRouterID uint32
+}
+
+// InterAreaPrefixLSA is the struct from RFC 5340  A.4.5.
+type InterAreaPrefixLSA struct {
+	Metric        uint32
+	PrefixLength  uint8
+	PrefixOptions uint8
+	AddressPrefix []byte
+}
+
+// NetworkLSA is the struct from RFC 5340  A.4.4.
+type NetworkLSA struct {
+	Options        uint32
+	AttachedRouter []uint32
+}
+
+// NetworkLSAV2 is the struct from RFC 2328  A.4.3.
+type NetworkLSAV2 struct {
+	NetworkMask    uint32
+	AttachedRouter []uint32
+}
+
+// RouterV2 extends RouterLSAV2
+type RouterV2 struct {
+	Type     uint8
+	LinkID   uint32
+	LinkData uint32
+	Metric   uint16
+}
+
+// RouterLSAV2 is the struct from RFC 2328  A.4.2.
+type RouterLSAV2 struct {
+	Flags   uint8
+	Links   uint16
+	Routers []RouterV2
+}
+
+// Router extends RouterLSA
+type Router struct {
+	Type                uint8
+	Metric              uint16
+	InterfaceID         uint32
+	NeighborInterfaceID uint32
+	NeighborRouterID    uint32
+}
+
+// RouterLSA is the struct from RFC 5340  A.4.3.
+type RouterLSA struct {
+	Flags   uint8
+	Options uint32
+	Routers []Router
+}
+
+// LSAheader is the struct from RFC 5340  A.4.2 and RFC 2328 A.4.1.
+type LSAheader struct {
+	LSAge       uint16
+	LSType      uint16
+	LinkStateID uint32
+	AdvRouter   uint32
+	LSSeqNumber uint32
+	LSChecksum  uint16
+	Length      uint16
+	LSOptions   uint8
+}
+
+// LSA links LSAheader with the structs from RFC 5340  A.4.
+type LSA struct {
+	LSAheader
+	Content interface{}
+}
+
+// LSUpdate is the struct from RFC 5340  A.3.5.
+type LSUpdate struct {
+	NumOfLSAs uint32
+	LSAs      []LSA
+}
+
+// LSReq is the struct from RFC 5340  A.3.4.
+type LSReq struct {
+	LSType    uint16
+	LSID      uint32
+	AdvRouter uint32
+}
+
+// DbDescPkg is the struct from RFC 5340  A.3.3.
+type DbDescPkg struct {
+	Options      uint32
+	InterfaceMTU uint16
+	Flags        uint16
+	DDSeqNumber  uint32
+	LSAinfo      []LSAheader
+}
+
+// HelloPkg  is the struct from RFC 5340  A.3.2.
+type HelloPkg struct {
+	InterfaceID              uint32
+	RtrPriority              uint8
+	Options                  uint32
+	HelloInterval            uint16
+	RouterDeadInterval       uint32
+	DesignatedRouterID       uint32
+	BackupDesignatedRouterID uint32
+	NeighborID               []uint32
+}
+
+// HelloPkgV2 extends the HelloPkg struct with OSPFv2 information
+type HelloPkgV2 struct {
+	HelloPkg
+	NetworkMask uint32
+}
+
+// OSPF is a basic OSPF packet header with common fields of Version 2 and Version 3.
+type OSPF struct {
+	Version      uint8
+	Type         OSPFType
+	PacketLength uint16
+	RouterID     uint32
+	AreaID       uint32
+	Checksum     uint16
+	Content      interface{}
+}
+
+// OSPFv2 extends the OSPF header with version 2 specific fields.
+type OSPFv2 struct {
+	BaseLayer
+	OSPF
+	AuType         uint16
+	Authentication uint64
+}
+
+// OSPFv3 extends the OSPF header with version 3 specific fields.
+type OSPFv3 struct {
+	BaseLayer
+	OSPF
+	Instance uint8
+	Reserved uint8
+}
+
+// getLSAsv2 parses the LSA information from the packet for OSPFv2
+func getLSAsv2(num uint32, data []byte) ([]LSA, error) {
+	var lsas []LSA
+	var i uint32 = 0
+	var offset uint32 = 0
+	for ; i < num; i++ {
+		lstype := uint16(data[offset+3])
+		lsalength := binary.BigEndian.Uint16(data[offset+18 : offset+20])
+		content, err := extractLSAInformation(lstype, lsalength, data[offset:])
+		if err != nil {
+			return nil, fmt.Errorf("Could not extract Link State type.")
+		}
+		lsa := LSA{
+			LSAheader: LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[offset : offset+2]),
+				LSOptions:   data[offset+2],
+				LSType:      lstype,
+				LinkStateID: binary.BigEndian.Uint32(data[offset+4 : offset+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[offset+8 : offset+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[offset+12 : offset+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[offset+16 : offset+18]),
+				Length:      lsalength,
+			},
+			Content: content,
+		}
+		lsas = append(lsas, lsa)
+		offset += uint32(lsalength)
+	}
+	return lsas, nil
+}
+
+// extractLSAInformation extracts all the LSA information
+func extractLSAInformation(lstype, lsalength uint16, data []byte) (interface{}, error) {
+	if lsalength < 20 {
+		return nil, fmt.Errorf("Link State header length %v too short, %v required", lsalength, 20)
+	}
+	if len(data) < int(lsalength) {
+		return nil, fmt.Errorf("Link State data length %v too short, %v required", len(data), lsalength)
+	}
+	var content interface{}
+	switch lstype {
+	case RouterLSAtypeV2:
+		var routers []RouterV2
+		var j uint32
+		for j = 24; j < uint32(lsalength); j += 12 {
+			if len(data) < int(j+12) {
+				return nil, errors.New("LSAtypeV2 too small")
+			}
+			router := RouterV2{
+				LinkID:   binary.BigEndian.Uint32(data[j : j+4]),
+				LinkData: binary.BigEndian.Uint32(data[j+4 : j+8]),
+				Type:     uint8(data[j+8]),
+				Metric:   binary.BigEndian.Uint16(data[j+10 : j+12]),
+			}
+			routers = append(routers, router)
+		}
+		if len(data) < 24 {
+			return nil, errors.New("LSAtypeV2 too small")
+		}
+		links := binary.BigEndian.Uint16(data[22:24])
+		content = RouterLSAV2{
+			Flags:   data[20],
+			Links:   links,
+			Routers: routers,
+		}
+	case NSSALSAtypeV2:
+		fallthrough
+	case ASExternalLSAtypeV2:
+		content = ASExternalLSAV2{
+			NetworkMask:       binary.BigEndian.Uint32(data[20:24]),
+			ExternalBit:       data[24] & 0x80,
+			Metric:            binary.BigEndian.Uint32(data[24:28]) & 0x00FFFFFF,
+			ForwardingAddress: binary.BigEndian.Uint32(data[28:32]),
+			ExternalRouteTag:  binary.BigEndian.Uint32(data[32:36]),
+		}
+	case NetworkLSAtypeV2:
+		var routers []uint32
+		var j uint32
+		for j = 24; j < uint32(lsalength); j += 4 {
+			routers = append(routers, binary.BigEndian.Uint32(data[j:j+4]))
+		}
+		content = NetworkLSAV2{
+			NetworkMask:    binary.BigEndian.Uint32(data[20:24]),
+			AttachedRouter: routers,
+		}
+	case RouterLSAtype:
+		var routers []Router
+		var j uint32
+		for j = 24; j < uint32(lsalength); j += 16 {
+			router := Router{
+				Type:                uint8(data[j]),
+				Metric:              binary.BigEndian.Uint16(data[j+2 : j+4]),
+				InterfaceID:         binary.BigEndian.Uint32(data[j+4 : j+8]),
+				NeighborInterfaceID: binary.BigEndian.Uint32(data[j+8 : j+12]),
+				NeighborRouterID:    binary.BigEndian.Uint32(data[j+12 : j+16]),
+			}
+			routers = append(routers, router)
+		}
+		content = RouterLSA{
+			Flags:   uint8(data[20]),
+			Options: binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+			Routers: routers,
+		}
+	case NetworkLSAtype:
+		var routers []uint32
+		var j uint32
+		for j = 24; j < uint32(lsalength); j += 4 {
+			routers = append(routers, binary.BigEndian.Uint32(data[j:j+4]))
+		}
+		content = NetworkLSA{
+			Options:        binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+			AttachedRouter: routers,
+		}
+	case InterAreaPrefixLSAtype:
+		content = InterAreaPrefixLSA{
+			Metric:        binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+			PrefixLength:  uint8(data[24]),
+			PrefixOptions: uint8(data[25]),
+			AddressPrefix: data[28:uint32(lsalength)],
+		}
+	case InterAreaRouterLSAtype:
+		content = InterAreaRouterLSA{
+			Options:             binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+			Metric:              binary.BigEndian.Uint32(data[24:28]) & 0x00FFFFFF,
+			DestinationRouterID: binary.BigEndian.Uint32(data[28:32]),
+		}
+	case ASExternalLSAtype:
+		fallthrough
+	case NSSALSAtype:
+		flags := uint8(data[20])
+		prefixLen := uint8(data[24]) / 8
+		var forwardingAddress []byte
+		if (flags & 0x02) == 0x02 {
+			forwardingAddress = data[28+uint32(prefixLen) : 28+uint32(prefixLen)+16]
+		}
+		content = ASExternalLSA{
+			Flags:             flags,
+			Metric:            binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+			PrefixLength:      prefixLen,
+			PrefixOptions:     uint8(data[25]),
+			RefLSType:         binary.BigEndian.Uint16(data[26:28]),
+			AddressPrefix:     data[28 : 28+uint32(prefixLen)],
+			ForwardingAddress: forwardingAddress,
+		}
+	case LinkLSAtype:
+		var prefixes []Prefix
+		var prefixOffset uint32 = 44
+		var j uint32
+		numOfPrefixes := binary.BigEndian.Uint32(data[40:44])
+		for j = 0; j < numOfPrefixes; j++ {
+			prefixLen := uint8(data[prefixOffset])
+			prefix := Prefix{
+				PrefixLength:  prefixLen,
+				PrefixOptions: uint8(data[prefixOffset+1]),
+				AddressPrefix: data[prefixOffset+4 : prefixOffset+4+uint32(prefixLen)/8],
+			}
+			prefixes = append(prefixes, prefix)
+			prefixOffset = prefixOffset + 4 + uint32(prefixLen)/8
+		}
+		content = LinkLSA{
+			RtrPriority:      uint8(data[20]),
+			Options:          binary.BigEndian.Uint32(data[20:24]) & 0x00FFFFFF,
+			LinkLocalAddress: data[24:40],
+			NumOfPrefixes:    numOfPrefixes,
+			Prefixes:         prefixes,
+		}
+	case IntraAreaPrefixLSAtype:
+		var prefixes []Prefix
+		var prefixOffset uint32 = 32
+		var j uint16
+		numOfPrefixes := binary.BigEndian.Uint16(data[20:22])
+		for j = 0; j < numOfPrefixes; j++ {
+			prefixLen := uint8(data[prefixOffset])
+			prefix := Prefix{
+				PrefixLength:  prefixLen,
+				PrefixOptions: uint8(data[prefixOffset+1]),
+				Metric:        binary.BigEndian.Uint16(data[prefixOffset+2 : prefixOffset+4]),
+				AddressPrefix: data[prefixOffset+4 : prefixOffset+4+uint32(prefixLen)/8],
+			}
+			prefixes = append(prefixes, prefix)
+			prefixOffset = prefixOffset + 4 + uint32(prefixLen)
+		}
+		content = IntraAreaPrefixLSA{
+			NumOfPrefixes:  numOfPrefixes,
+			RefLSType:      binary.BigEndian.Uint16(data[22:24]),
+			RefLinkStateID: binary.BigEndian.Uint32(data[24:28]),
+			RefAdvRouter:   binary.BigEndian.Uint32(data[28:32]),
+			Prefixes:       prefixes,
+		}
+	default:
+		return nil, fmt.Errorf("Unknown Link State type.")
+	}
+	return content, nil
+}
+
+// getLSAs parses the LSA information from the packet for OSPFv3
+func getLSAs(num uint32, data []byte) ([]LSA, error) {
+	var lsas []LSA
+	var i uint32 = 0
+	var offset uint32 = 0
+	for ; i < num; i++ {
+		var content interface{}
+		lstype := binary.BigEndian.Uint16(data[offset+2 : offset+4])
+		lsalength := binary.BigEndian.Uint16(data[offset+18 : offset+20])
+
+		content, err := extractLSAInformation(lstype, lsalength, data[offset:])
+		if err != nil {
+			return nil, fmt.Errorf("Could not extract Link State type.")
+		}
+		lsa := LSA{
+			LSAheader: LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[offset : offset+2]),
+				LSType:      lstype,
+				LinkStateID: binary.BigEndian.Uint32(data[offset+4 : offset+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[offset+8 : offset+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[offset+12 : offset+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[offset+16 : offset+18]),
+				Length:      lsalength,
+			},
+			Content: content,
+		}
+		lsas = append(lsas, lsa)
+		offset += uint32(lsalength)
+	}
+	return lsas, nil
+}
+
+// DecodeFromBytes decodes the given bytes into the OSPF layer.
+func (ospf *OSPFv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 24 {
+		return fmt.Errorf("Packet too small for OSPF Version 2")
+	}
+
+	ospf.Version = uint8(data[0])
+	ospf.Type = OSPFType(data[1])
+	ospf.PacketLength = binary.BigEndian.Uint16(data[2:4])
+	ospf.RouterID = binary.BigEndian.Uint32(data[4:8])
+	ospf.AreaID = binary.BigEndian.Uint32(data[8:12])
+	ospf.Checksum = binary.BigEndian.Uint16(data[12:14])
+	ospf.AuType = binary.BigEndian.Uint16(data[14:16])
+	ospf.Authentication = binary.BigEndian.Uint64(data[16:24])
+
+	switch ospf.Type {
+	case OSPFHello:
+		var neighbors []uint32
+		for i := 44; uint16(i+4) <= ospf.PacketLength; i += 4 {
+			neighbors = append(neighbors, binary.BigEndian.Uint32(data[i:i+4]))
+		}
+		ospf.Content = HelloPkgV2{
+			NetworkMask: binary.BigEndian.Uint32(data[24:28]),
+			HelloPkg: HelloPkg{
+				HelloInterval:            binary.BigEndian.Uint16(data[28:30]),
+				Options:                  uint32(data[30]),
+				RtrPriority:              uint8(data[31]),
+				RouterDeadInterval:       binary.BigEndian.Uint32(data[32:36]),
+				DesignatedRouterID:       binary.BigEndian.Uint32(data[36:40]),
+				BackupDesignatedRouterID: binary.BigEndian.Uint32(data[40:44]),
+				NeighborID:               neighbors,
+			},
+		}
+	case OSPFDatabaseDescription:
+		var lsas []LSAheader
+		for i := 32; uint16(i+20) <= ospf.PacketLength; i += 20 {
+			lsa := LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+				LSType:      binary.BigEndian.Uint16(data[i+2 : i+4]),
+				LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+				Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+			}
+			lsas = append(lsas, lsa)
+		}
+		ospf.Content = DbDescPkg{
+			InterfaceMTU: binary.BigEndian.Uint16(data[24:26]),
+			Options:      uint32(data[26]),
+			Flags:        uint16(data[27]),
+			DDSeqNumber:  binary.BigEndian.Uint32(data[28:32]),
+			LSAinfo:      lsas,
+		}
+	case OSPFLinkStateRequest:
+		var lsrs []LSReq
+		for i := 24; uint16(i+12) <= ospf.PacketLength; i += 12 {
+			lsr := LSReq{
+				LSType:    binary.BigEndian.Uint16(data[i+2 : i+4]),
+				LSID:      binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]),
+			}
+			lsrs = append(lsrs, lsr)
+		}
+		ospf.Content = lsrs
+	case OSPFLinkStateUpdate:
+		num := binary.BigEndian.Uint32(data[24:28])
+
+		lsas, err := getLSAsv2(num, data[28:])
+		if err != nil {
+			return fmt.Errorf("Cannot parse Link State Update packet: %v", err)
+		}
+		ospf.Content = LSUpdate{
+			NumOfLSAs: num,
+			LSAs:      lsas,
+		}
+	case OSPFLinkStateAcknowledgment:
+		var lsas []LSAheader
+		for i := 24; uint16(i+20) <= ospf.PacketLength; i += 20 {
+			lsa := LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+				LSOptions:   data[i+2],
+				LSType:      uint16(data[i+3]),
+				LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+				Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+			}
+			lsas = append(lsas, lsa)
+		}
+		ospf.Content = lsas
+	}
+	return nil
+}
+
+// DecodeFromBytes decodes the given bytes into the OSPF layer.
+func (ospf *OSPFv3) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	if len(data) < 16 {
+		return fmt.Errorf("Packet too small for OSPF Version 3")
+	}
+
+	ospf.Version = uint8(data[0])
+	ospf.Type = OSPFType(data[1])
+	ospf.PacketLength = binary.BigEndian.Uint16(data[2:4])
+	ospf.RouterID = binary.BigEndian.Uint32(data[4:8])
+	ospf.AreaID = binary.BigEndian.Uint32(data[8:12])
+	ospf.Checksum = binary.BigEndian.Uint16(data[12:14])
+	ospf.Instance = uint8(data[14])
+	ospf.Reserved = uint8(data[15])
+
+	switch ospf.Type {
+	case OSPFHello:
+		var neighbors []uint32
+		for i := 36; uint16(i+4) <= ospf.PacketLength; i += 4 {
+			neighbors = append(neighbors, binary.BigEndian.Uint32(data[i:i+4]))
+		}
+		ospf.Content = HelloPkg{
+			InterfaceID:              binary.BigEndian.Uint32(data[16:20]),
+			RtrPriority:              uint8(data[20]),
+			Options:                  binary.BigEndian.Uint32(data[21:25]) >> 8,
+			HelloInterval:            binary.BigEndian.Uint16(data[24:26]),
+			RouterDeadInterval:       uint32(binary.BigEndian.Uint16(data[26:28])),
+			DesignatedRouterID:       binary.BigEndian.Uint32(data[28:32]),
+			BackupDesignatedRouterID: binary.BigEndian.Uint32(data[32:36]),
+			NeighborID:               neighbors,
+		}
+	case OSPFDatabaseDescription:
+		var lsas []LSAheader
+		for i := 28; uint16(i+20) <= ospf.PacketLength; i += 20 {
+			lsa := LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+				LSType:      binary.BigEndian.Uint16(data[i+2 : i+4]),
+				LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+				Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+			}
+			lsas = append(lsas, lsa)
+		}
+		ospf.Content = DbDescPkg{
+			Options:      binary.BigEndian.Uint32(data[16:20]) & 0x00FFFFFF,
+			InterfaceMTU: binary.BigEndian.Uint16(data[20:22]),
+			Flags:        binary.BigEndian.Uint16(data[22:24]),
+			DDSeqNumber:  binary.BigEndian.Uint32(data[24:28]),
+			LSAinfo:      lsas,
+		}
+	case OSPFLinkStateRequest:
+		var lsrs []LSReq
+		for i := 16; uint16(i+12) <= ospf.PacketLength; i += 12 {
+			lsr := LSReq{
+				LSType:    binary.BigEndian.Uint16(data[i+2 : i+4]),
+				LSID:      binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter: binary.BigEndian.Uint32(data[i+8 : i+12]),
+			}
+			lsrs = append(lsrs, lsr)
+		}
+		ospf.Content = lsrs
+	case OSPFLinkStateUpdate:
+		num := binary.BigEndian.Uint32(data[16:20])
+		lsas, err := getLSAs(num, data[20:])
+		if err != nil {
+			return fmt.Errorf("Cannot parse Link State Update packet: %v", err)
+		}
+		ospf.Content = LSUpdate{
+			NumOfLSAs: num,
+			LSAs:      lsas,
+		}
+
+	case OSPFLinkStateAcknowledgment:
+		var lsas []LSAheader
+		for i := 16; uint16(i+20) <= ospf.PacketLength; i += 20 {
+			lsa := LSAheader{
+				LSAge:       binary.BigEndian.Uint16(data[i : i+2]),
+				LSType:      binary.BigEndian.Uint16(data[i+2 : i+4]),
+				LinkStateID: binary.BigEndian.Uint32(data[i+4 : i+8]),
+				AdvRouter:   binary.BigEndian.Uint32(data[i+8 : i+12]),
+				LSSeqNumber: binary.BigEndian.Uint32(data[i+12 : i+16]),
+				LSChecksum:  binary.BigEndian.Uint16(data[i+16 : i+18]),
+				Length:      binary.BigEndian.Uint16(data[i+18 : i+20]),
+			}
+			lsas = append(lsas, lsa)
+		}
+		ospf.Content = lsas
+	default:
+	}
+
+	return nil
+}
+
+// LayerType returns LayerTypeOSPF
+func (ospf *OSPFv2) LayerType() gopacket.LayerType {
+	return LayerTypeOSPF
+}
+func (ospf *OSPFv3) LayerType() gopacket.LayerType {
+	return LayerTypeOSPF
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (ospf *OSPFv2) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+func (ospf *OSPFv3) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (ospf *OSPFv2) CanDecode() gopacket.LayerClass {
+	return LayerTypeOSPF
+}
+func (ospf *OSPFv3) CanDecode() gopacket.LayerClass {
+	return LayerTypeOSPF
+}
+
+func decodeOSPF(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 14 {
+		return fmt.Errorf("Packet too small for OSPF")
+	}
+
+	switch uint8(data[0]) {
+	case 2:
+		ospf := &OSPFv2{}
+		return decodingLayerDecoder(ospf, data, p)
+	case 3:
+		ospf := &OSPFv3{}
+		return decodingLayerDecoder(ospf, data, p)
+	default:
+	}
+
+	return fmt.Errorf("Unable to determine OSPF type.")
+}
diff --git a/vendor/github.com/google/gopacket/layers/pflog.go b/vendor/github.com/google/gopacket/layers/pflog.go
new file mode 100644
index 0000000..9dbbd90
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/pflog.go
@@ -0,0 +1,84 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+type PFDirection uint8
+
+const (
+	PFDirectionInOut PFDirection = 0
+	PFDirectionIn    PFDirection = 1
+	PFDirectionOut   PFDirection = 2
+)
+
+// PFLog provides the layer for 'pf' packet-filter logging, as described at
+// http://www.freebsd.org/cgi/man.cgi?query=pflog&sektion=4
+type PFLog struct {
+	BaseLayer
+	Length              uint8
+	Family              ProtocolFamily
+	Action, Reason      uint8
+	IFName, Ruleset     []byte
+	RuleNum, SubruleNum uint32
+	UID                 uint32
+	PID                 int32
+	RuleUID             uint32
+	RulePID             int32
+	Direction           PFDirection
+	// The remainder is padding
+}
+
+func (pf *PFLog) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 60 {
+		df.SetTruncated()
+		return errors.New("PFLog data less than 60 bytes")
+	}
+	pf.Length = data[0]
+	pf.Family = ProtocolFamily(data[1])
+	pf.Action = data[2]
+	pf.Reason = data[3]
+	pf.IFName = data[4:20]
+	pf.Ruleset = data[20:36]
+	pf.RuleNum = binary.BigEndian.Uint32(data[36:40])
+	pf.SubruleNum = binary.BigEndian.Uint32(data[40:44])
+	pf.UID = binary.BigEndian.Uint32(data[44:48])
+	pf.PID = int32(binary.BigEndian.Uint32(data[48:52]))
+	pf.RuleUID = binary.BigEndian.Uint32(data[52:56])
+	pf.RulePID = int32(binary.BigEndian.Uint32(data[56:60]))
+	pf.Direction = PFDirection(data[60])
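+	// pf.Length reports the header size minus 3 bytes of trailing padding,
+	// so a valid value is congruent to 1 modulo 4 and the padded header
+	// occupies Length+3 bytes on the wire.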
+	if pf.Length%4 != 1 {
+		return errors.New("PFLog header length should be 3 less than multiple of 4")
+	}
+	actualLength := int(pf.Length) + 3
+	if len(data) < actualLength {
+		return fmt.Errorf("PFLog data size < %d", actualLength)
+	}
+	pf.Contents = data[:actualLength]
+	pf.Payload = data[actualLength:]
+	return nil
+}
+
+// LayerType returns layers.LayerTypePFLog
+func (pf *PFLog) LayerType() gopacket.LayerType { return LayerTypePFLog }
+
+func (pf *PFLog) CanDecode() gopacket.LayerClass { return LayerTypePFLog }
+
+func (pf *PFLog) NextLayerType() gopacket.LayerType {
+	return pf.Family.LayerType()
+}
+
+func decodePFLog(data []byte, p gopacket.PacketBuilder) error {
+	pf := &PFLog{}
+	return decodingLayerDecoder(pf, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/ports.go b/vendor/github.com/google/gopacket/layers/ports.go
new file mode 100644
index 0000000..7c20449
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ports.go
@@ -0,0 +1,201 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"fmt"
+	"strconv"
+
+	"github.com/google/gopacket"
+)
+
+// TCPPort is a port in a TCP layer.
+type TCPPort uint16
+
+// UDPPort is a port in a UDP layer.
+type UDPPort uint16
+
+// RUDPPort is a port in a RUDP layer.
+type RUDPPort uint8
+
+// SCTPPort is a port in a SCTP layer.
+type SCTPPort uint16
+
+// UDPLitePort is a port in a UDPLite layer.
+type UDPLitePort uint16
+
+// RUDPPortNames contains the string names for all RUDP ports.
+var RUDPPortNames = map[RUDPPort]string{}
+
+// UDPLitePortNames contains the string names for all UDPLite ports.
+var UDPLitePortNames = map[UDPLitePort]string{}
+
+// {TCP,UDP,SCTP}PortNames can be found in iana_ports.go
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't.  Well-known names are stored in
+// TCPPortNames.
+func (a TCPPort) String() string {
+	if name, ok := TCPPortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
+
+// LayerType returns a LayerType that would be able to decode the
+// application payload. It uses some well-known ports such as 53 for
+// DNS.
+//
+// Returns gopacket.LayerTypePayload for unknown/unsupported port numbers.
+func (a TCPPort) LayerType() gopacket.LayerType {
+	if tcpPortLayerTypeOverride.has(uint16(a)) {
+		return tcpPortLayerType[a]
+	}
+	switch a {
+	case 53:
+		return LayerTypeDNS
+	case 443: // https
+		return LayerTypeTLS
+	case 502: // modbustcp
+		return LayerTypeModbusTCP
+	case 636: // ldaps
+		return LayerTypeTLS
+	case 989: // ftps-data
+		return LayerTypeTLS
+	case 990: // ftps
+		return LayerTypeTLS
+	case 992: // telnets
+		return LayerTypeTLS
+	case 993: // imaps
+		return LayerTypeTLS
+	case 994: // ircs
+		return LayerTypeTLS
+	case 995: // pop3s
+		return LayerTypeTLS
+	case 5061: // sips
+		return LayerTypeTLS
+	}
+	return gopacket.LayerTypePayload
+}
+
+var tcpPortLayerTypeOverride bitfield
+
+var tcpPortLayerType = map[TCPPort]gopacket.LayerType{}
+
+// RegisterTCPPortLayerType creates a new mapping between a TCPPort
+// and an underlying LayerType.
+func RegisterTCPPortLayerType(port TCPPort, layerType gopacket.LayerType) {
+	tcpPortLayerTypeOverride.set(uint16(port))
+	tcpPortLayerType[port] = layerType
+}
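+
+// Illustrative usage (a sketch; the port number below is an arbitrary
+// example, not a default of this package): an application serving TLS on a
+// non-standard port can be made decodable with
+//
+//	RegisterTCPPortLayerType(TCPPort(8443), LayerTypeTLS)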
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't.  Well-known names are stored in
+// UDPPortNames.
+func (a UDPPort) String() string {
+	if name, ok := UDPPortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
+
+// LayerType returns a LayerType that would be able to decode the
+// application payload. It uses some well-known ports such as 53 for
+// DNS.
+//
+// Returns gopacket.LayerTypePayload for unknown/unsupported port numbers.
+func (a UDPPort) LayerType() gopacket.LayerType {
+	if udpPortLayerTypeOverride.has(uint16(a)) {
+		return udpPortLayerType[a]
+	}
+	switch a {
+	case 53:
+		return LayerTypeDNS
+	case 67:
+		return LayerTypeDHCPv4
+	case 68:
+		return LayerTypeDHCPv4
+	case 123:
+		return LayerTypeNTP
+	case 546:
+		return LayerTypeDHCPv6
+	case 547:
+		return LayerTypeDHCPv6
+	case 623:
+		return LayerTypeRMCP
+	case 1812:
+		return LayerTypeRADIUS
+	case 2152:
+		return LayerTypeGTPv1U
+	case 3784:
+		return LayerTypeBFD
+	case 4789:
+		return LayerTypeVXLAN
+	case 5060:
+		return LayerTypeSIP
+	case 6081:
+		return LayerTypeGeneve
+	case 6343:
+		return LayerTypeSFlow
+	}
+	return gopacket.LayerTypePayload
+}
+
+var udpPortLayerTypeOverride bitfield
+
+var udpPortLayerType = map[UDPPort]gopacket.LayerType{
+	53:   LayerTypeDNS,
+	123:  LayerTypeNTP,
+	4789: LayerTypeVXLAN,
+	67:   LayerTypeDHCPv4,
+	68:   LayerTypeDHCPv4,
+	546:  LayerTypeDHCPv6,
+	547:  LayerTypeDHCPv6,
+	5060: LayerTypeSIP,
+	6343: LayerTypeSFlow,
+	6081: LayerTypeGeneve,
+	3784: LayerTypeBFD,
+	2152: LayerTypeGTPv1U,
+	623:  LayerTypeRMCP,
+}
+
+// RegisterUDPPortLayerType creates a new mapping between a UDPPort
+// and an underlying LayerType.
+func RegisterUDPPortLayerType(port UDPPort, layerType gopacket.LayerType) {
+	udpPortLayerTypeOverride.set(uint16(port))
+	udpPortLayerType[port] = layerType
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't.  Well-known names are stored in
+// RUDPPortNames.
+func (a RUDPPort) String() string {
+	if name, ok := RUDPPortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't.  Well-known names are stored in
+// SCTPPortNames.
+func (a SCTPPort) String() string {
+	if name, ok := SCTPPortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
+
+// String returns the port as "number(name)" if there's a well-known port name,
+// or just "number" if there isn't.  Well-known names are stored in
+// UDPLitePortNames.
+func (a UDPLitePort) String() string {
+	if name, ok := UDPLitePortNames[a]; ok {
+		return fmt.Sprintf("%d(%s)", a, name)
+	}
+	return strconv.Itoa(int(a))
+}
diff --git a/vendor/github.com/google/gopacket/layers/ppp.go b/vendor/github.com/google/gopacket/layers/ppp.go
new file mode 100644
index 0000000..e534d69
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/ppp.go
@@ -0,0 +1,88 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"github.com/google/gopacket"
+)
+
+// PPP is the layer for PPP encapsulation headers.
+type PPP struct {
+	BaseLayer
+	PPPType       PPPType
+	HasPPTPHeader bool
+}
+
+// PPPEndpoint is a singleton endpoint for PPP.  Since there is no actual
+// addressing for the two ends of a PPP connection, we use a singleton value
+// named 'point' for each endpoint.
+var PPPEndpoint = gopacket.NewEndpoint(EndpointPPP, nil)
+
+// PPPFlow is a singleton flow for PPP.  Since there is no actual addressing for
+// the two ends of a PPP connection, we use a singleton value to represent the
+// flow for all PPP connections.
+var PPPFlow = gopacket.NewFlow(EndpointPPP, nil, nil)
+
+// LayerType returns LayerTypePPP
+func (p *PPP) LayerType() gopacket.LayerType { return LayerTypePPP }
+
+// LinkFlow returns PPPFlow.
+func (p *PPP) LinkFlow() gopacket.Flow { return PPPFlow }
+
+func decodePPP(data []byte, p gopacket.PacketBuilder) error {
+	ppp := &PPP{}
+	offset := 0
+	if data[0] == 0xff && data[1] == 0x03 {
+		offset = 2
+		ppp.HasPPTPHeader = true
+	}
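+	// PPP protocol numbers are assigned so that the first octet is even and
+	// the second is odd; a set low bit in the first octet therefore signals a
+	// compressed, single-octet protocol field (RFC 1661 protocol field compression).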
+	if data[offset]&0x1 == 0 {
+		if data[offset+1]&0x1 == 0 {
+			return errors.New("PPP has invalid type")
+		}
+		ppp.PPPType = PPPType(binary.BigEndian.Uint16(data[offset : offset+2]))
+		ppp.Contents = data[offset : offset+2]
+		ppp.Payload = data[offset+2:]
+	} else {
+		ppp.PPPType = PPPType(data[offset])
+		ppp.Contents = data[offset : offset+1]
+		ppp.Payload = data[offset+1:]
+	}
+	p.AddLayer(ppp)
+	p.SetLinkLayer(ppp)
+	return p.NextDecoder(ppp.PPPType)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p *PPP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	if p.PPPType&0x100 == 0 {
+		bytes, err := b.PrependBytes(2)
+		if err != nil {
+			return err
+		}
+		binary.BigEndian.PutUint16(bytes, uint16(p.PPPType))
+	} else {
+		bytes, err := b.PrependBytes(1)
+		if err != nil {
+			return err
+		}
+		bytes[0] = uint8(p.PPPType)
+	}
+	if p.HasPPTPHeader {
+		bytes, err := b.PrependBytes(2)
+		if err != nil {
+			return err
+		}
+		bytes[0] = 0xff
+		bytes[1] = 0x03
+	}
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/pppoe.go b/vendor/github.com/google/gopacket/layers/pppoe.go
new file mode 100644
index 0000000..9a40e2d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/pppoe.go
@@ -0,0 +1,206 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+type PPPoEOpt uint16
+
+// Constants for PPPoE Options
+const (
+	PPPoEOptEndOfList        PPPoEOpt = 0x0000
+	PPPoEOptServiceName      PPPoEOpt = 0x0101
+	PPPoEOptAcName           PPPoEOpt = 0x0102
+	PPPoEOptHostUniq         PPPoEOpt = 0x0103
+	PPPoEOptAcCookie         PPPoEOpt = 0x0104
+	PPPoEOptVendorSpecific   PPPoEOpt = 0x0105
+	PPPoEOptRelaySessionId   PPPoEOpt = 0x0110
+	PPPoEOptServiceErrorName PPPoEOpt = 0x0201
+	PPPoEOptAcSystemError    PPPoEOpt = 0x0202
+	PPPoEOptGenericError     PPPoEOpt = 0x0203
+)
+
+type PPPoEOption struct {
+	Type   PPPoEOpt
+	Length uint16
+	Data   []byte
+}
+
+func (o PPPoEOption) encode(b []byte) (uint16, error) {
+	binary.BigEndian.PutUint16(b[0:2], uint16(o.Type))
+	binary.BigEndian.PutUint16(b[2:4], uint16(o.Length))
+	copy(b[4:], o.Data)
+	return (4 + o.Length), nil
+}
+
+func (o *PPPoEOption) decode(b []byte) error {
+	if len(b) < 4 {
+		return DecOptionNotEnoughData
+	}
+	o.Type = PPPoEOpt(binary.BigEndian.Uint16(b[0:2]))
+	o.Length = binary.BigEndian.Uint16(b[2:4])
+	if int(o.Length) > len(b[4:]) {
+		return DecOptionNotEnoughData
+	}
+	o.Data = b[4 : 4+int(o.Length)]
+	return nil
+}
+
+func NewPPPoEOption(opt PPPoEOpt, data []byte) PPPoEOption {
+	length := (uint16)(len(data))
+	option := PPPoEOption{Type: opt, Length: length, Data: data}
+	return option
+}
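+
+// For example (illustrative values only), a Host-Uniq tag carrying four
+// opaque bytes can be built with:
+//
+//	opt := NewPPPoEOption(PPPoEOptHostUniq, []byte{0xde, 0xad, 0xbe, 0xef})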
+
+func (o PPPoEOption) String() string {
+	return fmt.Sprintf("Option(%s:%v)", o.Type, o.Data)
+}
+
+func (o PPPoEOption) length() int {
+	return 4 + int(o.Length)
+}
+
+type PPPoEOptions []PPPoEOption
+
+// String returns a string version of the options list.
+func (o PPPoEOptions) String() string {
+	buf := &bytes.Buffer{}
+	buf.WriteByte('[')
+	for i, opt := range o {
+		buf.WriteString(opt.String())
+		if i+1 != len(o) {
+			buf.WriteString(", ")
+		}
+	}
+	buf.WriteByte(']')
+	return buf.String()
+}
+
+func (o PPPoEOptions) length() int {
+	length := 0
+	for _, op := range o {
+		length = length + op.length()
+	}
+	return length
+}
+
+// String returns a string version of a PPPoEOpt.
+func (o PPPoEOpt) String() string {
+	switch o {
+	case PPPoEOptEndOfList:
+		return "EndOfList"
+	case PPPoEOptServiceName:
+		return "ServiceName"
+	case PPPoEOptAcName:
+		return "AcName"
+	case PPPoEOptHostUniq:
+		return "HostUniq"
+	case PPPoEOptAcCookie:
+		return "AcCookie"
+	case PPPoEOptVendorSpecific:
+		return "VendorSpecific"
+	case PPPoEOptRelaySessionId:
+		return "RelaySessionId"
+	case PPPoEOptServiceErrorName:
+		return "ServiceErrorName"
+	case PPPoEOptAcSystemError:
+		return "AcSystemError"
+	case PPPoEOptGenericError:
+		return "GenericError"
+	default:
+		return "Unknown"
+	}
+}
+
+// PPPoE is the layer for PPPoE encapsulation headers.
+type PPPoE struct {
+	BaseLayer
+	Version   uint8
+	Type      uint8
+	Code      PPPoECode
+	SessionId uint16
+	Length    uint16
+	Options   PPPoEOptions
+}
+
+// LayerType returns gopacket.LayerTypePPPoE.
+func (p *PPPoE) LayerType() gopacket.LayerType {
+	return LayerTypePPPoE
+}
+
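+// DecodeOptions parses up to length bytes of data as PPPoE discovery tags
+// (type, length, value) and appends them to p.Options.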
+func (p *PPPoE) DecodeOptions(data []byte, length uint16) {
+	start := uint16(0)
+	stop := length
+	if int(stop) > len(data) {
+		stop = uint16(len(data))
+	}
+	for int(start)+4 <= int(stop) {
+		pppoeOpt := &PPPoEOption{}
+		pppoeOpt.Type = PPPoEOpt(binary.BigEndian.Uint16(data[start : start+2]))
+		pppoeOpt.Length = binary.BigEndian.Uint16(data[start+2 : start+4])
+		if pppoeOpt.Length != 0 {
+			if int(start)+4+int(pppoeOpt.Length) > int(stop) {
+				return
+			}
+			pppoeOpt.Data = data[start+4 : start+4+pppoeOpt.Length]
+		}
+		start = start + 4 + pppoeOpt.Length
+		p.Options = append(p.Options, *pppoeOpt)
+	}
+}
+
+// decodePPPoE decodes the PPPoE header (see http://tools.ietf.org/html/rfc2516).
+func decodePPPoE(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 6 {
+		p.SetTruncated()
+		return fmt.Errorf("PPPoE packet too small: %d bytes", len(data))
+	}
+	pppoe := &PPPoE{
+		Version:   data[0] >> 4,
+		Type:      data[0] & 0x0F,
+		Code:      PPPoECode(data[1]),
+		SessionId: binary.BigEndian.Uint16(data[2:4]),
+		Length:    binary.BigEndian.Uint16(data[4:6]),
+	}
+	if 6+int(pppoe.Length) > len(data) {
+		p.SetTruncated()
+		return fmt.Errorf("PPPoE length %d exceeds packet size", pppoe.Length)
+	}
+	pppoe.DecodeOptions(data[6:], pppoe.Length)
+	pppoe.BaseLayer = BaseLayer{data[:6], data[6 : 6+pppoe.Length]}
+	p.AddLayer(pppoe)
+	return p.NextDecoder(pppoe.Code)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializeBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (p *PPPoE) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	payload := b.Bytes()
+
+	// Packet length including the options
+	length := 6
+	optLength := 0
+	for _, o := range p.Options {
+		optLength = optLength + o.length()
+	}
+
+	bytes, err := b.PrependBytes(length + optLength)
+	if err != nil {
+		return err
+	}
+	bytes[0] = (p.Version << 4) | p.Type
+	bytes[1] = byte(p.Code)
+	binary.BigEndian.PutUint16(bytes[2:], p.SessionId)
+	if opts.FixLengths {
+		p.Length = uint16(len(payload) + optLength)
+	}
+	binary.BigEndian.PutUint16(bytes[4:], p.Length)
+
+	// Encode the options
+	start := uint16(6)
+	for _, option := range p.Options {
+		optLen, _ := option.encode(bytes[start:])
+		start = start + optLen
+	}
+	return nil
+}
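+
+// A minimal construction sketch (not part of upstream gopacket; assumes the
+// PPPoECodePADI constant defined in enums.go):
+//
+//	buf := gopacket.NewSerializeBuffer()
+//	padi := &PPPoE{
+//		Version: 1, Type: 1, Code: PPPoECodePADI,
+//		Options: PPPoEOptions{NewPPPoEOption(PPPoEOptServiceName, nil)},
+//	}
+//	_ = padi.SerializeTo(buf, gopacket.SerializeOptions{FixLengths: true})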
diff --git a/vendor/github.com/google/gopacket/layers/prism.go b/vendor/github.com/google/gopacket/layers/prism.go
new file mode 100644
index 0000000..e1711e7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/prism.go
@@ -0,0 +1,146 @@
+// Copyright 2015 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// http://www.tcpdump.org/linktypes/LINKTYPE_IEEE802_11_PRISM.html
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+func decodePrismValue(data []byte, pv *PrismValue) {
+	pv.DID = PrismDID(binary.LittleEndian.Uint32(data[0:4]))
+	pv.Status = binary.LittleEndian.Uint16(data[4:6])
+	pv.Length = binary.LittleEndian.Uint16(data[6:8])
+	pv.Data = data[8 : 8+pv.Length]
+}
+
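+// PrismDID is a Prism header data identifier (DID), identifying the kind of value that follows.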
+type PrismDID uint32
+
+const (
+	PrismDIDType1HostTime                  PrismDID = 0x10044
+	PrismDIDType2HostTime                  PrismDID = 0x01041
+	PrismDIDType1MACTime                   PrismDID = 0x20044
+	PrismDIDType2MACTime                   PrismDID = 0x02041
+	PrismDIDType1Channel                   PrismDID = 0x30044
+	PrismDIDType2Channel                   PrismDID = 0x03041
+	PrismDIDType1RSSI                      PrismDID = 0x40044
+	PrismDIDType2RSSI                      PrismDID = 0x04041
+	PrismDIDType1SignalQuality             PrismDID = 0x50044
+	PrismDIDType2SignalQuality             PrismDID = 0x05041
+	PrismDIDType1Signal                    PrismDID = 0x60044
+	PrismDIDType2Signal                    PrismDID = 0x06041
+	PrismDIDType1Noise                     PrismDID = 0x70044
+	PrismDIDType2Noise                     PrismDID = 0x07041
+	PrismDIDType1Rate                      PrismDID = 0x80044
+	PrismDIDType2Rate                      PrismDID = 0x08041
+	PrismDIDType1TransmittedFrameIndicator PrismDID = 0x90044
+	PrismDIDType2TransmittedFrameIndicator PrismDID = 0x09041
+	PrismDIDType1FrameLength               PrismDID = 0xA0044
+	PrismDIDType2FrameLength               PrismDID = 0x0A041
+)
+
+const (
+	PrismType1MessageCode uint16 = 0x00000044
+	PrismType2MessageCode uint16 = 0x00000041
+)
+
+func (p PrismDID) String() string {
+	dids := map[PrismDID]string{
+		PrismDIDType1HostTime:                  "Host Time",
+		PrismDIDType2HostTime:                  "Host Time",
+		PrismDIDType1MACTime:                   "MAC Time",
+		PrismDIDType2MACTime:                   "MAC Time",
+		PrismDIDType1Channel:                   "Channel",
+		PrismDIDType2Channel:                   "Channel",
+		PrismDIDType1RSSI:                      "RSSI",
+		PrismDIDType2RSSI:                      "RSSI",
+		PrismDIDType1SignalQuality:             "Signal Quality",
+		PrismDIDType2SignalQuality:             "Signal Quality",
+		PrismDIDType1Signal:                    "Signal",
+		PrismDIDType2Signal:                    "Signal",
+		PrismDIDType1Noise:                     "Noise",
+		PrismDIDType2Noise:                     "Noise",
+		PrismDIDType1Rate:                      "Rate",
+		PrismDIDType2Rate:                      "Rate",
+		PrismDIDType1TransmittedFrameIndicator: "Transmitted Frame Indicator",
+		PrismDIDType2TransmittedFrameIndicator: "Transmitted Frame Indicator",
+		PrismDIDType1FrameLength:               "Frame Length",
+		PrismDIDType2FrameLength:               "Frame Length",
+	}
+
+	if str, ok := dids[p]; ok {
+		return str
+	}
+
+	return "Unknown DID"
+}
+
+type PrismValue struct {
+	DID    PrismDID
+	Status uint16
+	Length uint16
+	Data   []byte
+}
+
+func (pv *PrismValue) IsSupplied() bool {
+	return pv.Status == 1
+}
+
+var ErrPrismExpectedMoreData = errors.New("Expected more data.")
+var ErrPrismInvalidCode = errors.New("Invalid header code.")
+
+func decodePrismHeader(data []byte, p gopacket.PacketBuilder) error {
+	d := &PrismHeader{}
+	return decodingLayerDecoder(d, data, p)
+}
+
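+// PrismHeader is the layer for Prism monitor-mode headers prepended to 802.11 frames.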
+type PrismHeader struct {
+	BaseLayer
+	Code       uint16
+	Length     uint16
+	DeviceName string
+	Values     []PrismValue
+}
+
+func (m *PrismHeader) LayerType() gopacket.LayerType { return LayerTypePrismHeader }
+
+func (m *PrismHeader) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 24 {
+		df.SetTruncated()
+		return ErrPrismExpectedMoreData
+	}
+	m.Code = binary.LittleEndian.Uint16(data[0:4])
+	m.Length = binary.LittleEndian.Uint16(data[4:8])
+	m.DeviceName = string(data[8:24])
+	m.BaseLayer = BaseLayer{Contents: data[:m.Length], Payload: data[m.Length:]}
+
+	switch m.Code {
+	case PrismType1MessageCode:
+		fallthrough
+	case PrismType2MessageCode:
+		// valid message code
+	default:
+		return ErrPrismInvalidCode
+	}
+
+	offset := uint16(24)
+
+	m.Values = make([]PrismValue, (m.Length-offset)/12)
+	for i := 0; i < len(m.Values); i++ {
+		decodePrismValue(data[offset:offset+12], &m.Values[i])
+		offset += 12
+	}
+
+	if offset != m.Length {
+		return ErrPrismExpectedMoreData
+	}
+
+	return nil
+}
+
+func (m *PrismHeader) CanDecode() gopacket.LayerClass    { return LayerTypePrismHeader }
+func (m *PrismHeader) NextLayerType() gopacket.LayerType { return LayerTypeDot11 }
diff --git a/vendor/github.com/google/gopacket/layers/radiotap.go b/vendor/github.com/google/gopacket/layers/radiotap.go
new file mode 100644
index 0000000..d09559f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/radiotap.go
@@ -0,0 +1,1076 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"bytes"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash/crc32"
+	"strings"
+
+	"github.com/google/gopacket"
+)
+
+// align calculates the number of bytes needed to align with the width
+// on the offset, returning the number of bytes we need to skip to
+// align to the offset (width).
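+// For example, align(9, 4) returns 3 and align(8, 4) returns 0; width is assumed to be a power of two.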
+func align(offset uint16, width uint16) uint16 {
+	return ((((offset) + ((width) - 1)) & (^((width) - 1))) - offset)
+}
+
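+// RadioTapPresent is the bitmap of optional fields present in a RadioTap header.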
+type RadioTapPresent uint32
+
+const (
+	RadioTapPresentTSFT RadioTapPresent = 1 << iota
+	RadioTapPresentFlags
+	RadioTapPresentRate
+	RadioTapPresentChannel
+	RadioTapPresentFHSS
+	RadioTapPresentDBMAntennaSignal
+	RadioTapPresentDBMAntennaNoise
+	RadioTapPresentLockQuality
+	RadioTapPresentTxAttenuation
+	RadioTapPresentDBTxAttenuation
+	RadioTapPresentDBMTxPower
+	RadioTapPresentAntenna
+	RadioTapPresentDBAntennaSignal
+	RadioTapPresentDBAntennaNoise
+	RadioTapPresentRxFlags
+	RadioTapPresentTxFlags
+	RadioTapPresentRtsRetries
+	RadioTapPresentDataRetries
+	_
+	RadioTapPresentMCS
+	RadioTapPresentAMPDUStatus
+	RadioTapPresentVHT
+	RadioTapPresentEXT RadioTapPresent = 1 << 31
+)
+
+func (r RadioTapPresent) TSFT() bool {
+	return r&RadioTapPresentTSFT != 0
+}
+func (r RadioTapPresent) Flags() bool {
+	return r&RadioTapPresentFlags != 0
+}
+func (r RadioTapPresent) Rate() bool {
+	return r&RadioTapPresentRate != 0
+}
+func (r RadioTapPresent) Channel() bool {
+	return r&RadioTapPresentChannel != 0
+}
+func (r RadioTapPresent) FHSS() bool {
+	return r&RadioTapPresentFHSS != 0
+}
+func (r RadioTapPresent) DBMAntennaSignal() bool {
+	return r&RadioTapPresentDBMAntennaSignal != 0
+}
+func (r RadioTapPresent) DBMAntennaNoise() bool {
+	return r&RadioTapPresentDBMAntennaNoise != 0
+}
+func (r RadioTapPresent) LockQuality() bool {
+	return r&RadioTapPresentLockQuality != 0
+}
+func (r RadioTapPresent) TxAttenuation() bool {
+	return r&RadioTapPresentTxAttenuation != 0
+}
+func (r RadioTapPresent) DBTxAttenuation() bool {
+	return r&RadioTapPresentDBTxAttenuation != 0
+}
+func (r RadioTapPresent) DBMTxPower() bool {
+	return r&RadioTapPresentDBMTxPower != 0
+}
+func (r RadioTapPresent) Antenna() bool {
+	return r&RadioTapPresentAntenna != 0
+}
+func (r RadioTapPresent) DBAntennaSignal() bool {
+	return r&RadioTapPresentDBAntennaSignal != 0
+}
+func (r RadioTapPresent) DBAntennaNoise() bool {
+	return r&RadioTapPresentDBAntennaNoise != 0
+}
+func (r RadioTapPresent) RxFlags() bool {
+	return r&RadioTapPresentRxFlags != 0
+}
+func (r RadioTapPresent) TxFlags() bool {
+	return r&RadioTapPresentTxFlags != 0
+}
+func (r RadioTapPresent) RtsRetries() bool {
+	return r&RadioTapPresentRtsRetries != 0
+}
+func (r RadioTapPresent) DataRetries() bool {
+	return r&RadioTapPresentDataRetries != 0
+}
+func (r RadioTapPresent) MCS() bool {
+	return r&RadioTapPresentMCS != 0
+}
+func (r RadioTapPresent) AMPDUStatus() bool {
+	return r&RadioTapPresentAMPDUStatus != 0
+}
+func (r RadioTapPresent) VHT() bool {
+	return r&RadioTapPresentVHT != 0
+}
+func (r RadioTapPresent) EXT() bool {
+	return r&RadioTapPresentEXT != 0
+}
+
+type RadioTapChannelFlags uint16
+
+const (
+	RadioTapChannelFlagsTurbo   RadioTapChannelFlags = 0x0010 // Turbo channel
+	RadioTapChannelFlagsCCK     RadioTapChannelFlags = 0x0020 // CCK channel
+	RadioTapChannelFlagsOFDM    RadioTapChannelFlags = 0x0040 // OFDM channel
+	RadioTapChannelFlagsGhz2    RadioTapChannelFlags = 0x0080 // 2 GHz spectrum channel.
+	RadioTapChannelFlagsGhz5    RadioTapChannelFlags = 0x0100 // 5 GHz spectrum channel
+	RadioTapChannelFlagsPassive RadioTapChannelFlags = 0x0200 // Only passive scan allowed
+	RadioTapChannelFlagsDynamic RadioTapChannelFlags = 0x0400 // Dynamic CCK-OFDM channel
+	RadioTapChannelFlagsGFSK    RadioTapChannelFlags = 0x0800 // GFSK channel (FHSS PHY)
+)
+
+func (r RadioTapChannelFlags) Turbo() bool {
+	return r&RadioTapChannelFlagsTurbo != 0
+}
+func (r RadioTapChannelFlags) CCK() bool {
+	return r&RadioTapChannelFlagsCCK != 0
+}
+func (r RadioTapChannelFlags) OFDM() bool {
+	return r&RadioTapChannelFlagsOFDM != 0
+}
+func (r RadioTapChannelFlags) Ghz2() bool {
+	return r&RadioTapChannelFlagsGhz2 != 0
+}
+func (r RadioTapChannelFlags) Ghz5() bool {
+	return r&RadioTapChannelFlagsGhz5 != 0
+}
+func (r RadioTapChannelFlags) Passive() bool {
+	return r&RadioTapChannelFlagsPassive != 0
+}
+func (r RadioTapChannelFlags) Dynamic() bool {
+	return r&RadioTapChannelFlagsDynamic != 0
+}
+func (r RadioTapChannelFlags) GFSK() bool {
+	return r&RadioTapChannelFlagsGFSK != 0
+}
+
+// String provides a human readable string for RadioTapChannelFlags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the RadioTapChannelFlags value, not its string.
+func (a RadioTapChannelFlags) String() string {
+	var out bytes.Buffer
+	if a.Turbo() {
+		out.WriteString("Turbo,")
+	}
+	if a.CCK() {
+		out.WriteString("CCK,")
+	}
+	if a.OFDM() {
+		out.WriteString("OFDM,")
+	}
+	if a.Ghz2() {
+		out.WriteString("Ghz2,")
+	}
+	if a.Ghz5() {
+		out.WriteString("Ghz5,")
+	}
+	if a.Passive() {
+		out.WriteString("Passive,")
+	}
+	if a.Dynamic() {
+		out.WriteString("Dynamic,")
+	}
+	if a.GFSK() {
+		out.WriteString("GFSK,")
+	}
+
+	if length := out.Len(); length > 0 {
+		return string(out.Bytes()[:length-1]) // strip final comma
+	}
+	return ""
+}
+
+type RadioTapFlags uint8
+
+const (
+	RadioTapFlagsCFP           RadioTapFlags = 1 << iota // sent/received during CFP
+	RadioTapFlagsShortPreamble                           // sent/received * with short * preamble
+	RadioTapFlagsWEP                                     // sent/received * with WEP encryption
+	RadioTapFlagsFrag                                    // sent/received * with fragmentation
+	RadioTapFlagsFCS                                     // frame includes FCS
+	RadioTapFlagsDatapad                                 // frame has padding between * 802.11 header and payload * (to 32-bit boundary)
+	RadioTapFlagsBadFCS                                  // does not pass FCS check
+	RadioTapFlagsShortGI                                 // HT short GI
+)
+
+func (r RadioTapFlags) CFP() bool {
+	return r&RadioTapFlagsCFP != 0
+}
+func (r RadioTapFlags) ShortPreamble() bool {
+	return r&RadioTapFlagsShortPreamble != 0
+}
+func (r RadioTapFlags) WEP() bool {
+	return r&RadioTapFlagsWEP != 0
+}
+func (r RadioTapFlags) Frag() bool {
+	return r&RadioTapFlagsFrag != 0
+}
+func (r RadioTapFlags) FCS() bool {
+	return r&RadioTapFlagsFCS != 0
+}
+func (r RadioTapFlags) Datapad() bool {
+	return r&RadioTapFlagsDatapad != 0
+}
+func (r RadioTapFlags) BadFCS() bool {
+	return r&RadioTapFlagsBadFCS != 0
+}
+func (r RadioTapFlags) ShortGI() bool {
+	return r&RadioTapFlagsShortGI != 0
+}
+
+// String provides a human readable string for RadioTapFlags.
+// This string is possibly subject to change over time; if you're storing this
+// persistently, you should probably store the RadioTapFlags value, not its string.
+func (a RadioTapFlags) String() string {
+	var out bytes.Buffer
+	if a.CFP() {
+		out.WriteString("CFP,")
+	}
+	if a.ShortPreamble() {
+		out.WriteString("SHORT-PREAMBLE,")
+	}
+	if a.WEP() {
+		out.WriteString("WEP,")
+	}
+	if a.Frag() {
+		out.WriteString("FRAG,")
+	}
+	if a.FCS() {
+		out.WriteString("FCS,")
+	}
+	if a.Datapad() {
+		out.WriteString("DATAPAD,")
+	}
+	if a.ShortGI() {
+		out.WriteString("SHORT-GI,")
+	}
+
+	if length := out.Len(); length > 0 {
+		return string(out.Bytes()[:length-1]) // strip final comma
+	}
+	return ""
+}
+
+type RadioTapRate uint8
+
+func (a RadioTapRate) String() string {
+	return fmt.Sprintf("%v Mb/s", 0.5*float32(a))
+}
+
+type RadioTapChannelFrequency uint16
+
+func (a RadioTapChannelFrequency) String() string {
+	return fmt.Sprintf("%d MHz", a)
+}
+
+type RadioTapRxFlags uint16
+
+const (
+	RadioTapRxFlagsBadPlcp RadioTapRxFlags = 0x0002
+)
+
+func (self RadioTapRxFlags) BadPlcp() bool {
+	return self&RadioTapRxFlagsBadPlcp != 0
+}
+
+func (self RadioTapRxFlags) String() string {
+	if self.BadPlcp() {
+		return "BADPLCP"
+	}
+	return ""
+}
+
+type RadioTapTxFlags uint16
+
+const (
+	RadioTapTxFlagsFail RadioTapTxFlags = 1 << iota
+	RadioTapTxFlagsCTS
+	RadioTapTxFlagsRTS
+	RadioTapTxFlagsNoACK
+)
+
+func (self RadioTapTxFlags) Fail() bool  { return self&RadioTapTxFlagsFail != 0 }
+func (self RadioTapTxFlags) CTS() bool   { return self&RadioTapTxFlagsCTS != 0 }
+func (self RadioTapTxFlags) RTS() bool   { return self&RadioTapTxFlagsRTS != 0 }
+func (self RadioTapTxFlags) NoACK() bool { return self&RadioTapTxFlagsNoACK != 0 }
+
+func (self RadioTapTxFlags) String() string {
+	var tokens []string
+	if self.Fail() {
+		tokens = append(tokens, "Fail")
+	}
+	if self.CTS() {
+		tokens = append(tokens, "CTS")
+	}
+	if self.RTS() {
+		tokens = append(tokens, "RTS")
+	}
+	if self.NoACK() {
+		tokens = append(tokens, "NoACK")
+	}
+	return strings.Join(tokens, ",")
+}
+
+type RadioTapMCS struct {
+	Known RadioTapMCSKnown
+	Flags RadioTapMCSFlags
+	MCS   uint8
+}
+
+func (self RadioTapMCS) String() string {
+	var tokens []string
+	if self.Known.Bandwidth() {
+		token := "?"
+		switch self.Flags.Bandwidth() {
+		case 0:
+			token = "20"
+		case 1:
+			token = "40"
+		case 2:
+			token = "40(20L)"
+		case 3:
+			token = "40(20U)"
+		}
+		tokens = append(tokens, token)
+	}
+	if self.Known.MCSIndex() {
+		tokens = append(tokens, fmt.Sprintf("MCSIndex#%d", self.MCS))
+	}
+	if self.Known.GuardInterval() {
+		if self.Flags.ShortGI() {
+			tokens = append(tokens, "shortGI")
+		} else {
+			tokens = append(tokens, "longGI")
+		}
+	}
+	if self.Known.HTFormat() {
+		if self.Flags.Greenfield() {
+			tokens = append(tokens, "HT-greenfield")
+		} else {
+			tokens = append(tokens, "HT-mixed")
+		}
+	}
+	if self.Known.FECType() {
+		if self.Flags.FECLDPC() {
+			tokens = append(tokens, "LDPC")
+		} else {
+			tokens = append(tokens, "BCC")
+		}
+	}
+	if self.Known.STBC() {
+		tokens = append(tokens, fmt.Sprintf("STBC#%d", self.Flags.STBC()))
+	}
+	if self.Known.NESS() {
+		num := 0
+		if self.Known.NESS1() {
+			num |= 0x02
+		}
+		if self.Flags.NESS0() {
+			num |= 0x01
+		}
+		tokens = append(tokens, fmt.Sprintf("num-of-ESS#%d", num))
+	}
+	return strings.Join(tokens, ",")
+}
+
+type RadioTapMCSKnown uint8
+
+const (
+	RadioTapMCSKnownBandwidth RadioTapMCSKnown = 1 << iota
+	RadioTapMCSKnownMCSIndex
+	RadioTapMCSKnownGuardInterval
+	RadioTapMCSKnownHTFormat
+	RadioTapMCSKnownFECType
+	RadioTapMCSKnownSTBC
+	RadioTapMCSKnownNESS
+	RadioTapMCSKnownNESS1
+)
+
+func (self RadioTapMCSKnown) Bandwidth() bool     { return self&RadioTapMCSKnownBandwidth != 0 }
+func (self RadioTapMCSKnown) MCSIndex() bool      { return self&RadioTapMCSKnownMCSIndex != 0 }
+func (self RadioTapMCSKnown) GuardInterval() bool { return self&RadioTapMCSKnownGuardInterval != 0 }
+func (self RadioTapMCSKnown) HTFormat() bool      { return self&RadioTapMCSKnownHTFormat != 0 }
+func (self RadioTapMCSKnown) FECType() bool       { return self&RadioTapMCSKnownFECType != 0 }
+func (self RadioTapMCSKnown) STBC() bool          { return self&RadioTapMCSKnownSTBC != 0 }
+func (self RadioTapMCSKnown) NESS() bool          { return self&RadioTapMCSKnownNESS != 0 }
+func (self RadioTapMCSKnown) NESS1() bool         { return self&RadioTapMCSKnownNESS1 != 0 }
+
+type RadioTapMCSFlags uint8
+
+const (
+	RadioTapMCSFlagsBandwidthMask RadioTapMCSFlags = 0x03
+	RadioTapMCSFlagsShortGI                        = 0x04
+	RadioTapMCSFlagsGreenfield                     = 0x08
+	RadioTapMCSFlagsFECLDPC                        = 0x10
+	RadioTapMCSFlagsSTBCMask                       = 0x60
+	RadioTapMCSFlagsNESS0                          = 0x80
+)
+
+func (self RadioTapMCSFlags) Bandwidth() int {
+	return int(self & RadioTapMCSFlagsBandwidthMask)
+}
+func (self RadioTapMCSFlags) ShortGI() bool    { return self&RadioTapMCSFlagsShortGI != 0 }
+func (self RadioTapMCSFlags) Greenfield() bool { return self&RadioTapMCSFlagsGreenfield != 0 }
+func (self RadioTapMCSFlags) FECLDPC() bool    { return self&RadioTapMCSFlagsFECLDPC != 0 }
+func (self RadioTapMCSFlags) STBC() int {
+	return int(self&RadioTapMCSFlagsSTBCMask) >> 5
+}
+func (self RadioTapMCSFlags) NESS0() bool { return self&RadioTapMCSFlagsNESS0 != 0 }
+
+type RadioTapAMPDUStatus struct {
+	Reference uint32
+	Flags     RadioTapAMPDUStatusFlags
+	CRC       uint8
+}
+
+func (self RadioTapAMPDUStatus) String() string {
+	tokens := []string{
+		fmt.Sprintf("ref#%x", self.Reference),
+	}
+	if self.Flags.ReportZerolen() && self.Flags.IsZerolen() {
+		tokens = append(tokens, "zero-length")
+	}
+	if self.Flags.LastKnown() && self.Flags.IsLast() {
+		tokens = append(tokens, "last")
+	}
+	if self.Flags.DelimCRCErr() {
+		tokens = append(tokens, "delimiter CRC error")
+	}
+	if self.Flags.DelimCRCKnown() {
+		tokens = append(tokens, fmt.Sprintf("delimiter-CRC=%02x", self.CRC))
+	}
+	return strings.Join(tokens, ",")
+}
+
+type RadioTapAMPDUStatusFlags uint16
+
+const (
+	RadioTapAMPDUStatusFlagsReportZerolen RadioTapAMPDUStatusFlags = 1 << iota
+	RadioTapAMPDUIsZerolen
+	RadioTapAMPDULastKnown
+	RadioTapAMPDUIsLast
+	RadioTapAMPDUDelimCRCErr
+	RadioTapAMPDUDelimCRCKnown
+)
+
+func (self RadioTapAMPDUStatusFlags) ReportZerolen() bool {
+	return self&RadioTapAMPDUStatusFlagsReportZerolen != 0
+}
+func (self RadioTapAMPDUStatusFlags) IsZerolen() bool   { return self&RadioTapAMPDUIsZerolen != 0 }
+func (self RadioTapAMPDUStatusFlags) LastKnown() bool   { return self&RadioTapAMPDULastKnown != 0 }
+func (self RadioTapAMPDUStatusFlags) IsLast() bool      { return self&RadioTapAMPDUIsLast != 0 }
+func (self RadioTapAMPDUStatusFlags) DelimCRCErr() bool { return self&RadioTapAMPDUDelimCRCErr != 0 }
+func (self RadioTapAMPDUStatusFlags) DelimCRCKnown() bool {
+	return self&RadioTapAMPDUDelimCRCKnown != 0
+}
+
+type RadioTapVHT struct {
+	Known      RadioTapVHTKnown
+	Flags      RadioTapVHTFlags
+	Bandwidth  uint8
+	MCSNSS     [4]RadioTapVHTMCSNSS
+	Coding     uint8
+	GroupId    uint8
+	PartialAID uint16
+}
+
+func (self RadioTapVHT) String() string {
+	var tokens []string
+	if self.Known.STBC() {
+		if self.Flags.STBC() {
+			tokens = append(tokens, "STBC")
+		} else {
+			tokens = append(tokens, "no STBC")
+		}
+	}
+	if self.Known.TXOPPSNotAllowed() {
+		if self.Flags.TXOPPSNotAllowed() {
+			tokens = append(tokens, "TXOP doze not allowed")
+		} else {
+			tokens = append(tokens, "TXOP doze allowed")
+		}
+	}
+	if self.Known.GI() {
+		if self.Flags.SGI() {
+			tokens = append(tokens, "short GI")
+		} else {
+			tokens = append(tokens, "long GI")
+		}
+	}
+	if self.Known.SGINSYMDisambiguation() {
+		if self.Flags.SGINSYMMod() {
+			tokens = append(tokens, "NSYM mod 10=9")
+		} else {
+			tokens = append(tokens, "NSYM mod 10!=9 or no short GI")
+		}
+	}
+	if self.Known.LDPCExtraOFDMSymbol() {
+		if self.Flags.LDPCExtraOFDMSymbol() {
+			tokens = append(tokens, "LDPC extra OFDM symbols")
+		} else {
+			tokens = append(tokens, "no LDPC extra OFDM symbols")
+		}
+	}
+	if self.Known.Beamformed() {
+		if self.Flags.Beamformed() {
+			tokens = append(tokens, "beamformed")
+		} else {
+			tokens = append(tokens, "no beamformed")
+		}
+	}
+	if self.Known.Bandwidth() {
+		token := "?"
+		switch self.Bandwidth & 0x1f {
+		case 0:
+			token = "20"
+		case 1:
+			token = "40"
+		case 2:
+			token = "40(20L)"
+		case 3:
+			token = "40(20U)"
+		case 4:
+			token = "80"
+		case 5:
+			token = "80(40L)"
+		case 6:
+			token = "80(40U)"
+		case 7:
+			token = "80(20LL)"
+		case 8:
+			token = "80(20LU)"
+		case 9:
+			token = "80(20UL)"
+		case 10:
+			token = "80(20UU)"
+		case 11:
+			token = "160"
+		case 12:
+			token = "160(80L)"
+		case 13:
+			token = "160(80U)"
+		case 14:
+			token = "160(40LL)"
+		case 15:
+			token = "160(40LU)"
+		case 16:
+			token = "160(40UL)"
+		case 17:
+			token = "160(40UU)"
+		case 18:
+			token = "160(20LLL)"
+		case 19:
+			token = "160(20LLU)"
+		case 20:
+			token = "160(20LUL)"
+		case 21:
+			token = "160(20LUU)"
+		case 22:
+			token = "160(20ULL)"
+		case 23:
+			token = "160(20ULU)"
+		case 24:
+			token = "160(20UUL)"
+		case 25:
+			token = "160(20UUU)"
+		}
+		tokens = append(tokens, token)
+	}
+	for i, MCSNSS := range self.MCSNSS {
+		if MCSNSS.Present() {
+			fec := "?"
+			switch self.Coding & (1 << uint8(i)) {
+			case 0:
+				fec = "BCC"
+			case 1:
+				fec = "LDPC"
+			}
+			tokens = append(tokens, fmt.Sprintf("user%d(%s,%s)", i, MCSNSS.String(), fec))
+		}
+	}
+	if self.Known.GroupId() {
+		tokens = append(tokens,
+			fmt.Sprintf("group=%d", self.GroupId))
+	}
+	if self.Known.PartialAID() {
+		tokens = append(tokens,
+			fmt.Sprintf("partial-AID=%d", self.PartialAID))
+	}
+	return strings.Join(tokens, ",")
+}
+
+type RadioTapVHTKnown uint16
+
+const (
+	RadioTapVHTKnownSTBC RadioTapVHTKnown = 1 << iota
+	RadioTapVHTKnownTXOPPSNotAllowed
+	RadioTapVHTKnownGI
+	RadioTapVHTKnownSGINSYMDisambiguation
+	RadioTapVHTKnownLDPCExtraOFDMSymbol
+	RadioTapVHTKnownBeamformed
+	RadioTapVHTKnownBandwidth
+	RadioTapVHTKnownGroupId
+	RadioTapVHTKnownPartialAID
+)
+
+func (self RadioTapVHTKnown) STBC() bool { return self&RadioTapVHTKnownSTBC != 0 }
+func (self RadioTapVHTKnown) TXOPPSNotAllowed() bool {
+	return self&RadioTapVHTKnownTXOPPSNotAllowed != 0
+}
+func (self RadioTapVHTKnown) GI() bool { return self&RadioTapVHTKnownGI != 0 }
+func (self RadioTapVHTKnown) SGINSYMDisambiguation() bool {
+	return self&RadioTapVHTKnownSGINSYMDisambiguation != 0
+}
+func (self RadioTapVHTKnown) LDPCExtraOFDMSymbol() bool {
+	return self&RadioTapVHTKnownLDPCExtraOFDMSymbol != 0
+}
+func (self RadioTapVHTKnown) Beamformed() bool { return self&RadioTapVHTKnownBeamformed != 0 }
+func (self RadioTapVHTKnown) Bandwidth() bool  { return self&RadioTapVHTKnownBandwidth != 0 }
+func (self RadioTapVHTKnown) GroupId() bool    { return self&RadioTapVHTKnownGroupId != 0 }
+func (self RadioTapVHTKnown) PartialAID() bool { return self&RadioTapVHTKnownPartialAID != 0 }
+
+type RadioTapVHTFlags uint8
+
+const (
+	RadioTapVHTFlagsSTBC RadioTapVHTFlags = 1 << iota
+	RadioTapVHTFlagsTXOPPSNotAllowed
+	RadioTapVHTFlagsSGI
+	RadioTapVHTFlagsSGINSYMMod
+	RadioTapVHTFlagsLDPCExtraOFDMSymbol
+	RadioTapVHTFlagsBeamformed
+)
+
+func (self RadioTapVHTFlags) STBC() bool { return self&RadioTapVHTFlagsSTBC != 0 }
+func (self RadioTapVHTFlags) TXOPPSNotAllowed() bool {
+	return self&RadioTapVHTFlagsTXOPPSNotAllowed != 0
+}
+func (self RadioTapVHTFlags) SGI() bool        { return self&RadioTapVHTFlagsSGI != 0 }
+func (self RadioTapVHTFlags) SGINSYMMod() bool { return self&RadioTapVHTFlagsSGINSYMMod != 0 }
+func (self RadioTapVHTFlags) LDPCExtraOFDMSymbol() bool {
+	return self&RadioTapVHTFlagsLDPCExtraOFDMSymbol != 0
+}
+func (self RadioTapVHTFlags) Beamformed() bool { return self&RadioTapVHTFlagsBeamformed != 0 }
+
+type RadioTapVHTMCSNSS uint8
+
+func (self RadioTapVHTMCSNSS) Present() bool {
+	return self&0x0F != 0
+}
+
+func (self RadioTapVHTMCSNSS) String() string {
+	return fmt.Sprintf("NSS#%dMCS#%d", uint32(self&0xf), uint32(self>>4))
+}
+
+func decodeRadioTap(data []byte, p gopacket.PacketBuilder) error {
+	d := &RadioTap{}
+	// TODO: Should we set LinkLayer here? And implement LinkFlow
+	return decodingLayerDecoder(d, data, p)
+}
+
+type RadioTap struct {
+	BaseLayer
+
+	// Version 0. Only increases for drastic changes, introduction of compatible new fields does not count.
+	Version uint8
+	// Length of the whole header in bytes, including it_version, it_pad, it_len, and data fields.
+	Length uint16
+	// Present is a bitmap telling which fields are present. Set bit 31 (0x80000000) to extend the bitmap by another 32 bits; further extensions chain in the same way.
+	Present RadioTapPresent
+	// TSFT: value in microseconds of the MAC's 64-bit 802.11 Time Synchronization Function timer when the first bit of the MPDU arrived at the MAC. For received frames only.
+	TSFT  uint64
+	Flags RadioTapFlags
+	// Rate Tx/Rx data rate
+	Rate RadioTapRate
+	// ChannelFrequency Tx/Rx frequency in MHz, followed by flags
+	ChannelFrequency RadioTapChannelFrequency
+	ChannelFlags     RadioTapChannelFlags
+	// FHSS For frequency-hopping radios, the hop set (first byte) and pattern (second byte).
+	FHSS uint16
+	// DBMAntennaSignal RF signal power at the antenna, decibel difference from one milliwatt.
+	DBMAntennaSignal int8
+	// DBMAntennaNoise RF noise power at the antenna, decibel difference from one milliwatt.
+	DBMAntennaNoise int8
+	// LockQuality Quality of Barker code lock. Unitless. Monotonically nondecreasing with "better" lock strength. Called "Signal Quality" in datasheets.
+	LockQuality uint16
+	// TxAttenuation Transmit power expressed as unitless distance from max power set at factory calibration.  0 is max power. Monotonically nondecreasing with lower power levels.
+	TxAttenuation uint16
+	// DBTxAttenuation Transmit power expressed as decibel distance from max power set at factory calibration.  0 is max power.  Monotonically nondecreasing with lower power levels.
+	DBTxAttenuation uint16
+	// DBMTxPower Transmit power expressed as dBm (decibels from a 1 milliwatt reference). This is the absolute power level measured at the antenna port.
+	DBMTxPower int8
+	// Antenna Unitless indication of the Rx/Tx antenna for this packet. The first antenna is antenna 0.
+	Antenna uint8
+	// DBAntennaSignal RF signal power at the antenna, decibel difference from an arbitrary, fixed reference.
+	DBAntennaSignal uint8
+	// DBAntennaNoise RF noise power at the antenna, decibel difference from an arbitrary, fixed reference point.
+	DBAntennaNoise uint8
+	//
+	RxFlags     RadioTapRxFlags
+	TxFlags     RadioTapTxFlags
+	RtsRetries  uint8
+	DataRetries uint8
+	MCS         RadioTapMCS
+	AMPDUStatus RadioTapAMPDUStatus
+	VHT         RadioTapVHT
+}
+
+func (m *RadioTap) LayerType() gopacket.LayerType { return LayerTypeRadioTap }
+
+func (m *RadioTap) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		df.SetTruncated()
+		return errors.New("RadioTap too small")
+	}
+	m.Version = uint8(data[0])
+	m.Length = binary.LittleEndian.Uint16(data[2:4])
+	m.Present = RadioTapPresent(binary.LittleEndian.Uint32(data[4:8]))
+
+	offset := uint16(4)
+
+	for (binary.LittleEndian.Uint32(data[offset:offset+4]) & 0x80000000) != 0 {
+		// This parser only handles standard radiotap namespace,
+		// and expects all fields are packed in the first it_present.
+		// Extended bitmap will be just ignored.
+		offset += 4
+	}
+	offset += 4 // skip the bitmap
+
+	if m.Present.TSFT() {
+		offset += align(offset, 8)
+		m.TSFT = binary.LittleEndian.Uint64(data[offset : offset+8])
+		offset += 8
+	}
+	if m.Present.Flags() {
+		m.Flags = RadioTapFlags(data[offset])
+		offset++
+	}
+	if m.Present.Rate() {
+		m.Rate = RadioTapRate(data[offset])
+		offset++
+	}
+	if m.Present.Channel() {
+		offset += align(offset, 2)
+		m.ChannelFrequency = RadioTapChannelFrequency(binary.LittleEndian.Uint16(data[offset : offset+2]))
+		offset += 2
+		m.ChannelFlags = RadioTapChannelFlags(binary.LittleEndian.Uint16(data[offset : offset+2]))
+		offset += 2
+	}
+	if m.Present.FHSS() {
+		m.FHSS = binary.LittleEndian.Uint16(data[offset : offset+2])
+		offset += 2
+	}
+	if m.Present.DBMAntennaSignal() {
+		m.DBMAntennaSignal = int8(data[offset])
+		offset++
+	}
+	if m.Present.DBMAntennaNoise() {
+		m.DBMAntennaNoise = int8(data[offset])
+		offset++
+	}
+	if m.Present.LockQuality() {
+		offset += align(offset, 2)
+		m.LockQuality = binary.LittleEndian.Uint16(data[offset : offset+2])
+		offset += 2
+	}
+	if m.Present.TxAttenuation() {
+		offset += align(offset, 2)
+		m.TxAttenuation = binary.LittleEndian.Uint16(data[offset : offset+2])
+		offset += 2
+	}
+	if m.Present.DBTxAttenuation() {
+		offset += align(offset, 2)
+		m.DBTxAttenuation = binary.LittleEndian.Uint16(data[offset : offset+2])
+		offset += 2
+	}
+	if m.Present.DBMTxPower() {
+		m.DBMTxPower = int8(data[offset])
+		offset++
+	}
+	if m.Present.Antenna() {
+		m.Antenna = uint8(data[offset])
+		offset++
+	}
+	if m.Present.DBAntennaSignal() {
+		m.DBAntennaSignal = uint8(data[offset])
+		offset++
+	}
+	if m.Present.DBAntennaNoise() {
+		m.DBAntennaNoise = uint8(data[offset])
+		offset++
+	}
+	if m.Present.RxFlags() {
+		offset += align(offset, 2)
+		m.RxFlags = RadioTapRxFlags(binary.LittleEndian.Uint16(data[offset:]))
+		offset += 2
+	}
+	if m.Present.TxFlags() {
+		offset += align(offset, 2)
+		m.TxFlags = RadioTapTxFlags(binary.LittleEndian.Uint16(data[offset:]))
+		offset += 2
+	}
+	if m.Present.RtsRetries() {
+		m.RtsRetries = uint8(data[offset])
+		offset++
+	}
+	if m.Present.DataRetries() {
+		m.DataRetries = uint8(data[offset])
+		offset++
+	}
+	if m.Present.MCS() {
+		m.MCS = RadioTapMCS{
+			RadioTapMCSKnown(data[offset]),
+			RadioTapMCSFlags(data[offset+1]),
+			uint8(data[offset+2]),
+		}
+		offset += 3
+	}
+	if m.Present.AMPDUStatus() {
+		offset += align(offset, 4)
+		m.AMPDUStatus = RadioTapAMPDUStatus{
+			Reference: binary.LittleEndian.Uint32(data[offset:]),
+			Flags:     RadioTapAMPDUStatusFlags(binary.LittleEndian.Uint16(data[offset+4:])),
+			CRC:       uint8(data[offset+6]),
+		}
+		offset += 8
+	}
+	if m.Present.VHT() {
+		offset += align(offset, 2)
+		m.VHT = RadioTapVHT{
+			Known:     RadioTapVHTKnown(binary.LittleEndian.Uint16(data[offset:])),
+			Flags:     RadioTapVHTFlags(data[offset+2]),
+			Bandwidth: uint8(data[offset+3]),
+			MCSNSS: [4]RadioTapVHTMCSNSS{
+				RadioTapVHTMCSNSS(data[offset+4]),
+				RadioTapVHTMCSNSS(data[offset+5]),
+				RadioTapVHTMCSNSS(data[offset+6]),
+				RadioTapVHTMCSNSS(data[offset+7]),
+			},
+			Coding:     uint8(data[offset+8]),
+			GroupId:    uint8(data[offset+9]),
+			PartialAID: binary.LittleEndian.Uint16(data[offset+10:]),
+		}
+		offset += 12
+	}
+
+	payload := data[m.Length:]
+
+	// Remove non-standard padding used by some Wi-Fi drivers
+	if m.Flags.Datapad() && len(payload) >= 2 &&
+		payload[0]&0xC == 0x8 { // Data frame
+		headlen := 24
+		if payload[0]&0x8C == 0x88 { // QoS
+			headlen += 2
+		}
+		if payload[1]&0x3 == 0x3 { // 4 addresses
+			headlen += 2
+		}
+		if headlen%4 == 2 {
+			payload = append(payload[:headlen], payload[headlen+2:len(payload)]...)
+		}
+	}
+
+	if !m.Flags.FCS() {
+		// Dot11.DecodeFromBytes() expects the FCS to be present and performs a hard chop on the checksum.
+		// If a user is handing in subslices or packets from a buffered stream, the capacity of the slice
+		// may extend beyond the len. Rather than expecting callers to enforce cap==len on every packet,
+		// we take the hit in this one case and do a reallocation. If the user DOES enforce cap==len,
+		// then the reallocation will happen anyway on the append. This is required because append would
+		// otherwise write to the memory directly after the payload if there is sufficient capacity, which
+		// callers may not expect.
+		reallocPayload := make([]byte, len(payload)+4)
+		copy(reallocPayload[0:len(payload)], payload)
+		h := crc32.NewIEEE()
+		h.Write(payload)
+		binary.LittleEndian.PutUint32(reallocPayload[len(payload):], h.Sum32())
+		payload = reallocPayload
+	}
+	m.BaseLayer = BaseLayer{Contents: data[:m.Length], Payload: payload}
+
+	return nil
+}
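+
+// A minimal decode sketch (not part of upstream gopacket; assumes frameData holds
+// a frame captured with the radiotap link type):
+//
+//	pkt := gopacket.NewPacket(frameData, LayerTypeRadioTap, gopacket.NoCopy)
+//	if rt, ok := pkt.Layer(LayerTypeRadioTap).(*RadioTap); ok {
+//		_ = rt.ChannelFrequency
+//	}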
+
+func (m RadioTap) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	buf := make([]byte, 1024)
+
+	buf[0] = m.Version
+	buf[1] = 0
+
+	binary.LittleEndian.PutUint32(buf[4:8], uint32(m.Present))
+
+	offset := uint16(4)
+
+	for (binary.LittleEndian.Uint32(buf[offset:offset+4]) & 0x80000000) != 0 {
+		offset += 4
+	}
+
+	offset += 4
+
+	if m.Present.TSFT() {
+		offset += align(offset, 8)
+		binary.LittleEndian.PutUint64(buf[offset:offset+8], m.TSFT)
+		offset += 8
+	}
+
+	if m.Present.Flags() {
+		buf[offset] = uint8(m.Flags)
+		offset++
+	}
+
+	if m.Present.Rate() {
+		buf[offset] = uint8(m.Rate)
+		offset++
+	}
+
+	if m.Present.Channel() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.ChannelFrequency))
+		offset += 2
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.ChannelFlags))
+		offset += 2
+	}
+
+	if m.Present.FHSS() {
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], m.FHSS)
+		offset += 2
+	}
+
+	if m.Present.DBMAntennaSignal() {
+		buf[offset] = byte(m.DBMAntennaSignal)
+		offset++
+	}
+
+	if m.Present.DBMAntennaNoise() {
+		buf[offset] = byte(m.DBMAntennaNoise)
+		offset++
+	}
+
+	if m.Present.LockQuality() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], m.LockQuality)
+		offset += 2
+	}
+
+	if m.Present.TxAttenuation() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], m.TxAttenuation)
+		offset += 2
+	}
+
+	if m.Present.DBTxAttenuation() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], m.DBTxAttenuation)
+		offset += 2
+	}
+
+	if m.Present.DBMTxPower() {
+		buf[offset] = byte(m.DBMTxPower)
+		offset++
+	}
+
+	if m.Present.Antenna() {
+		buf[offset] = uint8(m.Antenna)
+		offset++
+	}
+
+	if m.Present.DBAntennaSignal() {
+		buf[offset] = uint8(m.DBAntennaSignal)
+		offset++
+	}
+
+	if m.Present.DBAntennaNoise() {
+		buf[offset] = uint8(m.DBAntennaNoise)
+		offset++
+	}
+
+	if m.Present.RxFlags() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.RxFlags))
+		offset += 2
+	}
+
+	if m.Present.TxFlags() {
+		offset += align(offset, 2)
+		binary.LittleEndian.PutUint16(buf[offset:offset+2], uint16(m.TxFlags))
+		offset += 2
+	}
+
+	if m.Present.RtsRetries() {
+		buf[offset] = m.RtsRetries
+		offset++
+	}
+
+	if m.Present.DataRetries() {
+		buf[offset] = m.DataRetries
+		offset++
+	}
+
+	if m.Present.MCS() {
+		buf[offset] = uint8(m.MCS.Known)
+		buf[offset+1] = uint8(m.MCS.Flags)
+		buf[offset+2] = uint8(m.MCS.MCS)
+
+		offset += 3
+	}
+
+	if m.Present.AMPDUStatus() {
+		offset += align(offset, 4)
+
+		binary.LittleEndian.PutUint32(buf[offset:offset+4], m.AMPDUStatus.Reference)
+		binary.LittleEndian.PutUint16(buf[offset+4:offset+6], uint16(m.AMPDUStatus.Flags))
+
+		buf[offset+6] = m.AMPDUStatus.CRC
+
+		offset += 8
+	}
+
+	if m.Present.VHT() {
+		offset += align(offset, 2)
+
+		binary.LittleEndian.PutUint16(buf[offset:], uint16(m.VHT.Known))
+
+		buf[offset+2] = uint8(m.VHT.Flags)
+		buf[offset+3] = uint8(m.VHT.Bandwidth)
+		buf[offset+4] = uint8(m.VHT.MCSNSS[0])
+		buf[offset+5] = uint8(m.VHT.MCSNSS[1])
+		buf[offset+6] = uint8(m.VHT.MCSNSS[2])
+		buf[offset+7] = uint8(m.VHT.MCSNSS[3])
+		buf[offset+8] = uint8(m.VHT.Coding)
+		buf[offset+9] = uint8(m.VHT.GroupId)
+
+		binary.LittleEndian.PutUint16(buf[offset+10:offset+12], m.VHT.PartialAID)
+
+		offset += 12
+	}
+
+	packetBuf, err := b.PrependBytes(int(offset))
+
+	if err != nil {
+		return err
+	}
+
+	if opts.FixLengths {
+		m.Length = offset
+	}
+
+	binary.LittleEndian.PutUint16(buf[2:4], m.Length)
+
+	copy(packetBuf, buf)
+
+	return nil
+}
+
+func (m *RadioTap) CanDecode() gopacket.LayerClass    { return LayerTypeRadioTap }
+func (m *RadioTap) NextLayerType() gopacket.LayerType { return LayerTypeDot11 }
diff --git a/vendor/github.com/google/gopacket/layers/radius.go b/vendor/github.com/google/gopacket/layers/radius.go
new file mode 100644
index 0000000..c43ea29
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/radius.go
@@ -0,0 +1,560 @@
+// Copyright 2020 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file in the root of the source tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+const (
+	// RFC 2865 3.  Packet Format
+	// `The minimum length is 20 and maximum length is 4096.`
+	radiusMinimumRecordSizeInBytes int = 20
+	radiusMaximumRecordSizeInBytes int = 4096
+
+	// RFC 2865 5.  Attributes
+	// `The Length field is one octet, and indicates the length of this Attribute including the Type, Length and Value fields.`
+	// `The Value field is zero or more octets and contains information specific to the Attribute.`
+	radiusAttributesMinimumRecordSizeInBytes int = 2
+)
+
+// RADIUS represents a Remote Authentication Dial In User Service layer.
+type RADIUS struct {
+	BaseLayer
+
+	Code          RADIUSCode
+	Identifier    RADIUSIdentifier
+	Length        RADIUSLength
+	Authenticator RADIUSAuthenticator
+	Attributes    []RADIUSAttribute
+}
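+
+// A minimal decode sketch (not part of upstream gopacket; assumes udpPayload holds
+// the UDP payload of a RADIUS message):
+//
+//	pkt := gopacket.NewPacket(udpPayload, LayerTypeRADIUS, gopacket.Default)
+//	if r, ok := pkt.Layer(LayerTypeRADIUS).(*RADIUS); ok {
+//		fmt.Println(r.Code, r.Identifier, len(r.Attributes))
+//	}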
+
+// RADIUSCode represents packet type.
+type RADIUSCode uint8
+
+// constants that define RADIUSCode.
+const (
+	RADIUSCodeAccessRequest      RADIUSCode = 1   // RFC2865 3.  Packet Format
+	RADIUSCodeAccessAccept       RADIUSCode = 2   // RFC2865 3.  Packet Format
+	RADIUSCodeAccessReject       RADIUSCode = 3   // RFC2865 3.  Packet Format
+	RADIUSCodeAccountingRequest  RADIUSCode = 4   // RFC2865 3.  Packet Format
+	RADIUSCodeAccountingResponse RADIUSCode = 5   // RFC2865 3.  Packet Format
+	RADIUSCodeAccessChallenge    RADIUSCode = 11  // RFC2865 3.  Packet Format
+	RADIUSCodeStatusServer       RADIUSCode = 12  // RFC2865 3.  Packet Format (experimental)
+	RADIUSCodeStatusClient       RADIUSCode = 13  // RFC2865 3.  Packet Format (experimental)
+	RADIUSCodeReserved           RADIUSCode = 255 // RFC2865 3.  Packet Format
+)
+
+// String returns a string version of a RADIUSCode.
+func (t RADIUSCode) String() (s string) {
+	switch t {
+	case RADIUSCodeAccessRequest:
+		s = "Access-Request"
+	case RADIUSCodeAccessAccept:
+		s = "Access-Accept"
+	case RADIUSCodeAccessReject:
+		s = "Access-Reject"
+	case RADIUSCodeAccountingRequest:
+		s = "Accounting-Request"
+	case RADIUSCodeAccountingResponse:
+		s = "Accounting-Response"
+	case RADIUSCodeAccessChallenge:
+		s = "Access-Challenge"
+	case RADIUSCodeStatusServer:
+		s = "Status-Server"
+	case RADIUSCodeStatusClient:
+		s = "Status-Client"
+	case RADIUSCodeReserved:
+		s = "Reserved"
+	default:
+		s = fmt.Sprintf("Unknown(%d)", t)
+	}
+	return
+}
+
+// RADIUSIdentifier represents packet identifier.
+type RADIUSIdentifier uint8
+
+// RADIUSLength represents packet length.
+type RADIUSLength uint16
+
+// RADIUSAuthenticator represents authenticator.
+type RADIUSAuthenticator [16]byte
+
+// RADIUSAttribute represents attributes.
+type RADIUSAttribute struct {
+	Type   RADIUSAttributeType
+	Length RADIUSAttributeLength
+	Value  RADIUSAttributeValue
+}
+
+// RADIUSAttributeType represents attribute type.
+type RADIUSAttributeType uint8
+
+// constants that define RADIUSAttributeType.
+const (
+	RADIUSAttributeTypeUserName               RADIUSAttributeType = 1  // RFC2865  5.1.  User-Name
+	RADIUSAttributeTypeUserPassword           RADIUSAttributeType = 2  // RFC2865  5.2.  User-Password
+	RADIUSAttributeTypeCHAPPassword           RADIUSAttributeType = 3  // RFC2865  5.3.  CHAP-Password
+	RADIUSAttributeTypeNASIPAddress           RADIUSAttributeType = 4  // RFC2865  5.4.  NAS-IP-Address
+	RADIUSAttributeTypeNASPort                RADIUSAttributeType = 5  // RFC2865  5.5.  NAS-Port
+	RADIUSAttributeTypeServiceType            RADIUSAttributeType = 6  // RFC2865  5.6.  Service-Type
+	RADIUSAttributeTypeFramedProtocol         RADIUSAttributeType = 7  // RFC2865  5.7.  Framed-Protocol
+	RADIUSAttributeTypeFramedIPAddress        RADIUSAttributeType = 8  // RFC2865  5.8.  Framed-IP-Address
+	RADIUSAttributeTypeFramedIPNetmask        RADIUSAttributeType = 9  // RFC2865  5.9.  Framed-IP-Netmask
+	RADIUSAttributeTypeFramedRouting          RADIUSAttributeType = 10 // RFC2865 5.10.  Framed-Routing
+	RADIUSAttributeTypeFilterId               RADIUSAttributeType = 11 // RFC2865 5.11.  Filter-Id
+	RADIUSAttributeTypeFramedMTU              RADIUSAttributeType = 12 // RFC2865 5.12.  Framed-MTU
+	RADIUSAttributeTypeFramedCompression      RADIUSAttributeType = 13 // RFC2865 5.13.  Framed-Compression
+	RADIUSAttributeTypeLoginIPHost            RADIUSAttributeType = 14 // RFC2865 5.14.  Login-IP-Host
+	RADIUSAttributeTypeLoginService           RADIUSAttributeType = 15 // RFC2865 5.15.  Login-Service
+	RADIUSAttributeTypeLoginTCPPort           RADIUSAttributeType = 16 // RFC2865 5.16.  Login-TCP-Port
+	RADIUSAttributeTypeReplyMessage           RADIUSAttributeType = 18 // RFC2865 5.18.  Reply-Message
+	RADIUSAttributeTypeCallbackNumber         RADIUSAttributeType = 19 // RFC2865 5.19.  Callback-Number
+	RADIUSAttributeTypeCallbackId             RADIUSAttributeType = 20 // RFC2865 5.20.  Callback-Id
+	RADIUSAttributeTypeFramedRoute            RADIUSAttributeType = 22 // RFC2865 5.22.  Framed-Route
+	RADIUSAttributeTypeFramedIPXNetwork       RADIUSAttributeType = 23 // RFC2865 5.23.  Framed-IPX-Network
+	RADIUSAttributeTypeState                  RADIUSAttributeType = 24 // RFC2865 5.24.  State
+	RADIUSAttributeTypeClass                  RADIUSAttributeType = 25 // RFC2865 5.25.  Class
+	RADIUSAttributeTypeVendorSpecific         RADIUSAttributeType = 26 // RFC2865 5.26.  Vendor-Specific
+	RADIUSAttributeTypeSessionTimeout         RADIUSAttributeType = 27 // RFC2865 5.27.  Session-Timeout
+	RADIUSAttributeTypeIdleTimeout            RADIUSAttributeType = 28 // RFC2865 5.28.  Idle-Timeout
+	RADIUSAttributeTypeTerminationAction      RADIUSAttributeType = 29 // RFC2865 5.29.  Termination-Action
+	RADIUSAttributeTypeCalledStationId        RADIUSAttributeType = 30 // RFC2865 5.30.  Called-Station-Id
+	RADIUSAttributeTypeCallingStationId       RADIUSAttributeType = 31 // RFC2865 5.31.  Calling-Station-Id
+	RADIUSAttributeTypeNASIdentifier          RADIUSAttributeType = 32 // RFC2865 5.32.  NAS-Identifier
+	RADIUSAttributeTypeProxyState             RADIUSAttributeType = 33 // RFC2865 5.33.  Proxy-State
+	RADIUSAttributeTypeLoginLATService        RADIUSAttributeType = 34 // RFC2865 5.34.  Login-LAT-Service
+	RADIUSAttributeTypeLoginLATNode           RADIUSAttributeType = 35 // RFC2865 5.35.  Login-LAT-Node
+	RADIUSAttributeTypeLoginLATGroup          RADIUSAttributeType = 36 // RFC2865 5.36.  Login-LAT-Group
+	RADIUSAttributeTypeFramedAppleTalkLink    RADIUSAttributeType = 37 // RFC2865 5.37.  Framed-AppleTalk-Link
+	RADIUSAttributeTypeFramedAppleTalkNetwork RADIUSAttributeType = 38 // RFC2865 5.38.  Framed-AppleTalk-Network
+	RADIUSAttributeTypeFramedAppleTalkZone    RADIUSAttributeType = 39 // RFC2865 5.39.  Framed-AppleTalk-Zone
+	RADIUSAttributeTypeAcctStatusType         RADIUSAttributeType = 40 // RFC2866  5.1.  Acct-Status-Type
+	RADIUSAttributeTypeAcctDelayTime          RADIUSAttributeType = 41 // RFC2866  5.2.  Acct-Delay-Time
+	RADIUSAttributeTypeAcctInputOctets        RADIUSAttributeType = 42 // RFC2866  5.3.  Acct-Input-Octets
+	RADIUSAttributeTypeAcctOutputOctets       RADIUSAttributeType = 43 // RFC2866  5.4.  Acct-Output-Octets
+	RADIUSAttributeTypeAcctSessionId          RADIUSAttributeType = 44 // RFC2866  5.5.  Acct-Session-Id
+	RADIUSAttributeTypeAcctAuthentic          RADIUSAttributeType = 45 // RFC2866  5.6.  Acct-Authentic
+	RADIUSAttributeTypeAcctSessionTime        RADIUSAttributeType = 46 // RFC2866  5.7.  Acct-Session-Time
+	RADIUSAttributeTypeAcctInputPackets       RADIUSAttributeType = 47 // RFC2866  5.8.  Acct-Input-Packets
+	RADIUSAttributeTypeAcctOutputPackets      RADIUSAttributeType = 48 // RFC2866  5.9.  Acct-Output-Packets
+	RADIUSAttributeTypeAcctTerminateCause     RADIUSAttributeType = 49 // RFC2866 5.10.  Acct-Terminate-Cause
+	RADIUSAttributeTypeAcctMultiSessionId     RADIUSAttributeType = 50 // RFC2866 5.11.  Acct-Multi-Session-Id
+	RADIUSAttributeTypeAcctLinkCount          RADIUSAttributeType = 51 // RFC2866 5.12.  Acct-Link-Count
+	RADIUSAttributeTypeAcctInputGigawords     RADIUSAttributeType = 52 // RFC2869  5.1.  Acct-Input-Gigawords
+	RADIUSAttributeTypeAcctOutputGigawords    RADIUSAttributeType = 53 // RFC2869  5.2.  Acct-Output-Gigawords
+	RADIUSAttributeTypeEventTimestamp         RADIUSAttributeType = 55 // RFC2869  5.3.  Event-Timestamp
+	RADIUSAttributeTypeCHAPChallenge          RADIUSAttributeType = 60 // RFC2865 5.40.  CHAP-Challenge
+	RADIUSAttributeTypeNASPortType            RADIUSAttributeType = 61 // RFC2865 5.41.  NAS-Port-Type
+	RADIUSAttributeTypePortLimit              RADIUSAttributeType = 62 // RFC2865 5.42.  Port-Limit
+	RADIUSAttributeTypeLoginLATPort           RADIUSAttributeType = 63 // RFC2865 5.43.  Login-LAT-Port
+	RADIUSAttributeTypeTunnelType             RADIUSAttributeType = 64 // RFC2868  3.1.  Tunnel-Type
+	RADIUSAttributeTypeTunnelMediumType       RADIUSAttributeType = 65 // RFC2868  3.2.  Tunnel-Medium-Type
+	RADIUSAttributeTypeTunnelClientEndpoint   RADIUSAttributeType = 66 // RFC2868  3.3.  Tunnel-Client-Endpoint
+	RADIUSAttributeTypeTunnelServerEndpoint   RADIUSAttributeType = 67 // RFC2868  3.4.  Tunnel-Server-Endpoint
+	RADIUSAttributeTypeAcctTunnelConnection   RADIUSAttributeType = 68 // RFC2867  4.1.  Acct-Tunnel-Connection
+	RADIUSAttributeTypeTunnelPassword         RADIUSAttributeType = 69 // RFC2868  3.5.  Tunnel-Password
+	RADIUSAttributeTypeARAPPassword           RADIUSAttributeType = 70 // RFC2869  5.4.  ARAP-Password
+	RADIUSAttributeTypeARAPFeatures           RADIUSAttributeType = 71 // RFC2869  5.5.  ARAP-Features
+	RADIUSAttributeTypeARAPZoneAccess         RADIUSAttributeType = 72 // RFC2869  5.6.  ARAP-Zone-Access
+	RADIUSAttributeTypeARAPSecurity           RADIUSAttributeType = 73 // RFC2869  5.7.  ARAP-Security
+	RADIUSAttributeTypeARAPSecurityData       RADIUSAttributeType = 74 // RFC2869  5.8.  ARAP-Security-Data
+	RADIUSAttributeTypePasswordRetry          RADIUSAttributeType = 75 // RFC2869  5.9.  Password-Retry
+	RADIUSAttributeTypePrompt                 RADIUSAttributeType = 76 // RFC2869 5.10.  Prompt
+	RADIUSAttributeTypeConnectInfo            RADIUSAttributeType = 77 // RFC2869 5.11.  Connect-Info
+	RADIUSAttributeTypeConfigurationToken     RADIUSAttributeType = 78 // RFC2869 5.12.  Configuration-Token
+	RADIUSAttributeTypeEAPMessage             RADIUSAttributeType = 79 // RFC2869 5.13.  EAP-Message
+	RADIUSAttributeTypeMessageAuthenticator   RADIUSAttributeType = 80 // RFC2869 5.14.  Message-Authenticator
+	RADIUSAttributeTypeTunnelPrivateGroupID   RADIUSAttributeType = 81 // RFC2868  3.6.  Tunnel-Private-Group-ID
+	RADIUSAttributeTypeTunnelAssignmentID     RADIUSAttributeType = 82 // RFC2868  3.7.  Tunnel-Assignment-ID
+	RADIUSAttributeTypeTunnelPreference       RADIUSAttributeType = 83 // RFC2868  3.8.  Tunnel-Preference
+	RADIUSAttributeTypeARAPChallengeResponse  RADIUSAttributeType = 84 // RFC2869 5.15.  ARAP-Challenge-Response
+	RADIUSAttributeTypeAcctInterimInterval    RADIUSAttributeType = 85 // RFC2869 5.16.  Acct-Interim-Interval
+	RADIUSAttributeTypeAcctTunnelPacketsLost  RADIUSAttributeType = 86 // RFC2867  4.2.  Acct-Tunnel-Packets-Lost
+	RADIUSAttributeTypeNASPortId              RADIUSAttributeType = 87 // RFC2869 5.17.  NAS-Port-Id
+	RADIUSAttributeTypeFramedPool             RADIUSAttributeType = 88 // RFC2869 5.18.  Framed-Pool
+	RADIUSAttributeTypeTunnelClientAuthID     RADIUSAttributeType = 90 // RFC2868  3.9.  Tunnel-Client-Auth-ID
+	RADIUSAttributeTypeTunnelServerAuthID     RADIUSAttributeType = 91 // RFC2868 3.10.  Tunnel-Server-Auth-ID
+)
+
+// RADIUSAttributeLength represents attribute length.
+type RADIUSAttributeLength uint8
+
+// RADIUSAttributeValue represents attribute value.
+type RADIUSAttributeValue []byte
+
+// String returns a string version of a RADIUSAttributeType.
+func (t RADIUSAttributeType) String() (s string) {
+	switch t {
+	case RADIUSAttributeTypeUserName:
+		s = "User-Name"
+	case RADIUSAttributeTypeUserPassword:
+		s = "User-Password"
+	case RADIUSAttributeTypeCHAPPassword:
+		s = "CHAP-Password"
+	case RADIUSAttributeTypeNASIPAddress:
+		s = "NAS-IP-Address"
+	case RADIUSAttributeTypeNASPort:
+		s = "NAS-Port"
+	case RADIUSAttributeTypeServiceType:
+		s = "Service-Type"
+	case RADIUSAttributeTypeFramedProtocol:
+		s = "Framed-Protocol"
+	case RADIUSAttributeTypeFramedIPAddress:
+		s = "Framed-IP-Address"
+	case RADIUSAttributeTypeFramedIPNetmask:
+		s = "Framed-IP-Netmask"
+	case RADIUSAttributeTypeFramedRouting:
+		s = "Framed-Routing"
+	case RADIUSAttributeTypeFilterId:
+		s = "Filter-Id"
+	case RADIUSAttributeTypeFramedMTU:
+		s = "Framed-MTU"
+	case RADIUSAttributeTypeFramedCompression:
+		s = "Framed-Compression"
+	case RADIUSAttributeTypeLoginIPHost:
+		s = "Login-IP-Host"
+	case RADIUSAttributeTypeLoginService:
+		s = "Login-Service"
+	case RADIUSAttributeTypeLoginTCPPort:
+		s = "Login-TCP-Port"
+	case RADIUSAttributeTypeReplyMessage:
+		s = "Reply-Message"
+	case RADIUSAttributeTypeCallbackNumber:
+		s = "Callback-Number"
+	case RADIUSAttributeTypeCallbackId:
+		s = "Callback-Id"
+	case RADIUSAttributeTypeFramedRoute:
+		s = "Framed-Route"
+	case RADIUSAttributeTypeFramedIPXNetwork:
+		s = "Framed-IPX-Network"
+	case RADIUSAttributeTypeState:
+		s = "State"
+	case RADIUSAttributeTypeClass:
+		s = "Class"
+	case RADIUSAttributeTypeVendorSpecific:
+		s = "Vendor-Specific"
+	case RADIUSAttributeTypeSessionTimeout:
+		s = "Session-Timeout"
+	case RADIUSAttributeTypeIdleTimeout:
+		s = "Idle-Timeout"
+	case RADIUSAttributeTypeTerminationAction:
+		s = "Termination-Action"
+	case RADIUSAttributeTypeCalledStationId:
+		s = "Called-Station-Id"
+	case RADIUSAttributeTypeCallingStationId:
+		s = "Calling-Station-Id"
+	case RADIUSAttributeTypeNASIdentifier:
+		s = "NAS-Identifier"
+	case RADIUSAttributeTypeProxyState:
+		s = "Proxy-State"
+	case RADIUSAttributeTypeLoginLATService:
+		s = "Login-LAT-Service"
+	case RADIUSAttributeTypeLoginLATNode:
+		s = "Login-LAT-Node"
+	case RADIUSAttributeTypeLoginLATGroup:
+		s = "Login-LAT-Group"
+	case RADIUSAttributeTypeFramedAppleTalkLink:
+		s = "Framed-AppleTalk-Link"
+	case RADIUSAttributeTypeFramedAppleTalkNetwork:
+		s = "Framed-AppleTalk-Network"
+	case RADIUSAttributeTypeFramedAppleTalkZone:
+		s = "Framed-AppleTalk-Zone"
+	case RADIUSAttributeTypeAcctStatusType:
+		s = "Acct-Status-Type"
+	case RADIUSAttributeTypeAcctDelayTime:
+		s = "Acct-Delay-Time"
+	case RADIUSAttributeTypeAcctInputOctets:
+		s = "Acct-Input-Octets"
+	case RADIUSAttributeTypeAcctOutputOctets:
+		s = "Acct-Output-Octets"
+	case RADIUSAttributeTypeAcctSessionId:
+		s = "Acct-Session-Id"
+	case RADIUSAttributeTypeAcctAuthentic:
+		s = "Acct-Authentic"
+	case RADIUSAttributeTypeAcctSessionTime:
+		s = "Acct-Session-Time"
+	case RADIUSAttributeTypeAcctInputPackets:
+		s = "Acct-Input-Packets"
+	case RADIUSAttributeTypeAcctOutputPackets:
+		s = "Acct-Output-Packets"
+	case RADIUSAttributeTypeAcctTerminateCause:
+		s = "Acct-Terminate-Cause"
+	case RADIUSAttributeTypeAcctMultiSessionId:
+		s = "Acct-Multi-Session-Id"
+	case RADIUSAttributeTypeAcctLinkCount:
+		s = "Acct-Link-Count"
+	case RADIUSAttributeTypeAcctInputGigawords:
+		s = "Acct-Input-Gigawords"
+	case RADIUSAttributeTypeAcctOutputGigawords:
+		s = "Acct-Output-Gigawords"
+	case RADIUSAttributeTypeEventTimestamp:
+		s = "Event-Timestamp"
+	case RADIUSAttributeTypeCHAPChallenge:
+		s = "CHAP-Challenge"
+	case RADIUSAttributeTypeNASPortType:
+		s = "NAS-Port-Type"
+	case RADIUSAttributeTypePortLimit:
+		s = "Port-Limit"
+	case RADIUSAttributeTypeLoginLATPort:
+		s = "Login-LAT-Port"
+	case RADIUSAttributeTypeTunnelType:
+		s = "Tunnel-Type"
+	case RADIUSAttributeTypeTunnelMediumType:
+		s = "Tunnel-Medium-Type"
+	case RADIUSAttributeTypeTunnelClientEndpoint:
+		s = "Tunnel-Client-Endpoint"
+	case RADIUSAttributeTypeTunnelServerEndpoint:
+		s = "Tunnel-Server-Endpoint"
+	case RADIUSAttributeTypeAcctTunnelConnection:
+		s = "Acct-Tunnel-Connection"
+	case RADIUSAttributeTypeTunnelPassword:
+		s = "Tunnel-Password"
+	case RADIUSAttributeTypeARAPPassword:
+		s = "ARAP-Password"
+	case RADIUSAttributeTypeARAPFeatures:
+		s = "ARAP-Features"
+	case RADIUSAttributeTypeARAPZoneAccess:
+		s = "ARAP-Zone-Access"
+	case RADIUSAttributeTypeARAPSecurity:
+		s = "ARAP-Security"
+	case RADIUSAttributeTypeARAPSecurityData:
+		s = "ARAP-Security-Data"
+	case RADIUSAttributeTypePasswordRetry:
+		s = "Password-Retry"
+	case RADIUSAttributeTypePrompt:
+		s = "Prompt"
+	case RADIUSAttributeTypeConnectInfo:
+		s = "Connect-Info"
+	case RADIUSAttributeTypeConfigurationToken:
+		s = "Configuration-Token"
+	case RADIUSAttributeTypeEAPMessage:
+		s = "EAP-Message"
+	case RADIUSAttributeTypeMessageAuthenticator:
+		s = "Message-Authenticator"
+	case RADIUSAttributeTypeTunnelPrivateGroupID:
+		s = "Tunnel-Private-Group-ID"
+	case RADIUSAttributeTypeTunnelAssignmentID:
+		s = "Tunnel-Assignment-ID"
+	case RADIUSAttributeTypeTunnelPreference:
+		s = "Tunnel-Preference"
+	case RADIUSAttributeTypeARAPChallengeResponse:
+		s = "ARAP-Challenge-Response"
+	case RADIUSAttributeTypeAcctInterimInterval:
+		s = "Acct-Interim-Interval"
+	case RADIUSAttributeTypeAcctTunnelPacketsLost:
+		s = "Acct-Tunnel-Packets-Lost"
+	case RADIUSAttributeTypeNASPortId:
+		s = "NAS-Port-Id"
+	case RADIUSAttributeTypeFramedPool:
+		s = "Framed-Pool"
+	case RADIUSAttributeTypeTunnelClientAuthID:
+		s = "Tunnel-Client-Auth-ID"
+	case RADIUSAttributeTypeTunnelServerAuthID:
+		s = "Tunnel-Server-Auth-ID"
+	default:
+		s = fmt.Sprintf("Unknown(%d)", t)
+	}
+	return
+}
+
+// Len returns the length of a RADIUS packet.
+func (radius *RADIUS) Len() (int, error) {
+	n := radiusMinimumRecordSizeInBytes
+	for _, v := range radius.Attributes {
+		alen, err := attributeValueLength(v.Value)
+		if err != nil {
+			return 0, err
+		}
+		n += int(alen) + 2 // plus the Type and Length octets
+	}
+	return n, nil
+}
+
+// LayerType returns LayerTypeRADIUS.
+func (radius *RADIUS) LayerType() gopacket.LayerType {
+	return LayerTypeRADIUS
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (radius *RADIUS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) > radiusMaximumRecordSizeInBytes {
+		df.SetTruncated()
+		return fmt.Errorf("RADIUS length %d too big", len(data))
+	}
+
+	if len(data) < radiusMinimumRecordSizeInBytes {
+		df.SetTruncated()
+		return fmt.Errorf("RADIUS length %d too short", len(data))
+	}
+
+	radius.BaseLayer = BaseLayer{Contents: data}
+
+	radius.Code = RADIUSCode(data[0])
+	radius.Identifier = RADIUSIdentifier(data[1])
+	radius.Length = RADIUSLength(binary.BigEndian.Uint16(data[2:4]))
+
+	if int(radius.Length) > radiusMaximumRecordSizeInBytes {
+		df.SetTruncated()
+		return fmt.Errorf("RADIUS length %d too big", radius.Length)
+	}
+
+	if int(radius.Length) < radiusMinimumRecordSizeInBytes {
+		df.SetTruncated()
+		return fmt.Errorf("RADIUS length %d too short", radius.Length)
+	}
+
+	// RFC 2865 3.  Packet Format
+	// `If the packet is shorter than the Length field indicates, it MUST be silently discarded.`
+	if int(radius.Length) > len(data) {
+		df.SetTruncated()
+		return fmt.Errorf("RADIUS length %d too big", radius.Length)
+	}
+
+	// RFC 2865 3.  Packet Format
+	// `Octets outside the range of the Length field MUST be treated as padding and ignored on reception.`
+	if int(radius.Length) < len(data) {
+		df.SetTruncated()
+		data = data[:radius.Length]
+	}
+
+	copy(radius.Authenticator[:], data[4:20])
+
+	if len(data) == radiusMinimumRecordSizeInBytes {
+		return nil
+	}
+
+	pos := radiusMinimumRecordSizeInBytes
+	for {
+		if len(data) == pos {
+			break
+		}
+
+		if len(data[pos:]) < radiusAttributesMinimumRecordSizeInBytes {
+			df.SetTruncated()
+			return fmt.Errorf("RADIUS attributes length %d too short", len(data[pos:]))
+		}
+
+		attr := RADIUSAttribute{}
+		attr.Type = RADIUSAttributeType(data[pos])
+		attr.Length = RADIUSAttributeLength(data[pos+1])
+
+		if int(attr.Length) > len(data[pos:]) {
+			df.SetTruncated()
+			return fmt.Errorf("RADIUS attributes length %d too big", attr.Length)
+		}
+
+		if int(attr.Length) < radiusAttributesMinimumRecordSizeInBytes {
+			df.SetTruncated()
+			return fmt.Errorf("RADIUS attributes length %d too short", attr.Length)
+		}
+
+		if int(attr.Length) > radiusAttributesMinimumRecordSizeInBytes {
+			attr.Value = make([]byte, attr.Length-2)
+			copy(attr.Value[:], data[pos+2:pos+int(attr.Length)])
+			radius.Attributes = append(radius.Attributes, attr)
+		}
+
+		pos += int(attr.Length)
+	}
+
+	for _, v := range radius.Attributes {
+		if v.Type == RADIUSAttributeTypeEAPMessage {
+			radius.BaseLayer.Payload = append(radius.BaseLayer.Payload, v.Value...)
+		}
+	}
+
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializeBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (radius *RADIUS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	plen, err := radius.Len()
+	if err != nil {
+		return err
+	}
+
+	if opts.FixLengths {
+		radius.Length = RADIUSLength(plen)
+	}
+
+	data, err := b.PrependBytes(plen)
+	if err != nil {
+		return err
+	}
+
+	data[0] = byte(radius.Code)
+	data[1] = byte(radius.Identifier)
+	binary.BigEndian.PutUint16(data[2:], uint16(radius.Length))
+	copy(data[4:20], radius.Authenticator[:])
+
+	pos := radiusMinimumRecordSizeInBytes
+	for _, v := range radius.Attributes {
+		if opts.FixLengths {
+			v.Length, err = attributeValueLength(v.Value)
+			if err != nil {
+				return err
+			}
+		}
+
+		data[pos] = byte(v.Type)
+		data[pos+1] = byte(v.Length)
+		copy(data[pos+2:], v.Value[:])
+
+		pos += len(v.Value) + 2 // plus the Type and Length octets
+	}
+
+	return nil
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (radius *RADIUS) CanDecode() gopacket.LayerClass {
+	return LayerTypeRADIUS
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (radius *RADIUS) NextLayerType() gopacket.LayerType {
+	if len(radius.BaseLayer.Payload) > 0 {
+		return LayerTypeEAP
+	} else {
+		return gopacket.LayerTypeZero
+	}
+}
+
+// Payload returns the concatenated values of all EAP-Message attributes.
+func (radius *RADIUS) Payload() []byte {
+	return radius.BaseLayer.Payload
+}
+
+func decodeRADIUS(data []byte, p gopacket.PacketBuilder) error {
+	radius := &RADIUS{}
+	err := radius.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(radius)
+	p.SetApplicationLayer(radius)
+	next := radius.NextLayerType()
+	if next == gopacket.LayerTypeZero {
+		return nil
+	}
+	return p.NextDecoder(next)
+}
+
+func attributeValueLength(v []byte) (RADIUSAttributeLength, error) {
+	n := len(v)
+	if n > 255 {
+		return 0, fmt.Errorf("RADIUS attribute value length %d too long", n)
+	} else {
+		return RADIUSAttributeLength(n), nil
+	}
+}
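+
+// Illustrative usage sketch (not part of the upstream API): decoding a captured
+// RADIUS message and walking its attributes. udpPayload is a placeholder for
+// the UDP payload of a RADIUS packet.
+//
+//	pkt := gopacket.NewPacket(udpPayload, LayerTypeRADIUS, gopacket.Default)
+//	if l := pkt.Layer(LayerTypeRADIUS); l != nil {
+//		r := l.(*RADIUS)
+//		fmt.Printf("code=%v id=%v attrs=%d\n", r.Code, r.Identifier, len(r.Attributes))
+//	}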
diff --git a/vendor/github.com/google/gopacket/layers/rmcp.go b/vendor/github.com/google/gopacket/layers/rmcp.go
new file mode 100644
index 0000000..5474fee
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/rmcp.go
@@ -0,0 +1,170 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license that can be found
+// in the LICENSE file in the root of the source tree.
+
+package layers
+
+// This file implements the ASF-RMCP header specified in section 3.2.2.2 of
+// https://www.dmtf.org/sites/default/files/standards/documents/DSP0136.pdf
+
+import (
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// RMCPClass is the class of a RMCP layer's payload, e.g. ASF or IPMI. This is a
+// 4-bit unsigned int on the wire; all but 6 (ASF), 7 (IPMI) and 8 (OEM-defined)
+// are currently reserved.
+type RMCPClass uint8
+
+// LayerType returns the payload layer type corresponding to a RMCP class.
+func (c RMCPClass) LayerType() gopacket.LayerType {
+	if lt := rmcpClassLayerTypes[uint8(c)]; lt != 0 {
+		return lt
+	}
+	return gopacket.LayerTypePayload
+}
+
+func (c RMCPClass) String() string {
+	return fmt.Sprintf("%v(%v)", uint8(c), c.LayerType())
+}
+
+const (
+	// RMCPVersion1 identifies RMCP v1.0 in the Version header field. Lower
+	// values are considered legacy, while higher values are reserved by the
+	// specification.
+	RMCPVersion1 uint8 = 0x06
+
+	// RMCPNormal indicates a "normal" message, i.e. not an acknowledgement.
+	RMCPNormal uint8 = 0
+
+	// RMCPAck indicates a message is acknowledging a received normal message.
+	RMCPAck uint8 = 1 << 7
+
+	// RMCPClassASF identifies an RMCP message as containing an ASF-RMCP
+	// payload.
+	RMCPClassASF RMCPClass = 0x06
+
+	// RMCPClassIPMI identifies an RMCP message as containing an IPMI payload.
+	RMCPClassIPMI RMCPClass = 0x07
+
+	// RMCPClassOEM identifies an RMCP message as containing an OEM-defined
+	// payload.
+	RMCPClassOEM RMCPClass = 0x08
+)
+
+var (
+	rmcpClassLayerTypes = [16]gopacket.LayerType{
+		RMCPClassASF: LayerTypeASF,
+		// RMCPClassIPMI is yet to be implemented; RMCPClassOEM is deliberately
+		// not implemented, so we return LayerTypePayload
+	}
+)
+
+// RegisterRMCPLayerType allows specifying that the payload of an RMCP packet of
+// a certain class should be processed by the provided layer type. This overrides
+// any existing registrations, including defaults.
+func RegisterRMCPLayerType(c RMCPClass, l gopacket.LayerType) {
+	rmcpClassLayerTypes[c] = l
+}
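+
+// Illustrative sketch: OEM-class payloads could be routed to a custom layer by
+// registering it here; myOEMLayerType is a placeholder for a layer type created
+// elsewhere with gopacket.RegisterLayerType.
+//
+//	RegisterRMCPLayerType(RMCPClassOEM, myOEMLayerType)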
+
+// RMCP describes the format of an RMCP header, which forms a UDP payload. See
+// section 3.2.2.2.
+type RMCP struct {
+	BaseLayer
+
+	// Version identifies the version of the RMCP header. 0x06 indicates RMCP
+	// v1.0; lower values are legacy, higher values are reserved.
+	Version uint8
+
+	// Sequence is the sequence number associated with the message. Note that
+	// this rolls over to 0 after 254, not 255. Seq num 255 indicates the
+	// receiver must not send an ACK.
+	Sequence uint8
+
+	// Ack indicates whether this packet is an acknowledgement. If it is, the
+	// payload will be empty.
+	Ack bool
+
+	// Class indicates the structure of the payload. There are only 2^4 valid
+	// values; however, there is no uint4 data type. N.B. the Ack bit has been
+	// split off into another field. The most significant 4 bits of this field
+	// will always be 0.
+	Class RMCPClass
+}
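+
+// Worked example (illustrative): byte 3 of the header carries the ACK bit in
+// its most significant bit and the class in its low nibble, so 0x86 decodes as
+// Ack=true, Class=RMCPClassASF, mirroring what DecodeFromBytes does below.
+//
+//	b3 := byte(0x86)
+//	ack := b3&RMCPAck != 0       // true
+//	class := RMCPClass(b3 & 0xF) // RMCPClassASF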
+
+// LayerType returns LayerTypeRMCP. It partially satisfies Layer and
+// SerializableLayer.
+func (*RMCP) LayerType() gopacket.LayerType {
+	return LayerTypeRMCP
+}
+
+// CanDecode returns LayerTypeRMCP. It partially satisfies DecodingLayer.
+func (r *RMCP) CanDecode() gopacket.LayerClass {
+	return r.LayerType()
+}
+
+// DecodeFromBytes makes the layer represent the provided bytes. It partially
+// satisfies DecodingLayer.
+func (r *RMCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 4 {
+		df.SetTruncated()
+		return fmt.Errorf("invalid RMCP header, length %v less than 4",
+			len(data))
+	}
+
+	r.BaseLayer.Contents = data[:4]
+	r.BaseLayer.Payload = data[4:]
+
+	r.Version = uint8(data[0])
+	// 1 byte reserved
+	r.Sequence = uint8(data[2])
+	r.Ack = data[3]&RMCPAck != 0
+	r.Class = RMCPClass(data[3] & 0xF)
+	return nil
+}
+
+// NextLayerType returns the data layer of this RMCP layer. This partially
+// satisfies DecodingLayer.
+func (r *RMCP) NextLayerType() gopacket.LayerType {
+	return r.Class.LayerType()
+}
+
+// Payload returns the data layer. It partially satisfies ApplicationLayer.
+func (r *RMCP) Payload() []byte {
+	return r.BaseLayer.Payload
+}
+
+// SerializeTo writes the serialized form of this layer into the SerializeBuffer,
+// partially satisfying SerializableLayer.
+func (r *RMCP) SerializeTo(b gopacket.SerializeBuffer, _ gopacket.SerializeOptions) error {
+	// The IPMI v1.5 spec contains a pad byte for frame sizes of certain lengths
+	// to work around issues in LAN chips. This is no longer necessary as of
+	// IPMI v2.0 (renamed to "legacy pad") so we do not attempt to add it. The
+	// same approach is taken by FreeIPMI:
+	// http://git.savannah.gnu.org/cgit/freeipmi.git/tree/libfreeipmi/interface/ipmi-lan-interface.c?id=b5ffcd38317daf42074458879f4c55ba6804a595#n836
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	bytes[0] = r.Version
+	bytes[1] = 0x00
+	bytes[2] = r.Sequence
+	bytes[3] = bool2uint8(r.Ack)<<7 | uint8(r.Class) // thanks, BFD layer
+	return nil
+}
+
+// decodeRMCP decodes the byte slice into an RMCP type, and sets the application
+// layer to it.
+func decodeRMCP(data []byte, p gopacket.PacketBuilder) error {
+	rmcp := &RMCP{}
+	err := rmcp.DecodeFromBytes(data, p)
+	p.AddLayer(rmcp)
+	p.SetApplicationLayer(rmcp)
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(rmcp.NextLayerType())
+}
diff --git a/vendor/github.com/google/gopacket/layers/rudp.go b/vendor/github.com/google/gopacket/layers/rudp.go
new file mode 100644
index 0000000..8435129
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/rudp.go
@@ -0,0 +1,93 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+	"github.com/google/gopacket"
+)
+
+type RUDP struct {
+	BaseLayer
+	SYN, ACK, EACK, RST, NUL bool
+	Version                  uint8
+	HeaderLength             uint8
+	SrcPort, DstPort         RUDPPort
+	DataLength               uint16
+	Seq, Ack, Checksum       uint32
+	VariableHeaderArea       []byte
+	// RUDPHeaderSyn contains SYN information for the RUDP packet,
+	// if the SYN flag is set
+	*RUDPHeaderSYN
+	// RUDPHeaderEack contains EACK information for the RUDP packet,
+	// if the EACK flag is set.
+	*RUDPHeaderEACK
+}
+
+type RUDPHeaderSYN struct {
+	MaxOutstandingSegments, MaxSegmentSize, OptionFlags uint16
+}
+
+type RUDPHeaderEACK struct {
+	SeqsReceivedOK []uint32
+}
+
+// LayerType returns gopacket.LayerTypeRUDP.
+func (r *RUDP) LayerType() gopacket.LayerType { return LayerTypeRUDP }
+
+func decodeRUDP(data []byte, p gopacket.PacketBuilder) error {
+	r := &RUDP{
+		SYN:          data[0]&0x80 != 0,
+		ACK:          data[0]&0x40 != 0,
+		EACK:         data[0]&0x20 != 0,
+		RST:          data[0]&0x10 != 0,
+		NUL:          data[0]&0x08 != 0,
+		Version:      data[0] & 0x3,
+		HeaderLength: data[1],
+		SrcPort:      RUDPPort(data[2]),
+		DstPort:      RUDPPort(data[3]),
+		DataLength:   binary.BigEndian.Uint16(data[4:6]),
+		Seq:          binary.BigEndian.Uint32(data[6:10]),
+		Ack:          binary.BigEndian.Uint32(data[10:14]),
+		Checksum:     binary.BigEndian.Uint32(data[14:18]),
+	}
+	if r.HeaderLength < 9 {
+		return fmt.Errorf("RUDP packet with too-short header length %d", r.HeaderLength)
+	}
+	hlen := int(r.HeaderLength) * 2
+	r.Contents = data[:hlen]
+	r.Payload = data[hlen : hlen+int(r.DataLength)]
+	r.VariableHeaderArea = data[18:hlen]
+	headerData := r.VariableHeaderArea
+	switch {
+	case r.SYN:
+		if len(headerData) != 6 {
+			return fmt.Errorf("RUDP packet invalid SYN header length: %d", len(headerData))
+		}
+		r.RUDPHeaderSYN = &RUDPHeaderSYN{
+			MaxOutstandingSegments: binary.BigEndian.Uint16(headerData[:2]),
+			MaxSegmentSize:         binary.BigEndian.Uint16(headerData[2:4]),
+			OptionFlags:            binary.BigEndian.Uint16(headerData[4:6]),
+		}
+	case r.EACK:
+		if len(headerData)%4 != 0 {
+			return fmt.Errorf("RUDP packet invalid EACK header length: %d", len(headerData))
+		}
+		r.RUDPHeaderEACK = &RUDPHeaderEACK{make([]uint32, len(headerData)/4)}
+		for i := 0; i < len(headerData); i += 4 {
+			r.SeqsReceivedOK[i/4] = binary.BigEndian.Uint32(headerData[i : i+4])
+		}
+	}
+	p.AddLayer(r)
+	p.SetTransportLayer(r)
+	return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+func (r *RUDP) TransportFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointRUDPPort, []byte{byte(r.SrcPort)}, []byte{byte(r.DstPort)})
+}
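+
+// Note (illustrative): HeaderLength is expressed in 2-octet units, so the
+// minimum value of 9 corresponds to the 18-byte fixed header with an empty
+// VariableHeaderArea, e.g.
+//
+//	hlen := int(r.HeaderLength) * 2 // HeaderLength == 9  -> 18-byte fixed header
+//	                                // HeaderLength == 12 -> 6-byte SYN option area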
diff --git a/vendor/github.com/google/gopacket/layers/sctp.go b/vendor/github.com/google/gopacket/layers/sctp.go
new file mode 100644
index 0000000..511176e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/sctp.go
@@ -0,0 +1,746 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash/crc32"
+
+	"github.com/google/gopacket"
+)
+
+// SCTP contains information on the top level of an SCTP packet.
+type SCTP struct {
+	BaseLayer
+	SrcPort, DstPort SCTPPort
+	VerificationTag  uint32
+	Checksum         uint32
+	sPort, dPort     []byte
+}
+
+// LayerType returns gopacket.LayerTypeSCTP
+func (s *SCTP) LayerType() gopacket.LayerType { return LayerTypeSCTP }
+
+func decodeSCTP(data []byte, p gopacket.PacketBuilder) error {
+	sctp := &SCTP{}
+	err := sctp.DecodeFromBytes(data, p)
+	p.AddLayer(sctp)
+	p.SetTransportLayer(sctp)
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(sctpChunkTypePrefixDecoder)
+}
+
+var sctpChunkTypePrefixDecoder = gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix)
+
+// TransportFlow returns a flow based on the source and destination SCTP port.
+func (s *SCTP) TransportFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointSCTPPort, s.sPort, s.dPort)
+}
+
+func decodeWithSCTPChunkTypePrefix(data []byte, p gopacket.PacketBuilder) error {
+	chunkType := SCTPChunkType(data[0])
+	return chunkType.Decode(data, p)
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (s SCTP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(12)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes[0:2], uint16(s.SrcPort))
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(s.DstPort))
+	binary.BigEndian.PutUint32(bytes[4:8], s.VerificationTag)
+	if opts.ComputeChecksums {
+		// Note:  MakeTable(Castagnoli) actually only creates the table once, then
+		// passes back a singleton on every other call, so this shouldn't cause
+		// excessive memory allocation.
+		binary.LittleEndian.PutUint32(bytes[8:12], crc32.Checksum(b.Bytes(), crc32.MakeTable(crc32.Castagnoli)))
+	}
+	return nil
+}
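+
+// Illustrative usage sketch: letting gopacket fill in the CRC32c checksum when
+// serializing; with only the common header in the layer stack the checksum
+// covers just those 12 bytes. The port and tag values are arbitrary.
+//
+//	buf := gopacket.NewSerializeBuffer()
+//	opts := gopacket.SerializeOptions{ComputeChecksums: true}
+//	err := gopacket.SerializeLayers(buf, opts,
+//		&SCTP{SrcPort: 5000, DstPort: 5001, VerificationTag: 1})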
+
+func (sctp *SCTP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 12 {
+		return errors.New("Invalid SCTP common header length")
+	}
+	sctp.SrcPort = SCTPPort(binary.BigEndian.Uint16(data[:2]))
+	sctp.sPort = data[:2]
+	sctp.DstPort = SCTPPort(binary.BigEndian.Uint16(data[2:4]))
+	sctp.dPort = data[2:4]
+	sctp.VerificationTag = binary.BigEndian.Uint32(data[4:8])
+	sctp.Checksum = binary.BigEndian.Uint32(data[8:12])
+	sctp.BaseLayer = BaseLayer{data[:12], data[12:]}
+
+	return nil
+}
+
+func (t *SCTP) CanDecode() gopacket.LayerClass {
+	return LayerTypeSCTP
+}
+
+func (t *SCTP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// SCTPChunk contains the common fields in all SCTP chunks.
+type SCTPChunk struct {
+	BaseLayer
+	Type   SCTPChunkType
+	Flags  uint8
+	Length uint16
+	// ActualLength is the total length of an SCTP chunk, including padding.
+	// SCTP chunks start and end on 4-byte boundaries.  So if a chunk has a length
+	// of 18, it means that it has data up to and including byte 18, then padding
+	// up to the next 4-byte boundary, 20.  In this case, Length would be 18, and
+	// ActualLength would be 20.
+	ActualLength int
+}
+
+func roundUpToNearest4(i int) int {
+	if i%4 == 0 {
+		return i
+	}
+	return i + 4 - (i % 4)
+}
+
+func decodeSCTPChunk(data []byte) (SCTPChunk, error) {
+	length := binary.BigEndian.Uint16(data[2:4])
+	if length < 4 {
+		return SCTPChunk{}, errors.New("invalid SCTP chunk length")
+	}
+	actual := roundUpToNearest4(int(length))
+	ct := SCTPChunkType(data[0])
+
+	// For SCTP Data, use a separate layer for the payload
+	delta := 0
+	if ct == SCTPChunkTypeData {
+		delta = int(actual) - int(length)
+		actual = 16
+	}
+
+	return SCTPChunk{
+		Type:         ct,
+		Flags:        data[1],
+		Length:       length,
+		ActualLength: actual,
+		BaseLayer:    BaseLayer{data[:actual], data[actual : len(data)-delta]},
+	}, nil
+}
+
+// SCTPParameter is a TLV parameter inside a SCTPChunk.
+type SCTPParameter struct {
+	Type         uint16
+	Length       uint16
+	ActualLength int
+	Value        []byte
+}
+
+func decodeSCTPParameter(data []byte) SCTPParameter {
+	length := binary.BigEndian.Uint16(data[2:4])
+	return SCTPParameter{
+		Type:         binary.BigEndian.Uint16(data[0:2]),
+		Length:       length,
+		Value:        data[4:length],
+		ActualLength: roundUpToNearest4(int(length)),
+	}
+}
+
+func (p SCTPParameter) Bytes() []byte {
+	length := 4 + len(p.Value)
+	data := make([]byte, roundUpToNearest4(length))
+	binary.BigEndian.PutUint16(data[0:2], p.Type)
+	binary.BigEndian.PutUint16(data[2:4], uint16(length))
+	copy(data[4:], p.Value)
+	return data
+}
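+
+// Worked example (illustrative): the Length field counts the 4-byte TLV header
+// plus the value, and the wire encoding is padded to a 4-byte boundary, so a
+// 6-byte value yields Length == 10 inside a 12-byte buffer. The Type used here
+// is arbitrary.
+//
+//	p := SCTPParameter{Type: 7, Value: []byte{1, 2, 3, 4, 5, 6}}
+//	enc := p.Bytes() // len(enc) == 12, bytes 2:4 hold 10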
+
+// SCTPUnknownChunkType is the layer type returned when we don't recognize the
+// chunk type.  Since there's a length in a known location, we can skip over
+// it even if we don't know what it is, and continue parsing the rest of the
+// chunks.  This chunk is stored as an ErrorLayer in the packet.
+type SCTPUnknownChunkType struct {
+	SCTPChunk
+	bytes []byte
+}
+
+func decodeSCTPChunkTypeUnknown(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPUnknownChunkType{SCTPChunk: chunk}
+	sc.bytes = data[:sc.ActualLength]
+	p.AddLayer(sc)
+	p.SetErrorLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (s SCTPUnknownChunkType) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(s.ActualLength)
+	if err != nil {
+		return err
+	}
+	copy(bytes, s.bytes)
+	return nil
+}
+
+// LayerType returns gopacket.LayerTypeSCTPUnknownChunkType.
+func (s *SCTPUnknownChunkType) LayerType() gopacket.LayerType { return LayerTypeSCTPUnknownChunkType }
+
+// Payload returns all bytes in this header, including the decoded Type, Length,
+// and Flags.
+func (s *SCTPUnknownChunkType) Payload() []byte { return s.bytes }
+
+// Error implements ErrorLayer.
+func (s *SCTPUnknownChunkType) Error() error {
+	return fmt.Errorf("No decode method available for SCTP chunk type %s", s.Type)
+}
+
+// SCTPData is the SCTP Data chunk layer.
+type SCTPData struct {
+	SCTPChunk
+	Unordered, BeginFragment, EndFragment bool
+	TSN                                   uint32
+	StreamId                              uint16
+	StreamSequence                        uint16
+	PayloadProtocol                       SCTPPayloadProtocol
+}
+
+// LayerType returns gopacket.LayerTypeSCTPData.
+func (s *SCTPData) LayerType() gopacket.LayerType { return LayerTypeSCTPData }
+
+// SCTPPayloadProtocol represents a payload protocol
+type SCTPPayloadProtocol uint32
+
+// SCTPPayloadProtocol constants from http://www.iana.org/assignments/sctp-parameters/sctp-parameters.xhtml
+const (
+	SCTPProtocolReserved  SCTPPayloadProtocol = 0
+	SCTPPayloadUIA                            = 1
+	SCTPPayloadM2UA                           = 2
+	SCTPPayloadM3UA                           = 3
+	SCTPPayloadSUA                            = 4
+	SCTPPayloadM2PA                           = 5
+	SCTPPayloadV5UA                           = 6
+	SCTPPayloadH248                           = 7
+	SCTPPayloadBICC                           = 8
+	SCTPPayloadTALI                           = 9
+	SCTPPayloadDUA                            = 10
+	SCTPPayloadASAP                           = 11
+	SCTPPayloadENRP                           = 12
+	SCTPPayloadH323                           = 13
+	SCTPPayloadQIPC                           = 14
+	SCTPPayloadSIMCO                          = 15
+	SCTPPayloadDDPSegment                     = 16
+	SCTPPayloadDDPStream                      = 17
+	SCTPPayloadS1AP                           = 18
+)
+
+func (p SCTPPayloadProtocol) String() string {
+	switch p {
+	case SCTPProtocolReserved:
+		return "Reserved"
+	case SCTPPayloadUIA:
+		return "UIA"
+	case SCTPPayloadM2UA:
+		return "M2UA"
+	case SCTPPayloadM3UA:
+		return "M3UA"
+	case SCTPPayloadSUA:
+		return "SUA"
+	case SCTPPayloadM2PA:
+		return "M2PA"
+	case SCTPPayloadV5UA:
+		return "V5UA"
+	case SCTPPayloadH248:
+		return "H.248"
+	case SCTPPayloadBICC:
+		return "BICC"
+	case SCTPPayloadTALI:
+		return "TALI"
+	case SCTPPayloadDUA:
+		return "DUA"
+	case SCTPPayloadASAP:
+		return "ASAP"
+	case SCTPPayloadENRP:
+		return "ENRP"
+	case SCTPPayloadH323:
+		return "H.323"
+	case SCTPPayloadQIPC:
+		return "QIPC"
+	case SCTPPayloadSIMCO:
+		return "SIMCO"
+	case SCTPPayloadDDPSegment:
+		return "DDPSegment"
+	case SCTPPayloadDDPStream:
+		return "DDPStream"
+	case SCTPPayloadS1AP:
+		return "S1AP"
+	}
+	return fmt.Sprintf("Unknown(%d)", p)
+}
+
+func decodeSCTPData(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPData{
+		SCTPChunk:       chunk,
+		Unordered:       data[1]&0x4 != 0,
+		BeginFragment:   data[1]&0x2 != 0,
+		EndFragment:     data[1]&0x1 != 0,
+		TSN:             binary.BigEndian.Uint32(data[4:8]),
+		StreamId:        binary.BigEndian.Uint16(data[8:10]),
+		StreamSequence:  binary.BigEndian.Uint16(data[10:12]),
+		PayloadProtocol: SCTPPayloadProtocol(binary.BigEndian.Uint32(data[12:16])),
+	}
+	// Length is the length in bytes of the data, INCLUDING the 16-byte header.
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPData) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	payload := b.Bytes()
+	// Pad the payload to a 32 bit boundary
+	if rem := len(payload) % 4; rem != 0 {
+		b.AppendBytes(4 - rem)
+	}
+	length := 16
+	bytes, err := b.PrependBytes(length)
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	flags := uint8(0)
+	if sc.Unordered {
+		flags |= 0x4
+	}
+	if sc.BeginFragment {
+		flags |= 0x2
+	}
+	if sc.EndFragment {
+		flags |= 0x1
+	}
+	bytes[1] = flags
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(length+len(payload)))
+	binary.BigEndian.PutUint32(bytes[4:8], sc.TSN)
+	binary.BigEndian.PutUint16(bytes[8:10], sc.StreamId)
+	binary.BigEndian.PutUint16(bytes[10:12], sc.StreamSequence)
+	binary.BigEndian.PutUint32(bytes[12:16], uint32(sc.PayloadProtocol))
+	return nil
+}
+
+// SCTPInitParameter is a parameter for an SCTP Init or InitAck packet.
+type SCTPInitParameter SCTPParameter
+
+// SCTPInit is used as the return value for both SCTPInit and SCTPInitAck
+// messages.
+type SCTPInit struct {
+	SCTPChunk
+	InitiateTag                     uint32
+	AdvertisedReceiverWindowCredit  uint32
+	OutboundStreams, InboundStreams uint16
+	InitialTSN                      uint32
+	Parameters                      []SCTPInitParameter
+}
+
+// LayerType returns either gopacket.LayerTypeSCTPInit or gopacket.LayerTypeSCTPInitAck.
+func (sc *SCTPInit) LayerType() gopacket.LayerType {
+	if sc.Type == SCTPChunkTypeInitAck {
+		return LayerTypeSCTPInitAck
+	}
+	// sc.Type == SCTPChunkTypeInit
+	return LayerTypeSCTPInit
+}
+
+func decodeSCTPInit(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPInit{
+		SCTPChunk:                      chunk,
+		InitiateTag:                    binary.BigEndian.Uint32(data[4:8]),
+		AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]),
+		OutboundStreams:                binary.BigEndian.Uint16(data[12:14]),
+		InboundStreams:                 binary.BigEndian.Uint16(data[14:16]),
+		InitialTSN:                     binary.BigEndian.Uint32(data[16:20]),
+	}
+	paramData := data[20:sc.ActualLength]
+	for len(paramData) > 0 {
+		p := SCTPInitParameter(decodeSCTPParameter(paramData))
+		paramData = paramData[p.ActualLength:]
+		sc.Parameters = append(sc.Parameters, p)
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPInit) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var payload []byte
+	for _, param := range sc.Parameters {
+		payload = append(payload, SCTPParameter(param).Bytes()...)
+	}
+	length := 20 + len(payload)
+	bytes, err := b.PrependBytes(roundUpToNearest4(length))
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+	binary.BigEndian.PutUint32(bytes[4:8], sc.InitiateTag)
+	binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit)
+	binary.BigEndian.PutUint16(bytes[12:14], sc.OutboundStreams)
+	binary.BigEndian.PutUint16(bytes[14:16], sc.InboundStreams)
+	binary.BigEndian.PutUint32(bytes[16:20], sc.InitialTSN)
+	copy(bytes[20:], payload)
+	return nil
+}
+
+// SCTPSack is the SCTP Selective ACK chunk layer.
+type SCTPSack struct {
+	SCTPChunk
+	CumulativeTSNAck               uint32
+	AdvertisedReceiverWindowCredit uint32
+	NumGapACKs, NumDuplicateTSNs   uint16
+	GapACKs                        []uint16
+	DuplicateTSNs                  []uint32
+}
+
+// LayerType return LayerTypeSCTPSack
+func (sc *SCTPSack) LayerType() gopacket.LayerType {
+	return LayerTypeSCTPSack
+}
+
+func decodeSCTPSack(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPSack{
+		SCTPChunk:                      chunk,
+		CumulativeTSNAck:               binary.BigEndian.Uint32(data[4:8]),
+		AdvertisedReceiverWindowCredit: binary.BigEndian.Uint32(data[8:12]),
+		NumGapACKs:                     binary.BigEndian.Uint16(data[12:14]),
+		NumDuplicateTSNs:               binary.BigEndian.Uint16(data[14:16]),
+	}
+	// We cap gapAcks and dupTSNs here so we're not allocating tons
+	// of memory based on a user-controllable field.  Our caps are not exact,
+	// but should give us sane defaults... we'll still hit slice boundaries and
+	// fail if the user-supplied values are too high (in the for loops below), but
+	// the amount of memory we'll have allocated because of that should be small
+	// (< sc.ActualLength).
+	gapAcks := sc.SCTPChunk.ActualLength / 2
+	dupTSNs := (sc.SCTPChunk.ActualLength - gapAcks*2) / 4
+	if gapAcks > int(sc.NumGapACKs) {
+		gapAcks = int(sc.NumGapACKs)
+	}
+	if dupTSNs > int(sc.NumDuplicateTSNs) {
+		dupTSNs = int(sc.NumDuplicateTSNs)
+	}
+	sc.GapACKs = make([]uint16, 0, gapAcks)
+	sc.DuplicateTSNs = make([]uint32, 0, dupTSNs)
+	bytesRemaining := data[16:]
+	for i := 0; i < int(sc.NumGapACKs); i++ {
+		sc.GapACKs = append(sc.GapACKs, binary.BigEndian.Uint16(bytesRemaining[:2]))
+		bytesRemaining = bytesRemaining[2:]
+	}
+	for i := 0; i < int(sc.NumDuplicateTSNs); i++ {
+		sc.DuplicateTSNs = append(sc.DuplicateTSNs, binary.BigEndian.Uint32(bytesRemaining[:4]))
+		bytesRemaining = bytesRemaining[4:]
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPSack) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	length := 16 + 2*len(sc.GapACKs) + 4*len(sc.DuplicateTSNs)
+	bytes, err := b.PrependBytes(roundUpToNearest4(length))
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+	binary.BigEndian.PutUint32(bytes[4:8], sc.CumulativeTSNAck)
+	binary.BigEndian.PutUint32(bytes[8:12], sc.AdvertisedReceiverWindowCredit)
+	binary.BigEndian.PutUint16(bytes[12:14], uint16(len(sc.GapACKs)))
+	binary.BigEndian.PutUint16(bytes[14:16], uint16(len(sc.DuplicateTSNs)))
+	for i, v := range sc.GapACKs {
+		binary.BigEndian.PutUint16(bytes[16+i*2:], v)
+	}
+	offset := 16 + 2*len(sc.GapACKs)
+	for i, v := range sc.DuplicateTSNs {
+		binary.BigEndian.PutUint32(bytes[offset+i*4:], v)
+	}
+	return nil
+}
+
+// SCTPHeartbeatParameter is the parameter type used by SCTP heartbeat and
+// heartbeat ack layers.
+type SCTPHeartbeatParameter SCTPParameter
+
+// SCTPHeartbeat is the SCTP heartbeat layer, also used for heartbeat ack.
+type SCTPHeartbeat struct {
+	SCTPChunk
+	Parameters []SCTPHeartbeatParameter
+}
+
+// LayerType returns gopacket.LayerTypeSCTPHeartbeat.
+func (sc *SCTPHeartbeat) LayerType() gopacket.LayerType {
+	if sc.Type == SCTPChunkTypeHeartbeatAck {
+		return LayerTypeSCTPHeartbeatAck
+	}
+	// sc.Type == SCTPChunkTypeHeartbeat
+	return LayerTypeSCTPHeartbeat
+}
+
+func decodeSCTPHeartbeat(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPHeartbeat{
+		SCTPChunk: chunk,
+	}
+	paramData := data[4:sc.Length]
+	for len(paramData) > 0 {
+		p := SCTPHeartbeatParameter(decodeSCTPParameter(paramData))
+		paramData = paramData[p.ActualLength:]
+		sc.Parameters = append(sc.Parameters, p)
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPHeartbeat) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var payload []byte
+	for _, param := range sc.Parameters {
+		payload = append(payload, SCTPParameter(param).Bytes()...)
+	}
+	length := 4 + len(payload)
+
+	bytes, err := b.PrependBytes(roundUpToNearest4(length))
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+	copy(bytes[4:], payload)
+	return nil
+}
+
+// SCTPErrorParameter is the parameter type used by SCTP Abort and Error layers.
+type SCTPErrorParameter SCTPParameter
+
+// SCTPError is the SCTP error layer, also used for SCTP aborts.
+type SCTPError struct {
+	SCTPChunk
+	Parameters []SCTPErrorParameter
+}
+
+// LayerType returns LayerTypeSCTPAbort or LayerTypeSCTPError.
+func (sc *SCTPError) LayerType() gopacket.LayerType {
+	if sc.Type == SCTPChunkTypeAbort {
+		return LayerTypeSCTPAbort
+	}
+	// sc.Type == SCTPChunkTypeError
+	return LayerTypeSCTPError
+}
+
+func decodeSCTPError(data []byte, p gopacket.PacketBuilder) error {
+	// remarkably similar to decodeSCTPHeartbeat ;)
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPError{
+		SCTPChunk: chunk,
+	}
+	paramData := data[4:sc.Length]
+	for len(paramData) > 0 {
+		p := SCTPErrorParameter(decodeSCTPParameter(paramData))
+		paramData = paramData[p.ActualLength:]
+		sc.Parameters = append(sc.Parameters, p)
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPError) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var payload []byte
+	for _, param := range sc.Parameters {
+		payload = append(payload, SCTPParameter(param).Bytes()...)
+	}
+	length := 4 + len(payload)
+
+	bytes, err := b.PrependBytes(roundUpToNearest4(length))
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+	copy(bytes[4:], payload)
+	return nil
+}
+
+// SCTPShutdown is the SCTP shutdown layer.
+type SCTPShutdown struct {
+	SCTPChunk
+	CumulativeTSNAck uint32
+}
+
+// LayerType returns gopacket.LayerTypeSCTPShutdown.
+func (sc *SCTPShutdown) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdown }
+
+func decodeSCTPShutdown(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPShutdown{
+		SCTPChunk:        chunk,
+		CumulativeTSNAck: binary.BigEndian.Uint32(data[4:8]),
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPShutdown) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], 8)
+	binary.BigEndian.PutUint32(bytes[4:8], sc.CumulativeTSNAck)
+	return nil
+}
+
+// SCTPShutdownAck is the SCTP shutdown layer.
+type SCTPShutdownAck struct {
+	SCTPChunk
+}
+
+// LayerType returns gopacket.LayerTypeSCTPShutdownAck.
+func (sc *SCTPShutdownAck) LayerType() gopacket.LayerType { return LayerTypeSCTPShutdownAck }
+
+func decodeSCTPShutdownAck(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPShutdownAck{
+		SCTPChunk: chunk,
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPShutdownAck) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], 4)
+	return nil
+}
+
+// SCTPCookieEcho is the SCTP Cookie Echo layer.
+type SCTPCookieEcho struct {
+	SCTPChunk
+	Cookie []byte
+}
+
+// LayerType returns gopacket.LayerTypeSCTPCookieEcho.
+func (sc *SCTPCookieEcho) LayerType() gopacket.LayerType { return LayerTypeSCTPCookieEcho }
+
+func decodeSCTPCookieEcho(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPCookieEcho{
+		SCTPChunk: chunk,
+	}
+	sc.Cookie = data[4:sc.Length]
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPCookieEcho) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	length := 4 + len(sc.Cookie)
+	bytes, err := b.PrependBytes(roundUpToNearest4(length))
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], uint16(length))
+	copy(bytes[4:], sc.Cookie)
+	return nil
+}
+
+// This struct is used by all empty SCTP chunks (currently CookieAck and
+// ShutdownComplete).
+type SCTPEmptyLayer struct {
+	SCTPChunk
+}
+
+// LayerType returns either gopacket.LayerTypeSCTPShutdownComplete or
+// LayerTypeSCTPCookieAck.
+func (sc *SCTPEmptyLayer) LayerType() gopacket.LayerType {
+	if sc.Type == SCTPChunkTypeShutdownComplete {
+		return LayerTypeSCTPShutdownComplete
+	}
+	// sc.Type == SCTPChunkTypeCookieAck
+	return LayerTypeSCTPCookieAck
+}
+
+func decodeSCTPEmptyLayer(data []byte, p gopacket.PacketBuilder) error {
+	chunk, err := decodeSCTPChunk(data)
+	if err != nil {
+		return err
+	}
+	sc := &SCTPEmptyLayer{
+		SCTPChunk: chunk,
+	}
+	p.AddLayer(sc)
+	return p.NextDecoder(gopacket.DecodeFunc(decodeWithSCTPChunkTypePrefix))
+}
+
+// SerializeTo is for gopacket.SerializableLayer.
+func (sc SCTPEmptyLayer) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(4)
+	if err != nil {
+		return err
+	}
+	bytes[0] = uint8(sc.Type)
+	bytes[1] = sc.Flags
+	binary.BigEndian.PutUint16(bytes[2:4], 4)
+	return nil
+}
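+
+// Illustrative usage sketch: decoding an SCTP packet starting at the common
+// header and picking out any DATA chunks; payload is a placeholder for the
+// captured bytes.
+//
+//	pkt := gopacket.NewPacket(payload, LayerTypeSCTP, gopacket.Default)
+//	for _, l := range pkt.Layers() {
+//		if d, ok := l.(*SCTPData); ok {
+//			fmt.Println(d.TSN, d.StreamId, d.PayloadProtocol)
+//		}
+//	}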
diff --git a/vendor/github.com/google/gopacket/layers/sflow.go b/vendor/github.com/google/gopacket/layers/sflow.go
new file mode 100644
index 0000000..bc1c973
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/sflow.go
@@ -0,0 +1,2567 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+/*
+This layer decodes SFlow version 5 datagrams.
+
+The specification can be found here: http://sflow.org/sflow_version_5.txt
+
+Additional developer information about sflow can be found at:
+http://sflow.org/developers/specifications.php
+
+And SFlow in general:
+http://sflow.org/index.php
+
+Two forms of sample data are defined: compact and expanded. The
+Specification has this to say:
+
+    Compact and expand forms of counter and flow samples are defined.
+    An agent must not mix compact/expanded encodings.  If an agent
+    will never use ifIndex numbers >= 2^24 then it must use compact
+    encodings for all interfaces.  Otherwise the expanded formats must
+    be used for all interfaces.
+
+This decoder only supports the compact form, because that is the only
+one for which data was available.
+
+The datagram is composed of one or more samples of type flow or counter,
+and each sample is composed of one or more records describing the sample.
+A sample is a single instance of sampled information, and each record in
+the sample gives additional / supplementary information about the sample.
+
+The following sample record types are supported:
+
+	Raw Packet Header
+	opaque = flow_data; enterprise = 0; format = 1
+
+	Extended Switch Data
+	opaque = flow_data; enterprise = 0; format = 1001
+
+	Extended Router Data
+	opaque = flow_data; enterprise = 0; format = 1002
+
+	Extended Gateway Data
+	opaque = flow_data; enterprise = 0; format = 1003
+
+	Extended User Data
+	opaque = flow_data; enterprise = 0; format = 1004
+
+	Extended URL Data
+	opaque = flow_data; enterprise = 0; format = 1005
+
+The following types of counter records are supported:
+
+	Generic Interface Counters - see RFC 2233
+	opaque = counter_data; enterprise = 0; format = 1
+
+	Ethernet Interface Counters - see RFC 2358
+	opaque = counter_data; enterprise = 0; format = 2
+
+SFlow is encoded using XDR (RFC4506). There are a few places
+where the standard 4-byte fields are partitioned into two
+bitfields of different lengths. I'm not sure why the designers
+chose to pack together two values like this in some places, and
+in others they use the entire 4-byte value to store a number that
+will never be more than a few bits. In any case, there are a couple
+of types defined to handle the decoding of these bitfields, and
+that's why they're there. */
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+// SFlowRecord holds both flow sample records and counter sample records.
+// A Record is the structure that actually holds the sampled data
+// and / or counters.
+type SFlowRecord interface {
+}
+
+// SFlowDataSource encodes a 2-bit SFlowSourceFormat in its most significant
+// 2 bits, and an SFlowSourceValue in its least significant 30 bits.
+// These types and values define the meaning of the interface information
+// presented in the sample metadata.
+type SFlowDataSource int32
+
+func (sdc SFlowDataSource) decode() (SFlowSourceFormat, SFlowSourceValue) {
+	leftField := sdc >> 30
+	rightField := uint32(0x3FFFFFFF) & uint32(sdc)
+	return SFlowSourceFormat(leftField), SFlowSourceValue(rightField)
+}
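+
+// Worked example (illustrative): the compact encoding packs both fields into a
+// single 32-bit word, so 0x00000003 decodes to format SFlowTypeSingleInterface
+// with source value (ifIndex) 3.
+//
+//	format, value := SFlowDataSource(0x00000003).decode()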
+
+type SFlowDataSourceExpanded struct {
+	SourceIDClass SFlowSourceFormat
+	SourceIDIndex SFlowSourceValue
+}
+
+func (sdce SFlowDataSourceExpanded) decode() (SFlowSourceFormat, SFlowSourceValue) {
+	leftField := sdce.SourceIDClass >> 30
+	rightField := uint32(0x3FFFFFFF) & uint32(sdce.SourceIDIndex)
+	return SFlowSourceFormat(leftField), SFlowSourceValue(rightField)
+}
+
+type SFlowSourceFormat uint32
+
+type SFlowSourceValue uint32
+
+const (
+	SFlowTypeSingleInterface      SFlowSourceFormat = 0
+	SFlowTypePacketDiscarded      SFlowSourceFormat = 1
+	SFlowTypeMultipleDestinations SFlowSourceFormat = 2
+)
+
+func (sdf SFlowSourceFormat) String() string {
+	switch sdf {
+	case SFlowTypeSingleInterface:
+		return "Single Interface"
+	case SFlowTypePacketDiscarded:
+		return "Packet Discarded"
+	case SFlowTypeMultipleDestinations:
+		return "Multiple Destinations"
+	default:
+		return "UNKNOWN"
+	}
+}
+
+func decodeSFlow(data []byte, p gopacket.PacketBuilder) error {
+	s := &SFlowDatagram{}
+	err := s.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(s)
+	p.SetApplicationLayer(s)
+	return nil
+}
+
+// SFlowDatagram is the outermost container which holds some basic information
+// about the reporting agent, and holds at least one sample record
+type SFlowDatagram struct {
+	BaseLayer
+
+	DatagramVersion uint32
+	AgentAddress    net.IP
+	SubAgentID      uint32
+	SequenceNumber  uint32
+	AgentUptime     uint32
+	SampleCount     uint32
+	FlowSamples     []SFlowFlowSample
+	CounterSamples  []SFlowCounterSample
+}
+
+// An SFlow  datagram's outer container has the following
+// structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |           int sFlow version (2|4|5)           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |   int IP version of the Agent (1=v4|2=v6)     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /    Agent IP address (v4=4byte|v6=16byte)      /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int sub agent id                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |         int datagram sequence number          |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |            int switch uptime in ms            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |          int n samples in datagram            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                  n samples                    /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// SFlowDataFormat encodes the EnterpriseID in the most
+// significant 20 bits, and the SampleType in the least significant
+// 12 bits.
+type SFlowDataFormat uint32
+
+func (sdf SFlowDataFormat) decode() (SFlowEnterpriseID, SFlowSampleType) {
+	leftField := sdf >> 12
+	rightField := uint32(0xFFF) & uint32(sdf)
+	return SFlowEnterpriseID(leftField), SFlowSampleType(rightField)
+}
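+
+// Worked example (illustrative): a raw data-format word of 0x00000001 decodes
+// to the standard enterprise ID (0) and sample type 1, i.e. a flow sample.
+//
+//	eid, st := SFlowDataFormat(0x00000001).decode()
+//	// eid == SFlowStandard, st == SFlowTypeFlowSample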
+
+// SFlowEnterpriseID is used to differentiate between the
+// official SFlow standard, and other, vendor-specific
+// types of flow data. (Similar to SNMP's enterprise MIB
+// OIDs.) Only the official SFlow Enterprise ID is decoded
+// here.
+type SFlowEnterpriseID uint32
+
+const (
+	SFlowStandard SFlowEnterpriseID = 0
+)
+
+func (eid SFlowEnterpriseID) String() string {
+	switch eid {
+	case SFlowStandard:
+		return "Standard SFlow"
+	default:
+		return ""
+	}
+}
+
+func (eid SFlowEnterpriseID) GetType() SFlowEnterpriseID {
+	return SFlowStandard
+}
+
+// SFlowSampleType specifies the type of sample. Only flow samples
+// and counter samples are supported
+type SFlowSampleType uint32
+
+const (
+	SFlowTypeFlowSample            SFlowSampleType = 1
+	SFlowTypeCounterSample         SFlowSampleType = 2
+	SFlowTypeExpandedFlowSample    SFlowSampleType = 3
+	SFlowTypeExpandedCounterSample SFlowSampleType = 4
+)
+
+func (st SFlowSampleType) GetType() SFlowSampleType {
+	switch st {
+	case SFlowTypeFlowSample:
+		return SFlowTypeFlowSample
+	case SFlowTypeCounterSample:
+		return SFlowTypeCounterSample
+	case SFlowTypeExpandedFlowSample:
+		return SFlowTypeExpandedFlowSample
+	case SFlowTypeExpandedCounterSample:
+		return SFlowTypeExpandedCounterSample
+	default:
+		panic("Invalid Sample Type")
+	}
+}
+
+func (st SFlowSampleType) String() string {
+	switch st {
+	case SFlowTypeFlowSample:
+		return "Flow Sample"
+	case SFlowTypeCounterSample:
+		return "Counter Sample"
+	case SFlowTypeExpandedFlowSample:
+		return "Expanded Flow Sample"
+	case SFlowTypeExpandedCounterSample:
+		return "Expanded Counter Sample"
+	default:
+		return ""
+	}
+}
+
+func (s *SFlowDatagram) LayerType() gopacket.LayerType { return LayerTypeSFlow }
+
+func (d *SFlowDatagram) Payload() []byte { return nil }
+
+func (d *SFlowDatagram) CanDecode() gopacket.LayerClass { return LayerTypeSFlow }
+
+func (d *SFlowDatagram) NextLayerType() gopacket.LayerType { return gopacket.LayerTypePayload }
+
+// SFlowIPType determines what form the IP address being decoded will
+// take. This is an XDR union type allowing for both IPv4 and IPv6
+type SFlowIPType uint32
+
+const (
+	SFlowIPv4 SFlowIPType = 1
+	SFlowIPv6 SFlowIPType = 2
+)
+
+func (s SFlowIPType) String() string {
+	switch s {
+	case SFlowIPv4:
+		return "IPv4"
+	case SFlowIPv6:
+		return "IPv6"
+	default:
+		return ""
+	}
+}
+
+func (s SFlowIPType) Length() int {
+	switch s {
+	case SFlowIPv4:
+		return 4
+	case SFlowIPv6:
+		return 16
+	default:
+		return 0
+	}
+}
+
+func (s *SFlowDatagram) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	var agentAddressType SFlowIPType
+
+	data, s.DatagramVersion = data[4:], binary.BigEndian.Uint32(data[:4])
+	data, agentAddressType = data[4:], SFlowIPType(binary.BigEndian.Uint32(data[:4]))
+	data, s.AgentAddress = data[agentAddressType.Length():], data[:agentAddressType.Length()]
+	data, s.SubAgentID = data[4:], binary.BigEndian.Uint32(data[:4])
+	data, s.SequenceNumber = data[4:], binary.BigEndian.Uint32(data[:4])
+	data, s.AgentUptime = data[4:], binary.BigEndian.Uint32(data[:4])
+	data, s.SampleCount = data[4:], binary.BigEndian.Uint32(data[:4])
+
+	if s.SampleCount < 1 {
+		return fmt.Errorf("SFlow Datagram has invalid sample length: %d", s.SampleCount)
+	}
+	for i := uint32(0); i < s.SampleCount; i++ {
+		sdf := SFlowDataFormat(binary.BigEndian.Uint32(data[:4]))
+		_, sampleType := sdf.decode()
+		switch sampleType {
+		case SFlowTypeFlowSample:
+			if flowSample, err := decodeFlowSample(&data, false); err == nil {
+				s.FlowSamples = append(s.FlowSamples, flowSample)
+			} else {
+				return err
+			}
+		case SFlowTypeCounterSample:
+			if counterSample, err := decodeCounterSample(&data, false); err == nil {
+				s.CounterSamples = append(s.CounterSamples, counterSample)
+			} else {
+				return err
+			}
+		case SFlowTypeExpandedFlowSample:
+			if flowSample, err := decodeFlowSample(&data, true); err == nil {
+				s.FlowSamples = append(s.FlowSamples, flowSample)
+			} else {
+				return err
+			}
+		case SFlowTypeExpandedCounterSample:
+			if counterSample, err := decodeCounterSample(&data, true); err == nil {
+				s.CounterSamples = append(s.CounterSamples, counterSample)
+			} else {
+				return err
+			}
+
+		default:
+			return fmt.Errorf("Unsupported SFlow sample type %d", sampleType)
+		}
+	}
+	return nil
+}
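+
+// Illustrative usage sketch: decoding an sFlow datagram carried in a UDP
+// payload; udpPayload is a placeholder for captured bytes.
+//
+//	pkt := gopacket.NewPacket(udpPayload, LayerTypeSFlow, gopacket.Default)
+//	if l := pkt.Layer(LayerTypeSFlow); l != nil {
+//		d := l.(*SFlowDatagram)
+//		fmt.Println(d.AgentAddress, d.SampleCount, len(d.FlowSamples), len(d.CounterSamples))
+//	}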
+
+// SFlowFlowSample represents a sampled packet and contains
+// one or more records describing the packet
+type SFlowFlowSample struct {
+	EnterpriseID          SFlowEnterpriseID
+	Format                SFlowSampleType
+	SampleLength          uint32
+	SequenceNumber        uint32
+	SourceIDClass         SFlowSourceFormat
+	SourceIDIndex         SFlowSourceValue
+	SamplingRate          uint32
+	SamplePool            uint32
+	Dropped               uint32
+	InputInterfaceFormat  uint32
+	InputInterface        uint32
+	OutputInterfaceFormat uint32
+	OutputInterface       uint32
+	RecordCount           uint32
+	Records               []SFlowRecord
+}
+
+// Flow samples have the following structure. Note
+// the bit fields to encode the Enterprise ID and the
+// Flow record format: type 1
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  sample length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |          int sample sequence number           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |id type |       src id index value             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int sampling rate               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                int sample pool                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    int drops                  |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 int input ifIndex             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                int output ifIndex             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int number of records           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                   flow records                /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// Flow samples have the following structure.
+// Flow record format: type 3
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  sample length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |          int sample sequence number           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int src id type                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |             int src id index value            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int sampling rate               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                int sample pool                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    int drops                  |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |           int input interface format          |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |           int input interface value           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |           int output interface format         |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |           int output interface value          |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int number of records           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                   flow records                /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowFlowDataFormat uint32
+
+func (fdf SFlowFlowDataFormat) decode() (SFlowEnterpriseID, SFlowFlowRecordType) {
+	leftField := fdf >> 12
+	rightField := uint32(0xFFF) & uint32(fdf)
+	return SFlowEnterpriseID(leftField), SFlowFlowRecordType(rightField)
+}
+
+func (fs SFlowFlowSample) GetRecords() []SFlowRecord {
+	return fs.Records
+}
+
+func (fs SFlowFlowSample) GetType() SFlowSampleType {
+	return SFlowTypeFlowSample
+}
+
+func skipRecord(data *[]byte) {
+	recordLength := int(binary.BigEndian.Uint32((*data)[4:]))
+	*data = (*data)[(recordLength+((4-recordLength)%4))+8:]
+}
+
+func decodeFlowSample(data *[]byte, expanded bool) (SFlowFlowSample, error) {
+	s := SFlowFlowSample{}
+	var sdf SFlowDataFormat
+	*data, sdf = (*data)[4:], SFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	var sdc SFlowDataSource
+
+	s.EnterpriseID, s.Format = sdf.decode()
+	if len(*data) < 4 {
+		return SFlowFlowSample{}, errors.New("ethernet counters too small")
+	}
+	*data, s.SampleLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowFlowSample{}, errors.New("ethernet counters too small")
+	}
+	*data, s.SequenceNumber = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if expanded {
+		if len(*data) < 4 {
+			return SFlowFlowSample{}, errors.New("ethernet counters too small")
+		}
+		*data, s.SourceIDClass = (*data)[4:], SFlowSourceFormat(binary.BigEndian.Uint32((*data)[:4]))
+		if len(*data) < 4 {
+			return SFlowFlowSample{}, errors.New("ethernet counters too small")
+		}
+		*data, s.SourceIDIndex = (*data)[4:], SFlowSourceValue(binary.BigEndian.Uint32((*data)[:4]))
+	} else {
+		if len(*data) < 4 {
+			return SFlowFlowSample{}, errors.New("ethernet counters too small")
+		}
+		*data, sdc = (*data)[4:], SFlowDataSource(binary.BigEndian.Uint32((*data)[:4]))
+		s.SourceIDClass, s.SourceIDIndex = sdc.decode()
+	}
+	if len(*data) < 4 {
+		return SFlowFlowSample{}, errors.New("ethernet counters too small")
+	}
+	*data, s.SamplingRate = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowFlowSample{}, errors.New("ethernet counters too small")
+	}
+	*data, s.SamplePool = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowFlowSample{}, errors.New("ethernet counters too small")
+	}
+	*data, s.Dropped = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	if expanded {
+		if len(*data) < 4 {
+			return SFlowFlowSample{}, errors.New("ethernet counters too small")
+		}
+		*data, s.InputInterfaceFormat = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+		if len(*data) < 4 {
+			return SFlowFlowSample{}, errors.New("ethernet counters too small")
+		}
+		*data, s.InputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+		if len(*data) < 4 {
+			return SFlowFlowSample{}, errors.New("ethernet counters too small")
+		}
+		*data, s.OutputInterfaceFormat = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+		if len(*data) < 4 {
+			return SFlowFlowSample{}, errors.New("ethernet counters too small")
+		}
+		*data, s.OutputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	} else {
+		if len(*data) < 4 {
+			return SFlowFlowSample{}, errors.New("ethernet counters too small")
+		}
+		*data, s.InputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+		if len(*data) < 4 {
+			return SFlowFlowSample{}, errors.New("ethernet counters too small")
+		}
+		*data, s.OutputInterface = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	}
+	if len(*data) < 4 {
+		return SFlowFlowSample{}, errors.New("ethernet counters too small")
+	}
+	*data, s.RecordCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	for i := uint32(0); i < s.RecordCount; i++ {
+		rdf := SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+		enterpriseID, flowRecordType := rdf.decode()
+
+		// Only try to decode when EnterpriseID is 0, which signals that the
+		// default sFlow structures defined by the specification are in use.
+		// Non-zero enterprise IDs have been observed in practice, e.g. with pmacct.
+		if enterpriseID == 0 {
+			switch flowRecordType {
+			case SFlowTypeRawPacketFlow:
+				if record, err := decodeRawPacketFlowRecord(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedUserFlow:
+				if record, err := decodeExtendedUserFlow(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedUrlFlow:
+				if record, err := decodeExtendedURLRecord(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedSwitchFlow:
+				if record, err := decodeExtendedSwitchFlowRecord(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedRouterFlow:
+				if record, err := decodeExtendedRouterFlowRecord(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedGatewayFlow:
+				if record, err := decodeExtendedGatewayFlowRecord(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeEthernetFrameFlow:
+				if record, err := decodeEthernetFrameFlowRecord(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeIpv4Flow:
+				if record, err := decodeSFlowIpv4Record(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeIpv6Flow:
+				if record, err := decodeSFlowIpv6Record(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedMlpsFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedMlpsFlow")
+			case SFlowTypeExtendedNatFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedNatFlow")
+			case SFlowTypeExtendedMlpsTunnelFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedMlpsTunnelFlow")
+			case SFlowTypeExtendedMlpsVcFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedMlpsVcFlow")
+			case SFlowTypeExtendedMlpsFecFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedMlpsFecFlow")
+			case SFlowTypeExtendedMlpsLvpFecFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedMlpsLvpFecFlow")
+			case SFlowTypeExtendedVlanFlow:
+				// TODO
+				skipRecord(data)
+				return s, errors.New("skipping TypeExtendedVlanFlow")
+			case SFlowTypeExtendedIpv4TunnelEgressFlow:
+				if record, err := decodeExtendedIpv4TunnelEgress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedIpv4TunnelIngressFlow:
+				if record, err := decodeExtendedIpv4TunnelIngress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedIpv6TunnelEgressFlow:
+				if record, err := decodeExtendedIpv6TunnelEgress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedIpv6TunnelIngressFlow:
+				if record, err := decodeExtendedIpv6TunnelIngress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedDecapsulateEgressFlow:
+				if record, err := decodeExtendedDecapsulateEgress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedDecapsulateIngressFlow:
+				if record, err := decodeExtendedDecapsulateIngress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedVniEgressFlow:
+				if record, err := decodeExtendedVniEgress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			case SFlowTypeExtendedVniIngressFlow:
+				if record, err := decodeExtendedVniIngress(data); err == nil {
+					s.Records = append(s.Records, record)
+				} else {
+					return s, err
+				}
+			default:
+				return s, fmt.Errorf("Unsupported flow record type: %d", flowRecordType)
+			}
+		} else {
+			skipRecord(data)
+		}
+	}
+	return s, nil
+}
+
+// Counter samples report information about various counter
+// objects. Typically these are items like IfInOctets, or
+// CPU / Memory stats, etc. SFlow will report these at regular
+// intervals as configured on the agent. If one were sufficiently
+// industrious, this could be used to replace the typical
+// SNMP polling used for such things.
+type SFlowCounterSample struct {
+	EnterpriseID   SFlowEnterpriseID
+	Format         SFlowSampleType
+	SampleLength   uint32
+	SequenceNumber uint32
+	SourceIDClass  SFlowSourceFormat
+	SourceIDIndex  SFlowSourceValue
+	RecordCount    uint32
+	Records        []SFlowRecord
+}
+
+// Counter samples have the following structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |          int sample sequence number           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |id type |       src id index value             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               int number of records           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                counter records                /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowCounterDataFormat uint32
+
+func (cdf SFlowCounterDataFormat) decode() (SFlowEnterpriseID, SFlowCounterRecordType) {
+	leftField := cdf >> 12
+	rightField := uint32(0xFFF) & uint32(cdf)
+	return SFlowEnterpriseID(leftField), SFlowCounterRecordType(rightField)
+}
+
+// GetRecords will return a slice of interface types
+// representing records. A type switch can be used to
+// get at the underlying SFlowCounterRecordType.
+func (cs SFlowCounterSample) GetRecords() []SFlowRecord {
+	return cs.Records
+}
+
+// GetType will report the type of sample. Only the
+// compact form of counter samples is supported.
+func (cs SFlowCounterSample) GetType() SFlowSampleType {
+	return SFlowTypeCounterSample
+}
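+
+// Illustrative skeleton, not part of the upstream gopacket API: as the
+// GetRecords comment above notes, counter records are returned as
+// []SFlowRecord and are normally type-switched into the concrete counter
+// structs (generic interface counters, Ethernet counters, ...), which are
+// defined elsewhere in this package and therefore only hinted at here.
+func exampleIterateCounterRecords(cs SFlowCounterSample) int {
+	decoded := 0
+	for _, rec := range cs.GetRecords() {
+		switch rec.(type) {
+		// concrete cases (e.g. the generic interface counters struct) go here
+		default:
+			decoded++
+		}
+	}
+	return decoded
+}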
+
+type SFlowCounterRecordType uint32
+
+const (
+	SFlowTypeGenericInterfaceCounters   SFlowCounterRecordType = 1
+	SFlowTypeEthernetInterfaceCounters  SFlowCounterRecordType = 2
+	SFlowTypeTokenRingInterfaceCounters SFlowCounterRecordType = 3
+	SFlowType100BaseVGInterfaceCounters SFlowCounterRecordType = 4
+	SFlowTypeVLANCounters               SFlowCounterRecordType = 5
+	SFlowTypeLACPCounters               SFlowCounterRecordType = 7
+	SFlowTypeProcessorCounters          SFlowCounterRecordType = 1001
+	SFlowTypeOpenflowPortCounters       SFlowCounterRecordType = 1004
+	SFlowTypePORTNAMECounters           SFlowCounterRecordType = 1005
+	SFLowTypeAPPRESOURCESCounters       SFlowCounterRecordType = 2203
+	SFlowTypeOVSDPCounters              SFlowCounterRecordType = 2207
+)
+
+func (cr SFlowCounterRecordType) String() string {
+	switch cr {
+	case SFlowTypeGenericInterfaceCounters:
+		return "Generic Interface Counters"
+	case SFlowTypeEthernetInterfaceCounters:
+		return "Ethernet Interface Counters"
+	case SFlowTypeTokenRingInterfaceCounters:
+		return "Token Ring Interface Counters"
+	case SFlowType100BaseVGInterfaceCounters:
+		return "100BaseVG Interface Counters"
+	case SFlowTypeVLANCounters:
+		return "VLAN Counters"
+	case SFlowTypeLACPCounters:
+		return "LACP Counters"
+	case SFlowTypeProcessorCounters:
+		return "Processor Counters"
+	case SFlowTypeOpenflowPortCounters:
+		return "Openflow Port Counters"
+	case SFlowTypePORTNAMECounters:
+		return "PORT NAME Counters"
+	case SFLowTypeAPPRESOURCESCounters:
+		return "App Resources Counters"
+	case SFlowTypeOVSDPCounters:
+		return "OVSDP Counters"
+	default:
+		return ""
+
+	}
+}
+
+func decodeCounterSample(data *[]byte, expanded bool) (SFlowCounterSample, error) {
+	s := SFlowCounterSample{}
+	var sdc SFlowDataSource
+	var sdce SFlowDataSourceExpanded
+	var sdf SFlowDataFormat
+
+	*data, sdf = (*data)[4:], SFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	s.EnterpriseID, s.Format = sdf.decode()
+	*data, s.SampleLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, s.SequenceNumber = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if expanded {
+		*data, sdce = (*data)[8:], SFlowDataSourceExpanded{SFlowSourceFormat(binary.BigEndian.Uint32((*data)[:4])), SFlowSourceValue(binary.BigEndian.Uint32((*data)[4:8]))}
+		s.SourceIDClass, s.SourceIDIndex = sdce.decode()
+	} else {
+		*data, sdc = (*data)[4:], SFlowDataSource(binary.BigEndian.Uint32((*data)[:4]))
+		s.SourceIDClass, s.SourceIDIndex = sdc.decode()
+	}
+	*data, s.RecordCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	for i := uint32(0); i < s.RecordCount; i++ {
+		cdf := SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+		_, counterRecordType := cdf.decode()
+		switch counterRecordType {
+		case SFlowTypeGenericInterfaceCounters:
+			if record, err := decodeGenericInterfaceCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypeEthernetInterfaceCounters:
+			if record, err := decodeEthernetCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypeTokenRingInterfaceCounters:
+			skipRecord(data)
+			return s, errors.New("skipping TypeTokenRingInterfaceCounters")
+		case SFlowType100BaseVGInterfaceCounters:
+			skipRecord(data)
+			return s, errors.New("skipping Type100BaseVGInterfaceCounters")
+		case SFlowTypeVLANCounters:
+			if record, err := decodeVLANCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypeLACPCounters:
+			if record, err := decodeLACPCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypeProcessorCounters:
+			if record, err := decodeProcessorCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypeOpenflowPortCounters:
+			if record, err := decodeOpenflowportCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypePORTNAMECounters:
+			if record, err := decodePortnameCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFLowTypeAPPRESOURCESCounters:
+			if record, err := decodeAppresourcesCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		case SFlowTypeOVSDPCounters:
+			if record, err := decodeOVSDPCounters(data); err == nil {
+				s.Records = append(s.Records, record)
+			} else {
+				return s, err
+			}
+		default:
+			return s, fmt.Errorf("Invalid counter record type: %d", counterRecordType)
+		}
+	}
+	return s, nil
+}
+
+// SFlowBaseFlowRecord holds the fields common to all records
+// of type SFlowFlowRecordType
+type SFlowBaseFlowRecord struct {
+	EnterpriseID   SFlowEnterpriseID
+	Format         SFlowFlowRecordType
+	FlowDataLength uint32
+}
+
+func (bfr SFlowBaseFlowRecord) GetType() SFlowFlowRecordType {
+	return bfr.Format
+}
+
+// SFlowFlowRecordType denotes what kind of Flow Record is
+// represented. See RFC 3176
+type SFlowFlowRecordType uint32
+
+const (
+	SFlowTypeRawPacketFlow                  SFlowFlowRecordType = 1
+	SFlowTypeEthernetFrameFlow              SFlowFlowRecordType = 2
+	SFlowTypeIpv4Flow                       SFlowFlowRecordType = 3
+	SFlowTypeIpv6Flow                       SFlowFlowRecordType = 4
+	SFlowTypeExtendedSwitchFlow             SFlowFlowRecordType = 1001
+	SFlowTypeExtendedRouterFlow             SFlowFlowRecordType = 1002
+	SFlowTypeExtendedGatewayFlow            SFlowFlowRecordType = 1003
+	SFlowTypeExtendedUserFlow               SFlowFlowRecordType = 1004
+	SFlowTypeExtendedUrlFlow                SFlowFlowRecordType = 1005
+	SFlowTypeExtendedMlpsFlow               SFlowFlowRecordType = 1006
+	SFlowTypeExtendedNatFlow                SFlowFlowRecordType = 1007
+	SFlowTypeExtendedMlpsTunnelFlow         SFlowFlowRecordType = 1008
+	SFlowTypeExtendedMlpsVcFlow             SFlowFlowRecordType = 1009
+	SFlowTypeExtendedMlpsFecFlow            SFlowFlowRecordType = 1010
+	SFlowTypeExtendedMlpsLvpFecFlow         SFlowFlowRecordType = 1011
+	SFlowTypeExtendedVlanFlow               SFlowFlowRecordType = 1012
+	SFlowTypeExtendedIpv4TunnelEgressFlow   SFlowFlowRecordType = 1023
+	SFlowTypeExtendedIpv4TunnelIngressFlow  SFlowFlowRecordType = 1024
+	SFlowTypeExtendedIpv6TunnelEgressFlow   SFlowFlowRecordType = 1025
+	SFlowTypeExtendedIpv6TunnelIngressFlow  SFlowFlowRecordType = 1026
+	SFlowTypeExtendedDecapsulateEgressFlow  SFlowFlowRecordType = 1027
+	SFlowTypeExtendedDecapsulateIngressFlow SFlowFlowRecordType = 1028
+	SFlowTypeExtendedVniEgressFlow          SFlowFlowRecordType = 1029
+	SFlowTypeExtendedVniIngressFlow         SFlowFlowRecordType = 1030
+)
+
+func (rt SFlowFlowRecordType) String() string {
+	switch rt {
+	case SFlowTypeRawPacketFlow:
+		return "Raw Packet Flow Record"
+	case SFlowTypeEthernetFrameFlow:
+		return "Ethernet Frame Flow Record"
+	case SFlowTypeIpv4Flow:
+		return "IPv4 Flow Record"
+	case SFlowTypeIpv6Flow:
+		return "IPv6 Flow Record"
+	case SFlowTypeExtendedSwitchFlow:
+		return "Extended Switch Flow Record"
+	case SFlowTypeExtendedRouterFlow:
+		return "Extended Router Flow Record"
+	case SFlowTypeExtendedGatewayFlow:
+		return "Extended Gateway Flow Record"
+	case SFlowTypeExtendedUserFlow:
+		return "Extended User Flow Record"
+	case SFlowTypeExtendedUrlFlow:
+		return "Extended URL Flow Record"
+	case SFlowTypeExtendedMlpsFlow:
+		return "Extended MPLS Flow Record"
+	case SFlowTypeExtendedNatFlow:
+		return "Extended NAT Flow Record"
+	case SFlowTypeExtendedMlpsTunnelFlow:
+		return "Extended MPLS Tunnel Flow Record"
+	case SFlowTypeExtendedMlpsVcFlow:
+		return "Extended MPLS VC Flow Record"
+	case SFlowTypeExtendedMlpsFecFlow:
+		return "Extended MPLS FEC Flow Record"
+	case SFlowTypeExtendedMlpsLvpFecFlow:
+		return "Extended MPLS LVP FEC Flow Record"
+	case SFlowTypeExtendedVlanFlow:
+		return "Extended VLAN Flow Record"
+	case SFlowTypeExtendedIpv4TunnelEgressFlow:
+		return "Extended IPv4 Tunnel Egress Record"
+	case SFlowTypeExtendedIpv4TunnelIngressFlow:
+		return "Extended IPv4 Tunnel Ingress Record"
+	case SFlowTypeExtendedIpv6TunnelEgressFlow:
+		return "Extended IPv6 Tunnel Egress Record"
+	case SFlowTypeExtendedIpv6TunnelIngressFlow:
+		return "Extended IPv6 Tunnel Ingress Record"
+	case SFlowTypeExtendedDecapsulateEgressFlow:
+		return "Extended Decapsulate Egress Record"
+	case SFlowTypeExtendedDecapsulateIngressFlow:
+		return "Extended Decapsulate Ingress Record"
+	case SFlowTypeExtendedVniEgressFlow:
+		return "Extended VNI Egress Record"
+	case SFlowTypeExtendedVniIngressFlow:
+		return "Extended VNI Ingress Record"
+	default:
+		return ""
+	}
+}
+
+// SFlowRawPacketFlowRecords hold information about a sampled
+// packet grabbed as it transited the agent. This is
+// perhaps the most useful and interesting record type,
+// as it holds the headers of the sampled packet and
+// can be used to build up a complete picture of the
+// traffic patterns on a network.
+//
+// The raw packet header is sent back into gopacket for
+// decoding, and the resulting gopacket.Packet is stored
+// in the Header member.
+type SFlowRawPacketFlowRecord struct {
+	SFlowBaseFlowRecord
+	HeaderProtocol SFlowRawHeaderProtocol
+	FrameLength    uint32
+	PayloadRemoved uint32
+	HeaderLength   uint32
+	Header         gopacket.Packet
+}
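+
+// Illustrative usage sketch, not part of the upstream gopacket API: because
+// the sampled header is re-parsed into a gopacket.Packet (see the comment
+// above), its layers can be walked with the usual packet accessors.
+func exampleInspectRawPacketHeader(rec SFlowRawPacketFlowRecord) int {
+	if rec.Header == nil {
+		return 0
+	}
+	// Each protocol layer decoded from the sampled header (Ethernet,
+	// IPv4/IPv6, TCP/UDP, ...) is reachable via Layers().
+	return len(rec.Header.Layers())
+}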
+
+// Raw packet record types have the following structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 Header Protocol               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 Frame Length                  |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 Payload Removed               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 Header Length                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  \                     Header                    \
+//  \                                               \
+//  \                                               \
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowRawHeaderProtocol uint32
+
+const (
+	SFlowProtoEthernet   SFlowRawHeaderProtocol = 1
+	SFlowProtoISO88024   SFlowRawHeaderProtocol = 2
+	SFlowProtoISO88025   SFlowRawHeaderProtocol = 3
+	SFlowProtoFDDI       SFlowRawHeaderProtocol = 4
+	SFlowProtoFrameRelay SFlowRawHeaderProtocol = 5
+	SFlowProtoX25        SFlowRawHeaderProtocol = 6
+	SFlowProtoPPP        SFlowRawHeaderProtocol = 7
+	SFlowProtoSMDS       SFlowRawHeaderProtocol = 8
+	SFlowProtoAAL5       SFlowRawHeaderProtocol = 9
+	SFlowProtoAAL5_IP    SFlowRawHeaderProtocol = 10 /* e.g. Cisco AAL5 mux */
+	SFlowProtoIPv4       SFlowRawHeaderProtocol = 11
+	SFlowProtoIPv6       SFlowRawHeaderProtocol = 12
+	SFlowProtoMPLS       SFlowRawHeaderProtocol = 13
+	SFlowProtoPOS        SFlowRawHeaderProtocol = 14 /* RFC 1662, 2615 */
+)
+
+func (sfhp SFlowRawHeaderProtocol) String() string {
+	switch sfhp {
+	case SFlowProtoEthernet:
+		return "ETHERNET-ISO88023"
+	case SFlowProtoISO88024:
+		return "ISO88024-TOKENBUS"
+	case SFlowProtoISO88025:
+		return "ISO88025-TOKENRING"
+	case SFlowProtoFDDI:
+		return "FDDI"
+	case SFlowProtoFrameRelay:
+		return "FRAME-RELAY"
+	case SFlowProtoX25:
+		return "X25"
+	case SFlowProtoPPP:
+		return "PPP"
+	case SFlowProtoSMDS:
+		return "SMDS"
+	case SFlowProtoAAL5:
+		return "AAL5"
+	case SFlowProtoAAL5_IP:
+		return "AAL5-IP"
+	case SFlowProtoIPv4:
+		return "IPv4"
+	case SFlowProtoIPv6:
+		return "IPv6"
+	case SFlowProtoMPLS:
+		return "MPLS"
+	case SFlowProtoPOS:
+		return "POS"
+	}
+	return "UNKNOWN"
+}
+
+func decodeRawPacketFlowRecord(data *[]byte) (SFlowRawPacketFlowRecord, error) {
+	rec := SFlowRawPacketFlowRecord{}
+	header := []byte{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.HeaderProtocol = (*data)[4:], SFlowRawHeaderProtocol(binary.BigEndian.Uint32((*data)[:4]))
+	*data, rec.FrameLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.PayloadRemoved = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.HeaderLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	headerLenWithPadding := int(rec.HeaderLength + ((4 - rec.HeaderLength) % 4))
+	*data, header = (*data)[headerLenWithPadding:], (*data)[:headerLenWithPadding]
+	rec.Header = gopacket.NewPacket(header, LayerTypeEthernet, gopacket.Default)
+	return rec, nil
+}
+
+// SFlowExtendedSwitchFlowRecord gives additional information
+// about the sampled packet if it's available. It's mainly
+// useful for getting at the incoming and outgoing VLANs.
+// An agent may or may not provide this information.
+type SFlowExtendedSwitchFlowRecord struct {
+	SFlowBaseFlowRecord
+	IncomingVLAN         uint32
+	IncomingVLANPriority uint32
+	OutgoingVLAN         uint32
+	OutgoingVLANPriority uint32
+}
+
+// Extended switch records have the following structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   Incoming VLAN               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Incoming VLAN Priority         |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   Outgoing VLAN               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Outgoing VLAN Priority         |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+func decodeExtendedSwitchFlowRecord(data *[]byte) (SFlowExtendedSwitchFlowRecord, error) {
+	es := SFlowExtendedSwitchFlowRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	es.EnterpriseID, es.Format = fdf.decode()
+	*data, es.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, es.IncomingVLAN = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, es.IncomingVLANPriority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, es.OutgoingVLAN = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, es.OutgoingVLANPriority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return es, nil
+}
+
+// SFlowExtendedRouterFlowRecord gives additional information
+// about the layer 3 routing information used to forward
+// the packet
+type SFlowExtendedRouterFlowRecord struct {
+	SFlowBaseFlowRecord
+	NextHop                net.IP
+	NextHopSourceMask      uint32
+	NextHopDestinationMask uint32
+}
+
+// Extended router records have the following structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |   IP version of next hop router (1=v4|2=v6)   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /     Next Hop address (v4=4byte|v6=16byte)     /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |              Next Hop Source Mask             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |              Next Hop Destination Mask        |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+func decodeExtendedRouterFlowRecord(data *[]byte) (SFlowExtendedRouterFlowRecord, error) {
+	er := SFlowExtendedRouterFlowRecord{}
+	var fdf SFlowFlowDataFormat
+	var extendedRouterAddressType SFlowIPType
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	er.EnterpriseID, er.Format = fdf.decode()
+	*data, er.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, extendedRouterAddressType = (*data)[4:], SFlowIPType(binary.BigEndian.Uint32((*data)[:4]))
+	*data, er.NextHop = (*data)[extendedRouterAddressType.Length():], (*data)[:extendedRouterAddressType.Length()]
+	*data, er.NextHopSourceMask = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, er.NextHopDestinationMask = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return er, nil
+}
+
+// SFlowExtendedGatewayFlowRecord describes information treasured by
+// network engineers everywhere: AS path information listing which
+// BGP peer sent the packet, and various other BGP-related info.
+// This information is vital because it gives a picture of how much
+// traffic is being sent from / received by various BGP peers.
+
+// Extended gateway records have the following structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |   IP version of next hop router (1=v4|2=v6)   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /     Next Hop address (v4=4byte|v6=16byte)     /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                       AS                      |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  Source AS                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    Peer AS                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  AS Path Count                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                AS Path / Sequence             /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                   Communities                 /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    Local Pref                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// AS Path / Sequence:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |     AS Source Type (Path=1 / Sequence=2)      |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |              Path / Sequence length           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /              Path / Sequence Members          /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+// Communities:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 community length              |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /               community Members               /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowExtendedGatewayFlowRecord struct {
+	SFlowBaseFlowRecord
+	NextHop     net.IP
+	AS          uint32
+	SourceAS    uint32
+	PeerAS      uint32
+	ASPathCount uint32
+	ASPath      []SFlowASDestination
+	Communities []uint32
+	LocalPref   uint32
+}
+
+type SFlowASPathType uint32
+
+const (
+	SFlowASSet      SFlowASPathType = 1
+	SFlowASSequence SFlowASPathType = 2
+)
+
+func (apt SFlowASPathType) String() string {
+	switch apt {
+	case SFlowASSet:
+		return "AS Set"
+	case SFlowASSequence:
+		return "AS Sequence"
+	default:
+		return ""
+	}
+}
+
+type SFlowASDestination struct {
+	Type    SFlowASPathType
+	Count   uint32
+	Members []uint32
+}
+
+func (asd SFlowASDestination) String() string {
+	switch asd.Type {
+	case SFlowASSet:
+		return fmt.Sprint("AS Set:", asd.Members)
+	case SFlowASSequence:
+		return fmt.Sprint("AS Sequence:", asd.Members)
+	default:
+		return ""
+	}
+}
+
+func (ad *SFlowASDestination) decodePath(data *[]byte) {
+	*data, ad.Type = (*data)[4:], SFlowASPathType(binary.BigEndian.Uint32((*data)[:4]))
+	*data, ad.Count = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	ad.Members = make([]uint32, ad.Count)
+	for i := uint32(0); i < ad.Count; i++ {
+		var member uint32
+		*data, member = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+		ad.Members[i] = member
+	}
+}
+
+func decodeExtendedGatewayFlowRecord(data *[]byte) (SFlowExtendedGatewayFlowRecord, error) {
+	eg := SFlowExtendedGatewayFlowRecord{}
+	var fdf SFlowFlowDataFormat
+	var extendedGatewayAddressType SFlowIPType
+	var communitiesLength uint32
+	var community uint32
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	eg.EnterpriseID, eg.Format = fdf.decode()
+	*data, eg.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, extendedGatewayAddressType = (*data)[4:], SFlowIPType(binary.BigEndian.Uint32((*data)[:4]))
+	*data, eg.NextHop = (*data)[extendedGatewayAddressType.Length():], (*data)[:extendedGatewayAddressType.Length()]
+	*data, eg.AS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, eg.SourceAS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, eg.PeerAS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, eg.ASPathCount = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	for i := uint32(0); i < eg.ASPathCount; i++ {
+		asPath := SFlowASDestination{}
+		asPath.decodePath(data)
+		eg.ASPath = append(eg.ASPath, asPath)
+	}
+	*data, communitiesLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	eg.Communities = make([]uint32, communitiesLength)
+	for j := uint32(0); j < communitiesLength; j++ {
+		*data, community = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+		eg.Communities[j] = community
+	}
+	*data, eg.LocalPref = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return eg, nil
+}
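+
+// Illustrative helper, not part of the upstream gopacket API: it renders the
+// BGP AS path carried by an extended gateway record using the
+// SFlowASDestination String method defined above, yielding strings such as
+// "AS Sequence:[64500 64501]".
+func exampleFormatASPath(eg SFlowExtendedGatewayFlowRecord) []string {
+	path := make([]string, 0, len(eg.ASPath))
+	for _, hop := range eg.ASPath {
+		path = append(path, hop.String())
+	}
+	return path
+}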
+
+// **************************************************
+//  Extended URL Flow Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   direction                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                      URL                      |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                      Host                     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowURLDirection uint32
+
+const (
+	SFlowURLsrc SFlowURLDirection = 1
+	SFlowURLdst SFlowURLDirection = 2
+)
+
+func (urld SFlowURLDirection) String() string {
+	switch urld {
+	case SFlowURLsrc:
+		return "Source address is the server"
+	case SFlowURLdst:
+		return "Destination address is the server"
+	default:
+		return ""
+	}
+}
+
+type SFlowExtendedURLRecord struct {
+	SFlowBaseFlowRecord
+	Direction SFlowURLDirection
+	URL       string
+	Host      string
+}
+
+func decodeExtendedURLRecord(data *[]byte) (SFlowExtendedURLRecord, error) {
+	eur := SFlowExtendedURLRecord{}
+	var fdf SFlowFlowDataFormat
+	var urlLen uint32
+	var urlLenWithPad int
+	var hostLen uint32
+	var hostLenWithPad int
+	var urlBytes []byte
+	var hostBytes []byte
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	eur.EnterpriseID, eur.Format = fdf.decode()
+	*data, eur.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, eur.Direction = (*data)[4:], SFlowURLDirection(binary.BigEndian.Uint32((*data)[:4]))
+	*data, urlLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	urlLenWithPad = int(urlLen + ((4 - urlLen) % 4))
+	*data, urlBytes = (*data)[urlLenWithPad:], (*data)[:urlLenWithPad]
+	eur.URL = string(urlBytes[:urlLen])
+	*data, hostLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	hostLenWithPad = int(hostLen + ((4 - hostLen) % 4))
+	*data, hostBytes = (*data)[hostLenWithPad:], (*data)[:hostLenWithPad]
+	eur.Host = string(hostBytes[:hostLen])
+	return eur, nil
+}
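+
+// Illustrative sketch, not part of the upstream gopacket API, spelling out
+// the XDR-style length arithmetic used above (and again for the extended
+// user flow record below): a field of n bytes is padded up to the next
+// 4-byte boundary, and because n is a uint32 the unsigned wrap-around in
+// (4-n)%4 always yields 0-3 padding bytes.
+func exampleXDRPaddedLength(n uint32) uint32 {
+	return n + ((4 - n) % 4) // e.g. n=5 -> 8, n=8 -> 8, n=10 -> 12
+}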
+
+// **************************************************
+//  Extended User Flow Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Source Character Set           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 Source User Id                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |              Destination Character Set        |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               Destination User ID             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowExtendedUserFlow struct {
+	SFlowBaseFlowRecord
+	SourceCharSet      SFlowCharSet
+	SourceUserID       string
+	DestinationCharSet SFlowCharSet
+	DestinationUserID  string
+}
+
+type SFlowCharSet uint32
+
+const (
+	SFlowCSunknown                 SFlowCharSet = 2
+	SFlowCSASCII                   SFlowCharSet = 3
+	SFlowCSISOLatin1               SFlowCharSet = 4
+	SFlowCSISOLatin2               SFlowCharSet = 5
+	SFlowCSISOLatin3               SFlowCharSet = 6
+	SFlowCSISOLatin4               SFlowCharSet = 7
+	SFlowCSISOLatinCyrillic        SFlowCharSet = 8
+	SFlowCSISOLatinArabic          SFlowCharSet = 9
+	SFlowCSISOLatinGreek           SFlowCharSet = 10
+	SFlowCSISOLatinHebrew          SFlowCharSet = 11
+	SFlowCSISOLatin5               SFlowCharSet = 12
+	SFlowCSISOLatin6               SFlowCharSet = 13
+	SFlowCSISOTextComm             SFlowCharSet = 14
+	SFlowCSHalfWidthKatakana       SFlowCharSet = 15
+	SFlowCSJISEncoding             SFlowCharSet = 16
+	SFlowCSShiftJIS                SFlowCharSet = 17
+	SFlowCSEUCPkdFmtJapanese       SFlowCharSet = 18
+	SFlowCSEUCFixWidJapanese       SFlowCharSet = 19
+	SFlowCSISO4UnitedKingdom       SFlowCharSet = 20
+	SFlowCSISO11SwedishForNames    SFlowCharSet = 21
+	SFlowCSISO15Italian            SFlowCharSet = 22
+	SFlowCSISO17Spanish            SFlowCharSet = 23
+	SFlowCSISO21German             SFlowCharSet = 24
+	SFlowCSISO60DanishNorwegian    SFlowCharSet = 25
+	SFlowCSISO69French             SFlowCharSet = 26
+	SFlowCSISO10646UTF1            SFlowCharSet = 27
+	SFlowCSISO646basic1983         SFlowCharSet = 28
+	SFlowCSINVARIANT               SFlowCharSet = 29
+	SFlowCSISO2IntlRefVersion      SFlowCharSet = 30
+	SFlowCSNATSSEFI                SFlowCharSet = 31
+	SFlowCSNATSSEFIADD             SFlowCharSet = 32
+	SFlowCSNATSDANO                SFlowCharSet = 33
+	SFlowCSNATSDANOADD             SFlowCharSet = 34
+	SFlowCSISO10Swedish            SFlowCharSet = 35
+	SFlowCSKSC56011987             SFlowCharSet = 36
+	SFlowCSISO2022KR               SFlowCharSet = 37
+	SFlowCSEUCKR                   SFlowCharSet = 38
+	SFlowCSISO2022JP               SFlowCharSet = 39
+	SFlowCSISO2022JP2              SFlowCharSet = 40
+	SFlowCSISO13JISC6220jp         SFlowCharSet = 41
+	SFlowCSISO14JISC6220ro         SFlowCharSet = 42
+	SFlowCSISO16Portuguese         SFlowCharSet = 43
+	SFlowCSISO18Greek7Old          SFlowCharSet = 44
+	SFlowCSISO19LatinGreek         SFlowCharSet = 45
+	SFlowCSISO25French             SFlowCharSet = 46
+	SFlowCSISO27LatinGreek1        SFlowCharSet = 47
+	SFlowCSISO5427Cyrillic         SFlowCharSet = 48
+	SFlowCSISO42JISC62261978       SFlowCharSet = 49
+	SFlowCSISO47BSViewdata         SFlowCharSet = 50
+	SFlowCSISO49INIS               SFlowCharSet = 51
+	SFlowCSISO50INIS8              SFlowCharSet = 52
+	SFlowCSISO51INISCyrillic       SFlowCharSet = 53
+	SFlowCSISO54271981             SFlowCharSet = 54
+	SFlowCSISO5428Greek            SFlowCharSet = 55
+	SFlowCSISO57GB1988             SFlowCharSet = 56
+	SFlowCSISO58GB231280           SFlowCharSet = 57
+	SFlowCSISO61Norwegian2         SFlowCharSet = 58
+	SFlowCSISO70VideotexSupp1      SFlowCharSet = 59
+	SFlowCSISO84Portuguese2        SFlowCharSet = 60
+	SFlowCSISO85Spanish2           SFlowCharSet = 61
+	SFlowCSISO86Hungarian          SFlowCharSet = 62
+	SFlowCSISO87JISX0208           SFlowCharSet = 63
+	SFlowCSISO88Greek7             SFlowCharSet = 64
+	SFlowCSISO89ASMO449            SFlowCharSet = 65
+	SFlowCSISO90                   SFlowCharSet = 66
+	SFlowCSISO91JISC62291984a      SFlowCharSet = 67
+	SFlowCSISO92JISC62991984b      SFlowCharSet = 68
+	SFlowCSISO93JIS62291984badd    SFlowCharSet = 69
+	SFlowCSISO94JIS62291984hand    SFlowCharSet = 70
+	SFlowCSISO95JIS62291984handadd SFlowCharSet = 71
+	SFlowCSISO96JISC62291984kana   SFlowCharSet = 72
+	SFlowCSISO2033                 SFlowCharSet = 73
+	SFlowCSISO99NAPLPS             SFlowCharSet = 74
+	SFlowCSISO102T617bit           SFlowCharSet = 75
+	SFlowCSISO103T618bit           SFlowCharSet = 76
+	SFlowCSISO111ECMACyrillic      SFlowCharSet = 77
+	SFlowCSa71                     SFlowCharSet = 78
+	SFlowCSa72                     SFlowCharSet = 79
+	SFlowCSISO123CSAZ24341985gr    SFlowCharSet = 80
+	SFlowCSISO88596E               SFlowCharSet = 81
+	SFlowCSISO88596I               SFlowCharSet = 82
+	SFlowCSISO128T101G2            SFlowCharSet = 83
+	SFlowCSISO88598E               SFlowCharSet = 84
+	SFlowCSISO88598I               SFlowCharSet = 85
+	SFlowCSISO139CSN369103         SFlowCharSet = 86
+	SFlowCSISO141JUSIB1002         SFlowCharSet = 87
+	SFlowCSISO143IECP271           SFlowCharSet = 88
+	SFlowCSISO146Serbian           SFlowCharSet = 89
+	SFlowCSISO147Macedonian        SFlowCharSet = 90
+	SFlowCSISO150                  SFlowCharSet = 91
+	SFlowCSISO151Cuba              SFlowCharSet = 92
+	SFlowCSISO6937Add              SFlowCharSet = 93
+	SFlowCSISO153GOST1976874       SFlowCharSet = 94
+	SFlowCSISO8859Supp             SFlowCharSet = 95
+	SFlowCSISO10367Box             SFlowCharSet = 96
+	SFlowCSISO158Lap               SFlowCharSet = 97
+	SFlowCSISO159JISX02121990      SFlowCharSet = 98
+	SFlowCSISO646Danish            SFlowCharSet = 99
+	SFlowCSUSDK                    SFlowCharSet = 100
+	SFlowCSDKUS                    SFlowCharSet = 101
+	SFlowCSKSC5636                 SFlowCharSet = 102
+	SFlowCSUnicode11UTF7           SFlowCharSet = 103
+	SFlowCSISO2022CN               SFlowCharSet = 104
+	SFlowCSISO2022CNEXT            SFlowCharSet = 105
+	SFlowCSUTF8                    SFlowCharSet = 106
+	SFlowCSISO885913               SFlowCharSet = 109
+	SFlowCSISO885914               SFlowCharSet = 110
+	SFlowCSISO885915               SFlowCharSet = 111
+	SFlowCSISO885916               SFlowCharSet = 112
+	SFlowCSGBK                     SFlowCharSet = 113
+	SFlowCSGB18030                 SFlowCharSet = 114
+	SFlowCSOSDEBCDICDF0415         SFlowCharSet = 115
+	SFlowCSOSDEBCDICDF03IRV        SFlowCharSet = 116
+	SFlowCSOSDEBCDICDF041          SFlowCharSet = 117
+	SFlowCSISO115481               SFlowCharSet = 118
+	SFlowCSKZ1048                  SFlowCharSet = 119
+	SFlowCSUnicode                 SFlowCharSet = 1000
+	SFlowCSUCS4                    SFlowCharSet = 1001
+	SFlowCSUnicodeASCII            SFlowCharSet = 1002
+	SFlowCSUnicodeLatin1           SFlowCharSet = 1003
+	SFlowCSUnicodeJapanese         SFlowCharSet = 1004
+	SFlowCSUnicodeIBM1261          SFlowCharSet = 1005
+	SFlowCSUnicodeIBM1268          SFlowCharSet = 1006
+	SFlowCSUnicodeIBM1276          SFlowCharSet = 1007
+	SFlowCSUnicodeIBM1264          SFlowCharSet = 1008
+	SFlowCSUnicodeIBM1265          SFlowCharSet = 1009
+	SFlowCSUnicode11               SFlowCharSet = 1010
+	SFlowCSSCSU                    SFlowCharSet = 1011
+	SFlowCSUTF7                    SFlowCharSet = 1012
+	SFlowCSUTF16BE                 SFlowCharSet = 1013
+	SFlowCSUTF16LE                 SFlowCharSet = 1014
+	SFlowCSUTF16                   SFlowCharSet = 1015
+	SFlowCSCESU8                   SFlowCharSet = 1016
+	SFlowCSUTF32                   SFlowCharSet = 1017
+	SFlowCSUTF32BE                 SFlowCharSet = 1018
+	SFlowCSUTF32LE                 SFlowCharSet = 1019
+	SFlowCSBOCU1                   SFlowCharSet = 1020
+	SFlowCSWindows30Latin1         SFlowCharSet = 2000
+	SFlowCSWindows31Latin1         SFlowCharSet = 2001
+	SFlowCSWindows31Latin2         SFlowCharSet = 2002
+	SFlowCSWindows31Latin5         SFlowCharSet = 2003
+	SFlowCSHPRoman8                SFlowCharSet = 2004
+	SFlowCSAdobeStandardEncoding   SFlowCharSet = 2005
+	SFlowCSVenturaUS               SFlowCharSet = 2006
+	SFlowCSVenturaInternational    SFlowCharSet = 2007
+	SFlowCSDECMCS                  SFlowCharSet = 2008
+	SFlowCSPC850Multilingual       SFlowCharSet = 2009
+	SFlowCSPCp852                  SFlowCharSet = 2010
+	SFlowCSPC8CodePage437          SFlowCharSet = 2011
+	SFlowCSPC8DanishNorwegian      SFlowCharSet = 2012
+	SFlowCSPC862LatinHebrew        SFlowCharSet = 2013
+	SFlowCSPC8Turkish              SFlowCharSet = 2014
+	SFlowCSIBMSymbols              SFlowCharSet = 2015
+	SFlowCSIBMThai                 SFlowCharSet = 2016
+	SFlowCSHPLegal                 SFlowCharSet = 2017
+	SFlowCSHPPiFont                SFlowCharSet = 2018
+	SFlowCSHPMath8                 SFlowCharSet = 2019
+	SFlowCSHPPSMath                SFlowCharSet = 2020
+	SFlowCSHPDesktop               SFlowCharSet = 2021
+	SFlowCSVenturaMath             SFlowCharSet = 2022
+	SFlowCSMicrosoftPublishing     SFlowCharSet = 2023
+	SFlowCSWindows31J              SFlowCharSet = 2024
+	SFlowCSGB2312                  SFlowCharSet = 2025
+	SFlowCSBig5                    SFlowCharSet = 2026
+	SFlowCSMacintosh               SFlowCharSet = 2027
+	SFlowCSIBM037                  SFlowCharSet = 2028
+	SFlowCSIBM038                  SFlowCharSet = 2029
+	SFlowCSIBM273                  SFlowCharSet = 2030
+	SFlowCSIBM274                  SFlowCharSet = 2031
+	SFlowCSIBM275                  SFlowCharSet = 2032
+	SFlowCSIBM277                  SFlowCharSet = 2033
+	SFlowCSIBM278                  SFlowCharSet = 2034
+	SFlowCSIBM280                  SFlowCharSet = 2035
+	SFlowCSIBM281                  SFlowCharSet = 2036
+	SFlowCSIBM284                  SFlowCharSet = 2037
+	SFlowCSIBM285                  SFlowCharSet = 2038
+	SFlowCSIBM290                  SFlowCharSet = 2039
+	SFlowCSIBM297                  SFlowCharSet = 2040
+	SFlowCSIBM420                  SFlowCharSet = 2041
+	SFlowCSIBM423                  SFlowCharSet = 2042
+	SFlowCSIBM424                  SFlowCharSet = 2043
+	SFlowCSIBM500                  SFlowCharSet = 2044
+	SFlowCSIBM851                  SFlowCharSet = 2045
+	SFlowCSIBM855                  SFlowCharSet = 2046
+	SFlowCSIBM857                  SFlowCharSet = 2047
+	SFlowCSIBM860                  SFlowCharSet = 2048
+	SFlowCSIBM861                  SFlowCharSet = 2049
+	SFlowCSIBM863                  SFlowCharSet = 2050
+	SFlowCSIBM864                  SFlowCharSet = 2051
+	SFlowCSIBM865                  SFlowCharSet = 2052
+	SFlowCSIBM868                  SFlowCharSet = 2053
+	SFlowCSIBM869                  SFlowCharSet = 2054
+	SFlowCSIBM870                  SFlowCharSet = 2055
+	SFlowCSIBM871                  SFlowCharSet = 2056
+	SFlowCSIBM880                  SFlowCharSet = 2057
+	SFlowCSIBM891                  SFlowCharSet = 2058
+	SFlowCSIBM903                  SFlowCharSet = 2059
+	SFlowCSIBBM904                 SFlowCharSet = 2060
+	SFlowCSIBM905                  SFlowCharSet = 2061
+	SFlowCSIBM918                  SFlowCharSet = 2062
+	SFlowCSIBM1026                 SFlowCharSet = 2063
+	SFlowCSIBMEBCDICATDE           SFlowCharSet = 2064
+	SFlowCSEBCDICATDEA             SFlowCharSet = 2065
+	SFlowCSEBCDICCAFR              SFlowCharSet = 2066
+	SFlowCSEBCDICDKNO              SFlowCharSet = 2067
+	SFlowCSEBCDICDKNOA             SFlowCharSet = 2068
+	SFlowCSEBCDICFISE              SFlowCharSet = 2069
+	SFlowCSEBCDICFISEA             SFlowCharSet = 2070
+	SFlowCSEBCDICFR                SFlowCharSet = 2071
+	SFlowCSEBCDICIT                SFlowCharSet = 2072
+	SFlowCSEBCDICPT                SFlowCharSet = 2073
+	SFlowCSEBCDICES                SFlowCharSet = 2074
+	SFlowCSEBCDICESA               SFlowCharSet = 2075
+	SFlowCSEBCDICESS               SFlowCharSet = 2076
+	SFlowCSEBCDICUK                SFlowCharSet = 2077
+	SFlowCSEBCDICUS                SFlowCharSet = 2078
+	SFlowCSUnknown8BiT             SFlowCharSet = 2079
+	SFlowCSMnemonic                SFlowCharSet = 2080
+	SFlowCSMnem                    SFlowCharSet = 2081
+	SFlowCSVISCII                  SFlowCharSet = 2082
+	SFlowCSVIQR                    SFlowCharSet = 2083
+	SFlowCSKOI8R                   SFlowCharSet = 2084
+	SFlowCSHZGB2312                SFlowCharSet = 2085
+	SFlowCSIBM866                  SFlowCharSet = 2086
+	SFlowCSPC775Baltic             SFlowCharSet = 2087
+	SFlowCSKOI8U                   SFlowCharSet = 2088
+	SFlowCSIBM00858                SFlowCharSet = 2089
+	SFlowCSIBM00924                SFlowCharSet = 2090
+	SFlowCSIBM01140                SFlowCharSet = 2091
+	SFlowCSIBM01141                SFlowCharSet = 2092
+	SFlowCSIBM01142                SFlowCharSet = 2093
+	SFlowCSIBM01143                SFlowCharSet = 2094
+	SFlowCSIBM01144                SFlowCharSet = 2095
+	SFlowCSIBM01145                SFlowCharSet = 2096
+	SFlowCSIBM01146                SFlowCharSet = 2097
+	SFlowCSIBM01147                SFlowCharSet = 2098
+	SFlowCSIBM01148                SFlowCharSet = 2099
+	SFlowCSIBM01149                SFlowCharSet = 2100
+	SFlowCSBig5HKSCS               SFlowCharSet = 2101
+	SFlowCSIBM1047                 SFlowCharSet = 2102
+	SFlowCSPTCP154                 SFlowCharSet = 2103
+	SFlowCSAmiga1251               SFlowCharSet = 2104
+	SFlowCSKOI7switched            SFlowCharSet = 2105
+	SFlowCSBRF                     SFlowCharSet = 2106
+	SFlowCSTSCII                   SFlowCharSet = 2107
+	SFlowCSCP51932                 SFlowCharSet = 2108
+	SFlowCSWindows874              SFlowCharSet = 2109
+	SFlowCSWindows1250             SFlowCharSet = 2250
+	SFlowCSWindows1251             SFlowCharSet = 2251
+	SFlowCSWindows1252             SFlowCharSet = 2252
+	SFlowCSWindows1253             SFlowCharSet = 2253
+	SFlowCSWindows1254             SFlowCharSet = 2254
+	SFlowCSWindows1255             SFlowCharSet = 2255
+	SFlowCSWindows1256             SFlowCharSet = 2256
+	SFlowCSWindows1257             SFlowCharSet = 2257
+	SFlowCSWindows1258             SFlowCharSet = 2258
+	SFlowCSTIS620                  SFlowCharSet = 2259
+	SFlowCS50220                   SFlowCharSet = 2260
+	SFlowCSreserved                SFlowCharSet = 3000
+)
+
+func decodeExtendedUserFlow(data *[]byte) (SFlowExtendedUserFlow, error) {
+	eu := SFlowExtendedUserFlow{}
+	var fdf SFlowFlowDataFormat
+	var srcUserLen uint32
+	var srcUserLenWithPad int
+	var srcUserBytes []byte
+	var dstUserLen uint32
+	var dstUserLenWithPad int
+	var dstUserBytes []byte
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	eu.EnterpriseID, eu.Format = fdf.decode()
+	*data, eu.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, eu.SourceCharSet = (*data)[4:], SFlowCharSet(binary.BigEndian.Uint32((*data)[:4]))
+	*data, srcUserLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	srcUserLenWithPad = int(srcUserLen + ((4 - srcUserLen) % 4))
+	*data, srcUserBytes = (*data)[srcUserLenWithPad:], (*data)[:srcUserLenWithPad]
+	eu.SourceUserID = string(srcUserBytes[:srcUserLen])
+	*data, eu.DestinationCharSet = (*data)[4:], SFlowCharSet(binary.BigEndian.Uint32((*data)[:4]))
+	*data, dstUserLen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	dstUserLenWithPad = int(dstUserLen + ((4 - dstUserLen) % 4))
+	*data, dstUserBytes = (*data)[dstUserLenWithPad:], (*data)[:dstUserLenWithPad]
+	eu.DestinationUserID = string(dstUserBytes[:dstUserLen])
+	return eu, nil
+}
+
+// **************************************************
+//  Packet IP version 4 Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                     Length                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    Protocol                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  Source IPv4                  |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Destination IPv4               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   Source Port                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Destination Port               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   TCP Flags                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                      TOS                      |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowIpv4Record struct {
+	// The length of the IP packet excluding lower layer encapsulations
+	Length uint32
+	// IP Protocol type (for example, TCP = 6, UDP = 17)
+	Protocol uint32
+	// Source IP Address
+	IPSrc net.IP
+	// Destination IP Address
+	IPDst net.IP
+	// TCP/UDP source port number or equivalent
+	PortSrc uint32
+	// TCP/UDP destination port number or equivalent
+	PortDst uint32
+	// TCP flags
+	TCPFlags uint32
+	// IP type of service
+	TOS uint32
+}
+
+func decodeSFlowIpv4Record(data *[]byte) (SFlowIpv4Record, error) {
+	si := SFlowIpv4Record{}
+
+	*data, si.Length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.Protocol = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.IPSrc = (*data)[4:], net.IP((*data)[:4])
+	*data, si.IPDst = (*data)[4:], net.IP((*data)[:4])
+	*data, si.PortSrc = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.PortDst = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.TCPFlags = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.TOS = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return si, nil
+}
+
+// **************************************************
+//  Packet IP version 6 Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                     Length                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    Protocol                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  Source IPv6                  |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Destination IPv6               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   Source Port                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Destination Port               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   TCP Flags                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    Priority                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowIpv6Record struct {
+	// The length of the IP packet excluding lower layer encapsulations
+	Length uint32
+	// IP Protocol type (for example, TCP = 6, UDP = 17)
+	Protocol uint32
+	// Source IP Address
+	IPSrc net.IP
+	// Destination IP Address
+	IPDst net.IP
+	// TCP/UDP source port number or equivalent
+	PortSrc uint32
+	// TCP/UDP destination port number or equivalent
+	PortDst uint32
+	// TCP flags
+	TCPFlags uint32
+	// IP priority
+	Priority uint32
+}
+
+func decodeSFlowIpv6Record(data *[]byte) (SFlowIpv6Record, error) {
+	si := SFlowIpv6Record{}
+
+	*data, si.Length = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.Protocol = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.IPSrc = (*data)[16:], net.IP((*data)[:16])
+	*data, si.IPDst = (*data)[16:], net.IP((*data)[:16])
+	*data, si.PortSrc = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.PortDst = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.TCPFlags = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, si.Priority = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return si, nil
+}
+
+// **************************************************
+//  Extended IPv4 Tunnel Egress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /           Packet IP version 4 Record          /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv4TunnelEgressRecord struct {
+	SFlowBaseFlowRecord
+	SFlowIpv4Record SFlowIpv4Record
+}
+
+func decodeExtendedIpv4TunnelEgress(data *[]byte) (SFlowExtendedIpv4TunnelEgressRecord, error) {
+	rec := SFlowExtendedIpv4TunnelEgressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	rec.SFlowIpv4Record, _ = decodeSFlowIpv4Record(data)
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended IPv4 Tunnel Ingress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /           Packet IP version 4 Record          /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv4TunnelIngressRecord struct {
+	SFlowBaseFlowRecord
+	SFlowIpv4Record SFlowIpv4Record
+}
+
+func decodeExtendedIpv4TunnelIngress(data *[]byte) (SFlowExtendedIpv4TunnelIngressRecord, error) {
+	rec := SFlowExtendedIpv4TunnelIngressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	rec.SFlowIpv4Record, _ = decodeSFlowIpv4Record(data)
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended IPv6 Tunnel Egress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /           Packet IP version 6 Record          /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv6TunnelEgressRecord struct {
+	SFlowBaseFlowRecord
+	SFlowIpv6Record
+}
+
+func decodeExtendedIpv6TunnelEgress(data *[]byte) (SFlowExtendedIpv6TunnelEgressRecord, error) {
+	rec := SFlowExtendedIpv6TunnelEgressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	rec.SFlowIpv6Record, _ = decodeSFlowIpv6Record(data)
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended IPv6 Tunnel Ingress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /           Packet IP version 6 Record          /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedIpv6TunnelIngressRecord struct {
+	SFlowBaseFlowRecord
+	SFlowIpv6Record
+}
+
+func decodeExtendedIpv6TunnelIngress(data *[]byte) (SFlowExtendedIpv6TunnelIngressRecord, error) {
+	rec := SFlowExtendedIpv6TunnelIngressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	rec.SFlowIpv6Record, _ = decodeSFlowIpv6Record(data)
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended Decapsulate Egress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               Inner Header Offset             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedDecapsulateEgressRecord struct {
+	SFlowBaseFlowRecord
+	InnerHeaderOffset uint32
+}
+
+func decodeExtendedDecapsulateEgress(data *[]byte) (SFlowExtendedDecapsulateEgressRecord, error) {
+	rec := SFlowExtendedDecapsulateEgressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.InnerHeaderOffset = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended Decapsulate Ingress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               Inner Header Offset             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedDecapsulateIngressRecord struct {
+	SFlowBaseFlowRecord
+	InnerHeaderOffset uint32
+}
+
+func decodeExtendedDecapsulateIngress(data *[]byte) (SFlowExtendedDecapsulateIngressRecord, error) {
+	rec := SFlowExtendedDecapsulateIngressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.InnerHeaderOffset = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended VNI Egress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                       VNI                     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedVniEgressRecord struct {
+	SFlowBaseFlowRecord
+	VNI uint32
+}
+
+func decodeExtendedVniEgress(data *[]byte) (SFlowExtendedVniEgressRecord, error) {
+	rec := SFlowExtendedVniEgressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.VNI = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return rec, nil
+}
+
+// **************************************************
+//  Extended VNI Ingress
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                       VNI                     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+type SFlowExtendedVniIngressRecord struct {
+	SFlowBaseFlowRecord
+	VNI uint32
+}
+
+func decodeExtendedVniIngress(data *[]byte) (SFlowExtendedVniIngressRecord, error) {
+	rec := SFlowExtendedVniIngressRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	rec.EnterpriseID, rec.Format = fdf.decode()
+	*data, rec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, rec.VNI = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return rec, nil
+}
+
+// **************************************************
+//  Counter Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  counter length               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                   counter data                /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowBaseCounterRecord struct {
+	EnterpriseID   SFlowEnterpriseID
+	Format         SFlowCounterRecordType
+	FlowDataLength uint32
+}
+
+func (bcr SFlowBaseCounterRecord) GetType() SFlowCounterRecordType {
+	switch bcr.Format {
+	case SFlowTypeGenericInterfaceCounters:
+		return SFlowTypeGenericInterfaceCounters
+	case SFlowTypeEthernetInterfaceCounters:
+		return SFlowTypeEthernetInterfaceCounters
+	case SFlowTypeTokenRingInterfaceCounters:
+		return SFlowTypeTokenRingInterfaceCounters
+	case SFlowType100BaseVGInterfaceCounters:
+		return SFlowType100BaseVGInterfaceCounters
+	case SFlowTypeVLANCounters:
+		return SFlowTypeVLANCounters
+	case SFlowTypeLACPCounters:
+		return SFlowTypeLACPCounters
+	case SFlowTypeProcessorCounters:
+		return SFlowTypeProcessorCounters
+	case SFlowTypeOpenflowPortCounters:
+		return SFlowTypeOpenflowPortCounters
+	case SFlowTypePORTNAMECounters:
+		return SFlowTypePORTNAMECounters
+	case SFLowTypeAPPRESOURCESCounters:
+		return SFLowTypeAPPRESOURCESCounters
+	case SFlowTypeOVSDPCounters:
+		return SFlowTypeOVSDPCounters
+	}
+	unrecognized := fmt.Sprint("Unrecognized counter record type:", bcr.Format)
+	panic(unrecognized)
+}
+
+// **************************************************
+//  Generic Interface Counter Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  counter length               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    IfIndex                    |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    IfType                     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfSpeed                     |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfDirection                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    IfStatus                   |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfInOctets                  |
+//  |                                               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfInUcastPkts               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  IfInMulticastPkts            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  IfInBroadcastPkts            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    IfInDiscards               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    IfInErrors                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  IfInUnknownProtos            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfOutOctets                 |
+//  |                                               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfOutUcastPkts              |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  IfOutMulticastPkts           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  IfOutBroadcastPkts           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   IfOutDiscards               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    IfOutErrors                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                 IfPromiscuousMode             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowGenericInterfaceCounters struct {
+	SFlowBaseCounterRecord
+	IfIndex            uint32
+	IfType             uint32
+	IfSpeed            uint64
+	IfDirection        uint32
+	IfStatus           uint32
+	IfInOctets         uint64
+	IfInUcastPkts      uint32
+	IfInMulticastPkts  uint32
+	IfInBroadcastPkts  uint32
+	IfInDiscards       uint32
+	IfInErrors         uint32
+	IfInUnknownProtos  uint32
+	IfOutOctets        uint64
+	IfOutUcastPkts     uint32
+	IfOutMulticastPkts uint32
+	IfOutBroadcastPkts uint32
+	IfOutDiscards      uint32
+	IfOutErrors        uint32
+	IfPromiscuousMode  uint32
+}
+
+func decodeGenericInterfaceCounters(data *[]byte) (SFlowGenericInterfaceCounters, error) {
+	gic := SFlowGenericInterfaceCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	gic.EnterpriseID, gic.Format = cdf.decode()
+	*data, gic.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfIndex = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfType = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfSpeed = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, gic.IfDirection = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfStatus = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfInOctets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, gic.IfInUcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfInMulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfInBroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfInDiscards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfInErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfInUnknownProtos = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfOutOctets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, gic.IfOutUcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfOutMulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfOutBroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfOutDiscards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfOutErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, gic.IfPromiscuousMode = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return gic, nil
+}
+
+// **************************************************
+//  Ethernet Counter Record
+// **************************************************
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  counter length               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  /                   counter data                /
+//  /                                               /
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowEthernetCounters struct {
+	SFlowBaseCounterRecord
+	AlignmentErrors           uint32
+	FCSErrors                 uint32
+	SingleCollisionFrames     uint32
+	MultipleCollisionFrames   uint32
+	SQETestErrors             uint32
+	DeferredTransmissions     uint32
+	LateCollisions            uint32
+	ExcessiveCollisions       uint32
+	InternalMacTransmitErrors uint32
+	CarrierSenseErrors        uint32
+	FrameTooLongs             uint32
+	InternalMacReceiveErrors  uint32
+	SymbolErrors              uint32
+}
+
+func decodeEthernetCounters(data *[]byte) (SFlowEthernetCounters, error) {
+	ec := SFlowEthernetCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	ec.EnterpriseID, ec.Format = cdf.decode()
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.AlignmentErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.FCSErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.SingleCollisionFrames = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.MultipleCollisionFrames = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.SQETestErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.DeferredTransmissions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.LateCollisions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.ExcessiveCollisions = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.InternalMacTransmitErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.CarrierSenseErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.FrameTooLongs = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.InternalMacReceiveErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	if len(*data) < 4 {
+		return SFlowEthernetCounters{}, errors.New("ethernet counters too small")
+	}
+	*data, ec.SymbolErrors = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return ec, nil
+}
+
+// VLAN Counter
+
+type SFlowVLANCounters struct {
+	SFlowBaseCounterRecord
+	VlanID        uint32
+	Octets        uint64
+	UcastPkts     uint32
+	MulticastPkts uint32
+	BroadcastPkts uint32
+	Discards      uint32
+}
+
+func decodeVLANCounters(data *[]byte) (SFlowVLANCounters, error) {
+	vc := SFlowVLANCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	vc.EnterpriseID, vc.Format = cdf.decode()
+	*data, vc.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, vc.VlanID = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, vc.Octets = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, vc.UcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, vc.MulticastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, vc.BroadcastPkts = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, vc.Discards = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return vc, nil
+}
+
+// SFLLACPPortState : SFlow LACP Port State (All(4) - 32 bit)
+type SFLLACPPortState struct {
+	PortStateAll uint32
+}
+
+// SFlowLACPCounters : LACP sFlow Counters (64 Bytes)
+type SFlowLACPCounters struct {
+	SFlowBaseCounterRecord
+	ActorSystemID        net.HardwareAddr
+	PartnerSystemID      net.HardwareAddr
+	AttachedAggID        uint32
+	LacpPortState        SFLLACPPortState
+	LACPDUsRx            uint32
+	MarkerPDUsRx         uint32
+	MarkerResponsePDUsRx uint32
+	UnknownRx            uint32
+	IllegalRx            uint32
+	LACPDUsTx            uint32
+	MarkerPDUsTx         uint32
+	MarkerResponsePDUsTx uint32
+}
+
+func decodeLACPCounters(data *[]byte) (SFlowLACPCounters, error) {
+	la := SFlowLACPCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	la.EnterpriseID, la.Format = cdf.decode()
+	*data, la.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.ActorSystemID = (*data)[6:], (*data)[:6]
+	*data = (*data)[2:] // remove padding
+	*data, la.PartnerSystemID = (*data)[6:], (*data)[:6]
+	*data = (*data)[2:] //remove padding
+	*data, la.AttachedAggID = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.LacpPortState.PortStateAll = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.LACPDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.MarkerPDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.MarkerResponsePDUsRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.UnknownRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.IllegalRx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.LACPDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.MarkerPDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, la.MarkerResponsePDUsTx = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return la, nil
+}
+
+// **************************************************
+//  Processor Counter Record
+// **************************************************
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  counter length               |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    FiveSecCpu                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    OneMinCpu                  |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    FiveMinCpu                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                   TotalMemory                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                    FreeMemory                 |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+type SFlowProcessorCounters struct {
+	SFlowBaseCounterRecord
+	FiveSecCpu  uint32 // 5 second average CPU utilization
+	OneMinCpu   uint32 // 1 minute average CPU utilization
+	FiveMinCpu  uint32 // 5 minute average CPU utilization
+	TotalMemory uint64 // total memory (in bytes)
+	FreeMemory  uint64 // free memory (in bytes)
+}
+
+func decodeProcessorCounters(data *[]byte) (SFlowProcessorCounters, error) {
+	pc := SFlowProcessorCounters{}
+	var cdf SFlowCounterDataFormat
+	var high32, low32 uint32
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	pc.EnterpriseID, pc.Format = cdf.decode()
+	*data, pc.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	*data, pc.FiveSecCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, pc.OneMinCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, pc.FiveMinCpu = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
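+	// TotalMemory and FreeMemory are 64-bit values carried as two 32-bit
+	// big-endian words (high word first); they are recombined below as
+	// (high << 32) + low.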
+	*data, high32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, low32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	pc.TotalMemory = (uint64(high32) << 32) + uint64(low32)
+	*data, high32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, low32 = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	pc.FreeMemory = (uint64(high32) << 32) + uint64(low32)
+
+	return pc, nil
+}
+
+// SFlowEthernetFrameFlowRecord gives additional information
+// about the sampled packet if it's available.
+// An agent may or may not provide this information.
+type SFlowEthernetFrameFlowRecord struct {
+	SFlowBaseFlowRecord
+	FrameLength uint32
+	SrcMac      net.HardwareAddr
+	DstMac      net.HardwareAddr
+	Type        uint32
+}
+
+// Ethernet frame flow records have the following structure:
+
+//  0                      15                      31
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |      20 bit Enterprise (0)     |12 bit format |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                  record length                |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |                Source Mac Address             |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |             Destination Mac Address           |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+//  |               Ethernet Packet Type            |
+//  +--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+--+
+
+func decodeEthernetFrameFlowRecord(data *[]byte) (SFlowEthernetFrameFlowRecord, error) {
+	es := SFlowEthernetFrameFlowRecord{}
+	var fdf SFlowFlowDataFormat
+
+	*data, fdf = (*data)[4:], SFlowFlowDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	es.EnterpriseID, es.Format = fdf.decode()
+	*data, es.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	*data, es.FrameLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
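+	// MAC addresses are encoded as 6 octets followed by 2 bytes of padding,
+	// hence the 8-byte stride while only the first 6 bytes are kept.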
+	*data, es.SrcMac = (*data)[8:], net.HardwareAddr((*data)[:6])
+	*data, es.DstMac = (*data)[8:], net.HardwareAddr((*data)[:6])
+	*data, es.Type = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	return es, nil
+}
+
+// SFlowOpenflowPortCounters : OVS sFlow OpenFlow Port Counters (20 Bytes)
+type SFlowOpenflowPortCounters struct {
+	SFlowBaseCounterRecord
+	DatapathID uint64
+	PortNo     uint32
+}
+
+func decodeOpenflowportCounters(data *[]byte) (SFlowOpenflowPortCounters, error) {
+	ofp := SFlowOpenflowPortCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	ofp.EnterpriseID, ofp.Format = cdf.decode()
+	*data, ofp.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, ofp.DatapathID = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, ofp.PortNo = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return ofp, nil
+}
+
+// SFlowAppresourcesCounters : OVS sFlow App Resources Counters (48 Bytes)
+type SFlowAppresourcesCounters struct {
+	SFlowBaseCounterRecord
+	UserTime   uint32
+	SystemTime uint32
+	MemUsed    uint64
+	MemMax     uint64
+	FdOpen     uint32
+	FdMax      uint32
+	ConnOpen   uint32
+	ConnMax    uint32
+}
+
+func decodeAppresourcesCounters(data *[]byte) (SFlowAppresourcesCounters, error) {
+	app := SFlowAppresourcesCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	app.EnterpriseID, app.Format = cdf.decode()
+	*data, app.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.UserTime = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.SystemTime = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.MemUsed = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, app.MemMax = (*data)[8:], binary.BigEndian.Uint64((*data)[:8])
+	*data, app.FdOpen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.FdMax = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.ConnOpen = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, app.ConnMax = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return app, nil
+}
+
+// SFlowOVSDPCounters : OVS sFlow Datapath Counters (32 Bytes)
+type SFlowOVSDPCounters struct {
+	SFlowBaseCounterRecord
+	NHit     uint32
+	NMissed  uint32
+	NLost    uint32
+	NMaskHit uint32
+	NFlows   uint32
+	NMasks   uint32
+}
+
+func decodeOVSDPCounters(data *[]byte) (SFlowOVSDPCounters, error) {
+	dp := SFlowOVSDPCounters{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	dp.EnterpriseID, dp.Format = cdf.decode()
+	*data, dp.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, dp.NHit = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, dp.NMissed = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, dp.NLost = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, dp.NMaskHit = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, dp.NFlows = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	*data, dp.NMasks = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+
+	return dp, nil
+}
+
+// SFlowPORTNAME : OVS sFlow PORTNAME Counter Sample Type (20 Bytes)
+type SFlowPORTNAME struct {
+	SFlowBaseCounterRecord
+	Len uint32
+	Str string
+}
+
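+// decodeString reads an XDR-style string: a 4-byte big-endian length followed
+// by the string bytes, padded to a 4-byte boundary. For example, a 5-byte
+// string consumes 4 + 8 = 12 bytes of input (5 data bytes plus 3 padding
+// bytes), and only the 5 data bytes end up in str.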
+func decodeString(data *[]byte) (len uint32, str string) {
+	*data, len = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	str = string((*data)[:len])
+	if (len % 4) != 0 {
+		len += 4 - len%4
+	}
+	*data = (*data)[len:]
+	return
+}
+
+func decodePortnameCounters(data *[]byte) (SFlowPORTNAME, error) {
+	pn := SFlowPORTNAME{}
+	var cdf SFlowCounterDataFormat
+
+	*data, cdf = (*data)[4:], SFlowCounterDataFormat(binary.BigEndian.Uint32((*data)[:4]))
+	pn.EnterpriseID, pn.Format = cdf.decode()
+	*data, pn.FlowDataLength = (*data)[4:], binary.BigEndian.Uint32((*data)[:4])
+	pn.Len, pn.Str = decodeString(data)
+
+	return pn, nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/sip.go b/vendor/github.com/google/gopacket/layers/sip.go
new file mode 100644
index 0000000..70afdb5
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/sip.go
@@ -0,0 +1,542 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"strconv"
+	"strings"
+
+	"github.com/google/gopacket"
+)
+
+// SIPVersion defines the different versions of the SIP Protocol
+type SIPVersion uint8
+
+// Represents all the versions of SIP protocol
+const (
+	SIPVersion1 SIPVersion = 1
+	SIPVersion2 SIPVersion = 2
+)
+
+func (sv SIPVersion) String() string {
+	switch sv {
+	default:
+		// Defaulting to SIP/2.0
+		return "SIP/2.0"
+	case SIPVersion1:
+		return "SIP/1.0"
+	case SIPVersion2:
+		return "SIP/2.0"
+	}
+}
+
+// GetSIPVersion is used to get SIP version constant
+func GetSIPVersion(version string) (SIPVersion, error) {
+	switch strings.ToUpper(version) {
+	case "SIP/1.0":
+		return SIPVersion1, nil
+	case "SIP/2.0":
+		return SIPVersion2, nil
+	default:
+		return 0, fmt.Errorf("Unknown SIP version: '%s'", version)
+
+	}
+}
+
+// SIPMethod defines the different methods of the SIP Protocol
+// defined in the different RFC's
+type SIPMethod uint16
+
+// Here are all the SIP methods
+const (
+	SIPMethodInvite    SIPMethod = 1  // INVITE	[RFC3261]
+	SIPMethodAck       SIPMethod = 2  // ACK	[RFC3261]
+	SIPMethodBye       SIPMethod = 3  // BYE	[RFC3261]
+	SIPMethodCancel    SIPMethod = 4  // CANCEL	[RFC3261]
+	SIPMethodOptions   SIPMethod = 5  // OPTIONS	[RFC3261]
+	SIPMethodRegister  SIPMethod = 6  // REGISTER	[RFC3261]
+	SIPMethodPrack     SIPMethod = 7  // PRACK	[RFC3262]
+	SIPMethodSubscribe SIPMethod = 8  // SUBSCRIBE	[RFC6665]
+	SIPMethodNotify    SIPMethod = 9  // NOTIFY	[RFC6665]
+	SIPMethodPublish   SIPMethod = 10 // PUBLISH	[RFC3903]
+	SIPMethodInfo      SIPMethod = 11 // INFO	[RFC6086]
+	SIPMethodRefer     SIPMethod = 12 // REFER	[RFC3515]
+	SIPMethodMessage   SIPMethod = 13 // MESSAGE	[RFC3428]
+	SIPMethodUpdate    SIPMethod = 14 // UPDATE	[RFC3311]
+	SIPMethodPing      SIPMethod = 15 // PING	[https://tools.ietf.org/html/draft-fwmiller-ping-03]
+)
+
+func (sm SIPMethod) String() string {
+	switch sm {
+	default:
+		return "Unknown method"
+	case SIPMethodInvite:
+		return "INVITE"
+	case SIPMethodAck:
+		return "ACK"
+	case SIPMethodBye:
+		return "BYE"
+	case SIPMethodCancel:
+		return "CANCEL"
+	case SIPMethodOptions:
+		return "OPTIONS"
+	case SIPMethodRegister:
+		return "REGISTER"
+	case SIPMethodPrack:
+		return "PRACK"
+	case SIPMethodSubscribe:
+		return "SUBSCRIBE"
+	case SIPMethodNotify:
+		return "NOTIFY"
+	case SIPMethodPublish:
+		return "PUBLISH"
+	case SIPMethodInfo:
+		return "INFO"
+	case SIPMethodRefer:
+		return "REFER"
+	case SIPMethodMessage:
+		return "MESSAGE"
+	case SIPMethodUpdate:
+		return "UPDATE"
+	case SIPMethodPing:
+		return "PING"
+	}
+}
+
+// GetSIPMethod returns the constant of a SIP method
+// from its string
+func GetSIPMethod(method string) (SIPMethod, error) {
+	switch strings.ToUpper(method) {
+	case "INVITE":
+		return SIPMethodInvite, nil
+	case "ACK":
+		return SIPMethodAck, nil
+	case "BYE":
+		return SIPMethodBye, nil
+	case "CANCEL":
+		return SIPMethodCancel, nil
+	case "OPTIONS":
+		return SIPMethodOptions, nil
+	case "REGISTER":
+		return SIPMethodRegister, nil
+	case "PRACK":
+		return SIPMethodPrack, nil
+	case "SUBSCRIBE":
+		return SIPMethodSubscribe, nil
+	case "NOTIFY":
+		return SIPMethodNotify, nil
+	case "PUBLISH":
+		return SIPMethodPublish, nil
+	case "INFO":
+		return SIPMethodInfo, nil
+	case "REFER":
+		return SIPMethodRefer, nil
+	case "MESSAGE":
+		return SIPMethodMessage, nil
+	case "UPDATE":
+		return SIPMethodUpdate, nil
+	case "PING":
+		return SIPMethodPing, nil
+	default:
+		return 0, fmt.Errorf("Unknown SIP method: '%s'", method)
+	}
+}
+
+// Here is a correspondence between long header names and their compact
+// forms, as defined in RFC 3261, section 20.
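+// For example, "i: abc@host" means the same as "Call-ID: abc@host"; GetHeader
+// falls back to the compact form when the long form is absent.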
+var compactSipHeadersCorrespondance = map[string]string{
+	"accept-contact":      "a",
+	"allow-events":        "u",
+	"call-id":             "i",
+	"contact":             "m",
+	"content-encoding":    "e",
+	"content-length":      "l",
+	"content-type":        "c",
+	"event":               "o",
+	"from":                "f",
+	"identity":            "y",
+	"refer-to":            "r",
+	"referred-by":         "b",
+	"reject-contact":      "j",
+	"request-disposition": "d",
+	"session-expires":     "x",
+	"subject":             "s",
+	"supported":           "k",
+	"to":                  "t",
+	"via":                 "v",
+}
+
+// SIP object contains information about a decoded SIP packet:
+// -> The SIP Version
+// -> The SIP Headers (in a map[string][]string because multiple headers can share the same name)
+// -> The SIP Method
+// -> The SIP Response code (if it's a response)
+// -> The SIP Status line (if it's a response)
+// You can easily tell the type of the packet from the IsResponse boolean.
+//
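+// A minimal usage sketch (illustrative only; udpPayload is assumed to hold
+// the raw bytes of a SIP message):
+//
+//	pkt := gopacket.NewPacket(udpPayload, LayerTypeSIP, gopacket.Default)
+//	if sip, ok := pkt.Layer(LayerTypeSIP).(*SIP); ok {
+//		fmt.Println(sip.Method, sip.GetCallID())
+//	}
+//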
+type SIP struct {
+	BaseLayer
+
+	// Base information
+	Version SIPVersion
+	Method  SIPMethod
+	Headers map[string][]string
+
+	// Request
+	RequestURI string
+
+	// Response
+	IsResponse     bool
+	ResponseCode   int
+	ResponseStatus string
+
+	// Private fields
+	cseq             int64
+	contentLength    int64
+	lastHeaderParsed string
+}
+
+// decodeSIP decodes the byte slice into a SIP type. It also
+// sets up the application layer in the PacketBuilder.
+func decodeSIP(data []byte, p gopacket.PacketBuilder) error {
+	s := NewSIP()
+	err := s.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(s)
+	p.SetApplicationLayer(s)
+	return nil
+}
+
+// NewSIP instantiates a new empty SIP object
+func NewSIP() *SIP {
+	s := new(SIP)
+	s.Headers = make(map[string][]string)
+	return s
+}
+
+// LayerType returns gopacket.LayerTypeSIP.
+func (s *SIP) LayerType() gopacket.LayerType {
+	return LayerTypeSIP
+}
+
+// Payload returns the base layer payload
+func (s *SIP) Payload() []byte {
+	return s.BaseLayer.Payload
+}
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode
+func (s *SIP) CanDecode() gopacket.LayerClass {
+	return LayerTypeSIP
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer
+func (s *SIP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// DecodeFromBytes decodes the slice into the SIP struct.
+func (s *SIP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	// Init some vars for the parsing loop below
+	var countLines int
+	var line []byte
+	var err error
+	var offset int
+
+	// Iterate over all lines of the SIP headers and stop when we reach
+	// the body (typically SDP), i.e. when an empty line appears at the
+	// start of the remaining packet
+	buffer := bytes.NewBuffer(data)
+
+	for {
+
+		// Read next line
+		line, err = buffer.ReadBytes(byte('\n'))
+		if err != nil {
+			if err == io.EOF {
+				if len(bytes.Trim(line, "\r\n")) > 0 {
+					df.SetTruncated()
+				}
+				break
+			} else {
+				return err
+			}
+		}
+		offset += len(line)
+
+		// Trim the new line delimiters
+		line = bytes.Trim(line, "\r\n")
+
+		// Empty line, we hit Body
+		if len(line) == 0 {
+			break
+		}
+
+		// First line is the SIP request/response line
+		// Other lines are headers
+		if countLines == 0 {
+			err = s.ParseFirstLine(line)
+			if err != nil {
+				return err
+			}
+
+		} else {
+			err = s.ParseHeader(line)
+			if err != nil {
+				return err
+			}
+		}
+
+		countLines++
+	}
+	s.BaseLayer = BaseLayer{Contents: data[:offset], Payload: data[offset:]}
+
+	return nil
+}
+
+// ParseFirstLine will parse the first line of a SIP packet.
+// The first line will tell us if it's a request or a response.
+//
+// Examples of the first line of the SIP Protocol:
+//
+// 	Request 	: INVITE bob@example.com SIP/2.0
+// 	Response 	: SIP/2.0 200 OK
+// 	Response	: SIP/2.0 501 Not Implemented
+//
+func (s *SIP) ParseFirstLine(firstLine []byte) error {
+
+	var err error
+
+	// Splits line by space
+	splits := strings.SplitN(string(firstLine), " ", 3)
+
+	// We must have at least 3 parts
+	if len(splits) < 3 {
+		return fmt.Errorf("invalid first SIP line: '%s'", string(firstLine))
+	}
+
+	// Determine the SIP packet type
+	if strings.HasPrefix(splits[0], "SIP") {
+
+		// --> Response
+		s.IsResponse = true
+
+		// Validate SIP Version
+		s.Version, err = GetSIPVersion(splits[0])
+		if err != nil {
+			return err
+		}
+
+		// Compute code
+		s.ResponseCode, err = strconv.Atoi(splits[1])
+		if err != nil {
+			return err
+		}
+
+		// Compute status line
+		s.ResponseStatus = splits[2]
+
+	} else {
+
+		// --> Request
+
+		// Validate method
+		s.Method, err = GetSIPMethod(splits[0])
+		if err != nil {
+			return err
+		}
+
+		s.RequestURI = splits[1]
+
+		// Validate SIP Version
+		s.Version, err = GetSIPVersion(splits[2])
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ParseHeader will parse a SIP Header
+// SIP headers are quite simple: they are colon-separated name/value pairs.
+// Headers can be spread over multiple lines.
+//
+// Examples of headers:
+//
+//  CSeq: 1 REGISTER
+//  Via: SIP/2.0/UDP there.com:5060
+//  Authorization:Digest username="UserB",
+//	  realm="MCI WorldCom SIP",
+//    nonce="1cec4341ae6cbe5a359ea9c8e88df84f", opaque="",
+//    uri="sip:ss2.wcom.com", response="71ba27c64bd01de719686aa4590d5824"
+//
+func (s *SIP) ParseHeader(header []byte) (err error) {
+
+	// Ignore empty headers
+	if len(header) == 0 {
+		return
+	}
+
+	// Check whether this is a continuation of the previous header.
+	// RFC 3261, section 7.3.1 (Header Field Format) specifies that continuation
+	// lines of multiline headers must begin with SP or TAB.
+	if header[0] == '\t' || header[0] == ' ' {
+
+		header = bytes.TrimSpace(header)
+		s.Headers[s.lastHeaderParsed][len(s.Headers[s.lastHeaderParsed])-1] += fmt.Sprintf(" %s", string(header))
+		return
+	}
+
+	// Find the ':' to separate header name and value
+	index := bytes.Index(header, []byte(":"))
+	if index >= 0 {
+
+		headerName := strings.ToLower(string(bytes.Trim(header[:index], " ")))
+		headerValue := string(bytes.Trim(header[index+1:], " "))
+
+		// Add header to object
+		s.Headers[headerName] = append(s.Headers[headerName], headerValue)
+		s.lastHeaderParsed = headerName
+
+		// Compute specific headers
+		err = s.ParseSpecificHeaders(headerName, headerValue)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// ParseSpecificHeaders will parse specific values from certain headers,
+// such as the CSeq and Content-Length integer values.
+func (s *SIP) ParseSpecificHeaders(headerName string, headerValue string) (err error) {
+
+	switch headerName {
+	case "cseq":
+
+		// The CSeq header value is formatted like this:
+		// CSeq: 123 INVITE
+		// We split the value to parse Cseq integer value, and method
+		splits := strings.Split(headerValue, " ")
+		if len(splits) > 1 {
+
+			// Parse Cseq
+			s.cseq, err = strconv.ParseInt(splits[0], 10, 64)
+			if err != nil {
+				return err
+			}
+
+			// Validate method
+			if s.IsResponse {
+				s.Method, err = GetSIPMethod(splits[1])
+				if err != nil {
+					return err
+				}
+			}
+		}
+
+	case "content-length":
+
+		// Parse Content-Length
+		s.contentLength, err = strconv.ParseInt(headerValue, 10, 64)
+		if err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// GetAllHeaders will return the full headers of the
+// current SIP packets in a map[string][]string
+func (s *SIP) GetAllHeaders() map[string][]string {
+	return s.Headers
+}
+
+// GetHeader will return all the headers with
+// the specified name.
+func (s *SIP) GetHeader(headerName string) []string {
+	headerName = strings.ToLower(headerName)
+	h := make([]string, 0)
+	if _, ok := s.Headers[headerName]; ok {
+		return s.Headers[headerName]
+	}
+	compactHeader := compactSipHeadersCorrespondance[headerName]
+	if _, ok := s.Headers[compactHeader]; ok {
+		return s.Headers[compactHeader]
+	}
+	return h
+}
+
+// GetFirstHeader will return the first header with
+// the specified name. If the current SIP packet has multiple
+// headers with the same name, it returns the first.
+func (s *SIP) GetFirstHeader(headerName string) string {
+	headers := s.GetHeader(headerName)
+	if len(headers) > 0 {
+		return headers[0]
+	}
+	return ""
+}
+
+//
+// Some handy getters for the most commonly used SIP headers
+//
+
+// GetAuthorization will return the Authorization
+// header of the current SIP packet
+func (s *SIP) GetAuthorization() string {
+	return s.GetFirstHeader("Authorization")
+}
+
+// GetFrom will return the From
+// header of the current SIP packet
+func (s *SIP) GetFrom() string {
+	return s.GetFirstHeader("From")
+}
+
+// GetTo will return the To
+// header of the current SIP packet
+func (s *SIP) GetTo() string {
+	return s.GetFirstHeader("To")
+}
+
+// GetContact will return the Contact
+// header of the current SIP packet
+func (s *SIP) GetContact() string {
+	return s.GetFirstHeader("Contact")
+}
+
+// GetCallID will return the Call-ID
+// header of the current SIP packet
+func (s *SIP) GetCallID() string {
+	return s.GetFirstHeader("Call-ID")
+}
+
+// GetUserAgent will return the User-Agent
+// header of the current SIP packet
+func (s *SIP) GetUserAgent() string {
+	return s.GetFirstHeader("User-Agent")
+}
+
+// GetContentLength will return the parsed integer
+// Content-Length header of the current SIP packet
+func (s *SIP) GetContentLength() int64 {
+	return s.contentLength
+}
+
+// GetCSeq will return the parsed integer CSeq header
+// of the current SIP packet
+func (s *SIP) GetCSeq() int64 {
+	return s.cseq
+}
diff --git a/vendor/github.com/google/gopacket/layers/stp.go b/vendor/github.com/google/gopacket/layers/stp.go
new file mode 100644
index 0000000..31c2a6e
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/stp.go
@@ -0,0 +1,150 @@
+// Copyright 2017 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+type STPSwitchID struct {
+	Priority uint16 // Bridge priority
+	SysID    uint16 // VLAN ID
+	HwAddr   net.HardwareAddr
+}
+
+// STP decodes Spanning Tree Protocol packets, which transport BPDU (bridge protocol data unit) messages.
+type STP struct {
+	BaseLayer
+	ProtocolID        uint16
+	Version           uint8
+	Type              uint8
+	TC, TCA           bool // TC: Topology change; TCA: Topology change ack
+	RouteID, BridgeID STPSwitchID
+	Cost              uint32
+	PortID            uint16
+	MessageAge        uint16
+	MaxAge            uint16
+	HelloTime         uint16
+	FDelay            uint16
+}
+
+// LayerType returns gopacket.LayerTypeSTP.
+func (s *STP) LayerType() gopacket.LayerType { return LayerTypeSTP }
+
+// CanDecode returns the set of layer types that this DecodingLayer can decode.
+func (s *STP) CanDecode() gopacket.LayerClass {
+	return LayerTypeSTP
+}
+
+// DecodeFromBytes decodes the given bytes into this layer.
+func (stp *STP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	stpLength := 35
+	if len(data) < stpLength {
+		df.SetTruncated()
+		return fmt.Errorf("STP length %d too short", len(data))
+	}
+
+	stp.ProtocolID = binary.BigEndian.Uint16(data[:2])
+	stp.Version = uint8(data[2])
+	stp.Type = uint8(data[3])
+	stp.TC = data[4]&0x01 != 0
+	stp.TCA = data[4]&0x80 != 0
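+	// Root and bridge IDs are 8 bytes each: a 16-bit field holding a 4-bit
+	// priority (upper nibble) and a 12-bit system/VLAN ID, followed by a
+	// 6-byte MAC address.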
+	stp.RouteID.Priority = binary.BigEndian.Uint16(data[5:7]) & 0xf000
+	stp.RouteID.SysID = binary.BigEndian.Uint16(data[5:7]) & 0x0fff
+	stp.RouteID.HwAddr = net.HardwareAddr(data[7:13])
+	stp.Cost = binary.BigEndian.Uint32(data[13:17])
+	stp.BridgeID.Priority = binary.BigEndian.Uint16(data[17:19]) & 0xf000
+	stp.BridgeID.SysID = binary.BigEndian.Uint16(data[17:19]) & 0x0fff
+	stp.BridgeID.HwAddr = net.HardwareAddr(data[19:25])
+	stp.PortID = binary.BigEndian.Uint16(data[25:27])
+	stp.MessageAge = binary.BigEndian.Uint16(data[27:29])
+	stp.MaxAge = binary.BigEndian.Uint16(data[29:31])
+	stp.HelloTime = binary.BigEndian.Uint16(data[31:33])
+	stp.FDelay = binary.BigEndian.Uint16(data[33:35])
+	stp.Contents = data[:stpLength]
+	stp.Payload = data[stpLength:]
+
+	return nil
+}
+
+// NextLayerType returns the layer type contained by this DecodingLayer.
+func (stp *STP) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+// Check if the priority value is correct.
+func checkPriority(prio uint16) (uint16, error) {
+	if prio == 0 {
+		return prio, errors.New("Invalid Priority value must be in the rage <4096-61440> with an increment of 4096")
+	}
+	if prio%4096 == 0 {
+		return prio, nil
+	} else {
+		return prio, errors.New("Invalid Priority value must be in the rage <4096-61440> with an increment of 4096")
+	}
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (s *STP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var flags uint8 = 0x00
+	bytes, err := b.PrependBytes(35)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, s.ProtocolID)
+	bytes[2] = s.Version
+	bytes[3] = s.Type
+	if s.TC {
+		flags |= 0x01
+	}
+	if s.TCA {
+		flags |= 0x80
+	}
+	bytes[4] = flags
+
+	prioRoot, err := checkPriority(s.RouteID.Priority)
+	if err != nil {
+		panic(err)
+	}
+	if s.RouteID.SysID >= 4096 {
+		panic("Invalid VlanID value ..!")
+	}
+	binary.BigEndian.PutUint16(bytes[5:7], prioRoot|s.RouteID.SysID)
+	copy(bytes[7:13], s.RouteID.HwAddr)
+
+	binary.BigEndian.PutUint32(bytes[13:17], s.Cost)
+
+	prioBridge, err := checkPriority(s.BridgeID.Priority)
+	if err != nil {
+		panic(err)
+	}
+	if s.BridgeID.SysID >= 4096 {
+		panic("Invalid VlanID value ..!")
+	}
+	binary.BigEndian.PutUint16(bytes[17:19], prioBridge|s.BridgeID.SysID)
+	copy(bytes[19:25], s.BridgeID.HwAddr)
+
+	binary.BigEndian.PutUint16(bytes[25:27], s.PortID)
+	binary.BigEndian.PutUint16(bytes[27:29], s.MessageAge)
+	binary.BigEndian.PutUint16(bytes[29:31], s.MaxAge)
+	binary.BigEndian.PutUint16(bytes[31:33], s.HelloTime)
+	binary.BigEndian.PutUint16(bytes[33:35], s.FDelay)
+
+	return nil
+}
+
+func decodeSTP(data []byte, p gopacket.PacketBuilder) error {
+	stp := &STP{}
+	return decodingLayerDecoder(stp, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/tcp.go b/vendor/github.com/google/gopacket/layers/tcp.go
new file mode 100644
index 0000000..bcdeb4b
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tcp.go
@@ -0,0 +1,341 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"encoding/hex"
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// TCP is the layer for TCP headers.
+type TCP struct {
+	BaseLayer
+	SrcPort, DstPort                           TCPPort
+	Seq                                        uint32
+	Ack                                        uint32
+	DataOffset                                 uint8
+	FIN, SYN, RST, PSH, ACK, URG, ECE, CWR, NS bool
+	Window                                     uint16
+	Checksum                                   uint16
+	Urgent                                     uint16
+	sPort, dPort                               []byte
+	Options                                    []TCPOption
+	Padding                                    []byte
+	opts                                       [4]TCPOption
+	tcpipchecksum
+}
+
+// TCPOptionKind represents a TCP option code.
+type TCPOptionKind uint8
+
+const (
+	TCPOptionKindEndList                         = 0
+	TCPOptionKindNop                             = 1
+	TCPOptionKindMSS                             = 2  // len = 4
+	TCPOptionKindWindowScale                     = 3  // len = 3
+	TCPOptionKindSACKPermitted                   = 4  // len = 2
+	TCPOptionKindSACK                            = 5  // len = n
+	TCPOptionKindEcho                            = 6  // len = 6, obsolete
+	TCPOptionKindEchoReply                       = 7  // len = 6, obsolete
+	TCPOptionKindTimestamps                      = 8  // len = 10
+	TCPOptionKindPartialOrderConnectionPermitted = 9  // len = 2, obsolete
+	TCPOptionKindPartialOrderServiceProfile      = 10 // len = 3, obsolete
+	TCPOptionKindCC                              = 11 // obsolete
+	TCPOptionKindCCNew                           = 12 // obsolete
+	TCPOptionKindCCEcho                          = 13 // obsolete
+	TCPOptionKindAltChecksum                     = 14 // len = 3, obsolete
+	TCPOptionKindAltChecksumData                 = 15 // len = n, obsolete
+)
+
+func (k TCPOptionKind) String() string {
+	switch k {
+	case TCPOptionKindEndList:
+		return "EndList"
+	case TCPOptionKindNop:
+		return "NOP"
+	case TCPOptionKindMSS:
+		return "MSS"
+	case TCPOptionKindWindowScale:
+		return "WindowScale"
+	case TCPOptionKindSACKPermitted:
+		return "SACKPermitted"
+	case TCPOptionKindSACK:
+		return "SACK"
+	case TCPOptionKindEcho:
+		return "Echo"
+	case TCPOptionKindEchoReply:
+		return "EchoReply"
+	case TCPOptionKindTimestamps:
+		return "Timestamps"
+	case TCPOptionKindPartialOrderConnectionPermitted:
+		return "PartialOrderConnectionPermitted"
+	case TCPOptionKindPartialOrderServiceProfile:
+		return "PartialOrderServiceProfile"
+	case TCPOptionKindCC:
+		return "CC"
+	case TCPOptionKindCCNew:
+		return "CCNew"
+	case TCPOptionKindCCEcho:
+		return "CCEcho"
+	case TCPOptionKindAltChecksum:
+		return "AltChecksum"
+	case TCPOptionKindAltChecksumData:
+		return "AltChecksumData"
+	default:
+		return fmt.Sprintf("Unknown(%d)", k)
+	}
+}
+
+type TCPOption struct {
+	OptionType   TCPOptionKind
+	OptionLength uint8
+	OptionData   []byte
+}
+
+func (t TCPOption) String() string {
+	hd := hex.EncodeToString(t.OptionData)
+	if len(hd) > 0 {
+		hd = " 0x" + hd
+	}
+	switch t.OptionType {
+	case TCPOptionKindMSS:
+		if len(t.OptionData) >= 2 {
+			return fmt.Sprintf("TCPOption(%s:%v%s)",
+				t.OptionType,
+				binary.BigEndian.Uint16(t.OptionData),
+				hd)
+		}
+
+	case TCPOptionKindTimestamps:
+		if len(t.OptionData) == 8 {
+			return fmt.Sprintf("TCPOption(%s:%v/%v%s)",
+				t.OptionType,
+				binary.BigEndian.Uint32(t.OptionData[:4]),
+				binary.BigEndian.Uint32(t.OptionData[4:8]),
+				hd)
+		}
+	}
+	return fmt.Sprintf("TCPOption(%s:%s)", t.OptionType, hd)
+}
+
+// LayerType returns gopacket.LayerTypeTCP
+func (t *TCP) LayerType() gopacket.LayerType { return LayerTypeTCP }
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (t *TCP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var optionLength int
+	for _, o := range t.Options {
+		switch o.OptionType {
+		case 0, 1:
+			optionLength += 1
+		default:
+			optionLength += 2 + len(o.OptionData)
+		}
+	}
+	if opts.FixLengths {
+		if rem := optionLength % 4; rem != 0 {
+			t.Padding = lotsOfZeros[:4-rem]
+		}
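+		// DataOffset is the header length in 32-bit words: 20 fixed bytes
+		// plus options and padding, divided by 4.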
+		t.DataOffset = uint8((len(t.Padding) + optionLength + 20) / 4)
+	}
+	bytes, err := b.PrependBytes(20 + optionLength + len(t.Padding))
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, uint16(t.SrcPort))
+	binary.BigEndian.PutUint16(bytes[2:], uint16(t.DstPort))
+	binary.BigEndian.PutUint32(bytes[4:], t.Seq)
+	binary.BigEndian.PutUint32(bytes[8:], t.Ack)
+	binary.BigEndian.PutUint16(bytes[12:], t.flagsAndOffset())
+	binary.BigEndian.PutUint16(bytes[14:], t.Window)
+	binary.BigEndian.PutUint16(bytes[18:], t.Urgent)
+	start := 20
+	for _, o := range t.Options {
+		bytes[start] = byte(o.OptionType)
+		switch o.OptionType {
+		case 0, 1:
+			start++
+		default:
+			if opts.FixLengths {
+				o.OptionLength = uint8(len(o.OptionData) + 2)
+			}
+			bytes[start+1] = o.OptionLength
+			copy(bytes[start+2:start+len(o.OptionData)+2], o.OptionData)
+			start += len(o.OptionData) + 2
+		}
+	}
+	copy(bytes[start:], t.Padding)
+	if opts.ComputeChecksums {
+		// zero out checksum bytes in current serialization.
+		bytes[16] = 0
+		bytes[17] = 0
+		csum, err := t.computeChecksum(b.Bytes(), IPProtocolTCP)
+		if err != nil {
+			return err
+		}
+		t.Checksum = csum
+	}
+	binary.BigEndian.PutUint16(bytes[16:], t.Checksum)
+	return nil
+}
+
+func (t *TCP) ComputeChecksum() (uint16, error) {
+	return t.computeChecksum(append(t.Contents, t.Payload...), IPProtocolTCP)
+}
+
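+// flagsAndOffset packs DataOffset and the flag bits into the 16-bit word that
+// sits at byte offset 12 of the TCP header. For example, a SYN/ACK segment
+// with a 20-byte header (DataOffset 5) produces 0x5012.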
+func (t *TCP) flagsAndOffset() uint16 {
+	f := uint16(t.DataOffset) << 12
+	if t.FIN {
+		f |= 0x0001
+	}
+	if t.SYN {
+		f |= 0x0002
+	}
+	if t.RST {
+		f |= 0x0004
+	}
+	if t.PSH {
+		f |= 0x0008
+	}
+	if t.ACK {
+		f |= 0x0010
+	}
+	if t.URG {
+		f |= 0x0020
+	}
+	if t.ECE {
+		f |= 0x0040
+	}
+	if t.CWR {
+		f |= 0x0080
+	}
+	if t.NS {
+		f |= 0x0100
+	}
+	return f
+}
+
+func (tcp *TCP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 20 {
+		df.SetTruncated()
+		return fmt.Errorf("Invalid TCP header. Length %d less than 20", len(data))
+	}
+	tcp.SrcPort = TCPPort(binary.BigEndian.Uint16(data[0:2]))
+	tcp.sPort = data[0:2]
+	tcp.DstPort = TCPPort(binary.BigEndian.Uint16(data[2:4]))
+	tcp.dPort = data[2:4]
+	tcp.Seq = binary.BigEndian.Uint32(data[4:8])
+	tcp.Ack = binary.BigEndian.Uint32(data[8:12])
+	tcp.DataOffset = data[12] >> 4
+	tcp.FIN = data[13]&0x01 != 0
+	tcp.SYN = data[13]&0x02 != 0
+	tcp.RST = data[13]&0x04 != 0
+	tcp.PSH = data[13]&0x08 != 0
+	tcp.ACK = data[13]&0x10 != 0
+	tcp.URG = data[13]&0x20 != 0
+	tcp.ECE = data[13]&0x40 != 0
+	tcp.CWR = data[13]&0x80 != 0
+	tcp.NS = data[12]&0x01 != 0
+	tcp.Window = binary.BigEndian.Uint16(data[14:16])
+	tcp.Checksum = binary.BigEndian.Uint16(data[16:18])
+	tcp.Urgent = binary.BigEndian.Uint16(data[18:20])
+	if tcp.Options == nil {
+		// Reuse the pre-allocated backing array to avoid a heap allocation.
+		tcp.Options = tcp.opts[:0]
+	} else {
+		tcp.Options = tcp.Options[:0]
+	}
+	tcp.Padding = tcp.Padding[:0]
+	if tcp.DataOffset < 5 {
+		return fmt.Errorf("Invalid TCP data offset %d < 5", tcp.DataOffset)
+	}
+	dataStart := int(tcp.DataOffset) * 4
+	if dataStart > len(data) {
+		df.SetTruncated()
+		tcp.Payload = nil
+		tcp.Contents = data
+		return errors.New("TCP data offset greater than packet length")
+	}
+	tcp.Contents = data[:dataStart]
+	tcp.Payload = data[dataStart:]
+	// From here on, data points just to the header options.
+	data = data[20:dataStart]
+OPTIONS:
+	for len(data) > 0 {
+		tcp.Options = append(tcp.Options, TCPOption{OptionType: TCPOptionKind(data[0])})
+		opt := &tcp.Options[len(tcp.Options)-1]
+		switch opt.OptionType {
+		case TCPOptionKindEndList: // End of options
+			opt.OptionLength = 1
+			tcp.Padding = data[1:]
+			break OPTIONS
+		case TCPOptionKindNop: // 1 byte padding
+			opt.OptionLength = 1
+		default:
+			if len(data) < 2 {
+				df.SetTruncated()
+				return fmt.Errorf("Invalid TCP option length. Length %d less than 2", len(data))
+			}
+			opt.OptionLength = data[1]
+			if opt.OptionLength < 2 {
+				return fmt.Errorf("Invalid TCP option length %d < 2", opt.OptionLength)
+			} else if int(opt.OptionLength) > len(data) {
+				df.SetTruncated()
+				return fmt.Errorf("Invalid TCP option length %d exceeds remaining %d bytes", opt.OptionLength, len(data))
+			}
+			opt.OptionData = data[2:opt.OptionLength]
+		}
+		data = data[opt.OptionLength:]
+	}
+	return nil
+}
+
+func (t *TCP) CanDecode() gopacket.LayerClass {
+	return LayerTypeTCP
+}
+
+func (t *TCP) NextLayerType() gopacket.LayerType {
+	lt := t.DstPort.LayerType()
+	if lt == gopacket.LayerTypePayload {
+		lt = t.SrcPort.LayerType()
+	}
+	return lt
+}
+
+func decodeTCP(data []byte, p gopacket.PacketBuilder) error {
+	tcp := &TCP{}
+	err := tcp.DecodeFromBytes(data, p)
+	p.AddLayer(tcp)
+	p.SetTransportLayer(tcp)
+	if err != nil {
+		return err
+	}
+	if p.DecodeOptions().DecodeStreamsAsDatagrams {
+		return p.NextDecoder(tcp.NextLayerType())
+	} else {
+		return p.NextDecoder(gopacket.LayerTypePayload)
+	}
+}
+
+func (t *TCP) TransportFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointTCPPort, t.sPort, t.dPort)
+}
+
+// For testing only
+func (t *TCP) SetInternalPortsForTesting() {
+	t.sPort = make([]byte, 2)
+	t.dPort = make([]byte, 2)
+	binary.BigEndian.PutUint16(t.sPort, uint16(t.SrcPort))
+	binary.BigEndian.PutUint16(t.dPort, uint16(t.DstPort))
+}
diff --git a/vendor/github.com/google/gopacket/layers/tcpip.go b/vendor/github.com/google/gopacket/layers/tcpip.go
new file mode 100644
index 0000000..64ba51c
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tcpip.go
@@ -0,0 +1,104 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// Checksum computation for TCP/UDP.
+type tcpipchecksum struct {
+	pseudoheader tcpipPseudoHeader
+}
+
+type tcpipPseudoHeader interface {
+	pseudoheaderChecksum() (uint32, error)
+}
+
+func (ip *IPv4) pseudoheaderChecksum() (csum uint32, err error) {
+	if err := ip.AddressTo4(); err != nil {
+		return 0, err
+	}
+	csum += (uint32(ip.SrcIP[0]) + uint32(ip.SrcIP[2])) << 8
+	csum += uint32(ip.SrcIP[1]) + uint32(ip.SrcIP[3])
+	csum += (uint32(ip.DstIP[0]) + uint32(ip.DstIP[2])) << 8
+	csum += uint32(ip.DstIP[1]) + uint32(ip.DstIP[3])
+	return csum, nil
+}
+
+func (ip *IPv6) pseudoheaderChecksum() (csum uint32, err error) {
+	if err := ip.AddressTo16(); err != nil {
+		return 0, err
+	}
+	for i := 0; i < 16; i += 2 {
+		csum += uint32(ip.SrcIP[i]) << 8
+		csum += uint32(ip.SrcIP[i+1])
+		csum += uint32(ip.DstIP[i]) << 8
+		csum += uint32(ip.DstIP[i+1])
+	}
+	return csum, nil
+}
+
+// Calculate the TCP/IP checksum defined in rfc1071.  The passed-in csum is any
+// initial checksum data that's already been computed.
+func tcpipChecksum(data []byte, csum uint32) uint16 {
+	// to handle odd lengths, we loop to length - 1, incrementing by 2, then
+	// handle the last byte specifically by checking against the original
+	// length.
+	length := len(data) - 1
+	for i := 0; i < length; i += 2 {
+		// For our test packet, doing this manually is about 25% faster
+		// (740 ns vs. 1000ns) than doing it by calling binary.BigEndian.Uint16.
+		csum += uint32(data[i]) << 8
+		csum += uint32(data[i+1])
+	}
+	if len(data)%2 == 1 {
+		csum += uint32(data[length]) << 8
+	}
+	for csum > 0xffff {
+		csum = (csum >> 16) + (csum & 0xffff)
+	}
+	return ^uint16(csum)
+}
+
+// computeChecksum computes a TCP or UDP checksum.  headerAndPayload is the
+// serialized TCP or UDP header plus its payload, with the checksum zero'd
+// out. headerProtocol is the IP protocol number of the upper-layer header.
+func (c *tcpipchecksum) computeChecksum(headerAndPayload []byte, headerProtocol IPProtocol) (uint16, error) {
+	if c.pseudoheader == nil {
+		return 0, errors.New("TCP/IP layer 4 checksum cannot be computed without network layer... call SetNetworkLayerForChecksum to set which layer to use")
+	}
+	length := uint32(len(headerAndPayload))
+	csum, err := c.pseudoheader.pseudoheaderChecksum()
+	if err != nil {
+		return 0, err
+	}
+	csum += uint32(headerProtocol)
+	csum += length & 0xffff
+	csum += length >> 16
+	return tcpipChecksum(headerAndPayload, csum), nil
+}
+
+// SetNetworkLayerForChecksum tells this layer which network layer is wrapping it.
+// This is needed for computing the checksum when serializing, since TCP/IP transport
+// layer checksums depend on fields in the IPv4 or IPv6 layer that contains it.
+// The passed in layer must be an *IPv4 or *IPv6.
+func (i *tcpipchecksum) SetNetworkLayerForChecksum(l gopacket.NetworkLayer) error {
+	switch v := l.(type) {
+	case *IPv4:
+		i.pseudoheader = v
+	case *IPv6:
+		i.pseudoheader = v
+	default:
+		return fmt.Errorf("cannot use layer type %v for tcp checksum network layer", l.LayerType())
+	}
+	return nil
+}
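// A serialization sketch (illustrative, not part of the vendored source): because
// TCP embeds tcpipchecksum, SetNetworkLayerForChecksum wires the IPv4 pseudo-header
// into the checksum computed when ComputeChecksums is set. Addresses, ports and the
// payload are assumed example values.
package sketch

import (
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// buildTCPSegment serializes IPv4+TCP with lengths and checksums filled in.
func buildTCPSegment() ([]byte, error) {
	ip := &layers.IPv4{
		Version: 4, TTL: 64, Protocol: layers.IPProtocolTCP,
		SrcIP: net.IP{10, 0, 0, 1}, DstIP: net.IP{10, 0, 0, 2},
	}
	tcp := &layers.TCP{SrcPort: 49152, DstPort: 80, SYN: true}
	// Without this call, computeChecksum returns the "cannot be computed
	// without network layer" error defined above.
	if err := tcp.SetNetworkLayerForChecksum(ip); err != nil {
		return nil, err
	}
	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
	if err := gopacket.SerializeLayers(buf, opts, ip, tcp, gopacket.Payload("hello")); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}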
diff --git a/vendor/github.com/google/gopacket/layers/test_creator.py b/vendor/github.com/google/gopacket/layers/test_creator.py
new file mode 100644
index 0000000..c92d276
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/test_creator.py
@@ -0,0 +1,103 @@
+#!/usr/bin/python
+# Copyright 2012 Google, Inc. All rights reserved.
+
+"""TestCreator creates test templates from pcap files."""
+
+import argparse
+import base64
+import glob
+import re
+import string
+import subprocess
+import sys
+
+
+class Packet(object):
+  """Helper class encapsulating packet from a pcap file."""
+
+  def __init__(self, packet_lines):
+    self.packet_lines = packet_lines
+    self.data = self._DecodeText(packet_lines)
+
+  @classmethod
+  def _DecodeText(cls, packet_lines):
+    packet_bytes = []
+    # First line is timestamp and stuff, skip it.
+    # Format: 0x0010:  0000 0020 3aff 3ffe 0000 0000 0000 0000  ....:.?.........
+
+    for line in packet_lines[1:]:
+      m = re.match(r'\s+0x[a-f\d]+:\s+((?:[\da-f]{2,4}\s)*)', line, re.IGNORECASE)
+      if m is None: continue
+      for hexpart in m.group(1).split():
+        packet_bytes.append(base64.b16decode(hexpart.upper()))
+    return ''.join(packet_bytes)
+
+  def Test(self, name, link_type):
+    """Yields a test using this packet, as a set of lines."""
+    yield '// testPacket%s is the packet:' % name
+    for line in self.packet_lines:
+      yield '//   ' + line
+    yield 'var testPacket%s = []byte{' % name
+    data = list(self.data)
+    while data:
+      linebytes, data = data[:16], data[16:]
+      yield ''.join(['\t'] + ['0x%02x, ' % ord(c) for c in linebytes])
+    yield '}'
+    yield 'func TestPacket%s(t *testing.T) {' % name
+    yield '\tp := gopacket.NewPacket(testPacket%s, LinkType%s, gopacket.Default)' % (name, link_type)
+    yield '\tif p.ErrorLayer() != nil {'
+    yield '\t\tt.Error("Failed to decode packet:", p.ErrorLayer().Error())'
+    yield '\t}'
+    yield '\tcheckLayers(p, []gopacket.LayerType{LayerType%s, FILL_ME_IN_WITH_ACTUAL_LAYERS}, t)' % link_type
+    yield '}'
+    yield 'func BenchmarkDecodePacket%s(b *testing.B) {' % name
+    yield '\tfor i := 0; i < b.N; i++ {'
+    yield '\t\tgopacket.NewPacket(testPacket%s, LinkType%s, gopacket.NoCopy)' % (name, link_type)
+    yield '\t}'
+    yield '}'
+
+
+
+def GetTcpdumpOutput(filename):
+  """Runs tcpdump on the given file, returning output as string."""
+  return subprocess.check_output(
+      ['tcpdump', '-XX', '-s', '0', '-n', '-r', filename])
+
+
+def TcpdumpOutputToPackets(output):
+  """Reads a pcap file with TCPDump, yielding Packet objects."""
+  pdata = []
+  for line in output.splitlines():
+    if line[0] not in string.whitespace and pdata:
+      yield Packet(pdata)
+      pdata = []
+    pdata.append(line)
+  if pdata:
+    yield Packet(pdata)
+
+
+def main():
+  class CustomHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
+    def _format_usage(self, usage, actions, groups, prefix=None):
+      header =('TestCreator creates gopacket tests using a pcap file.\n\n'
+               'Tests are written to standard out... they can then be \n'
+               'copied into the file of your choice and modified as \n'
+               'you see.\n\n')
+      return header + argparse.ArgumentDefaultsHelpFormatter._format_usage(
+        self, usage, actions, groups, prefix)
+
+  parser = argparse.ArgumentParser(formatter_class=CustomHelpFormatter)
+  parser.add_argument('--link_type', default='Ethernet', help='the link type (default: %(default)s)')
+  parser.add_argument('--name', default='Packet%d', help='the test name, must have "%d" inside it')
+  parser.add_argument('files', metavar='file.pcap', type=str, nargs='+', help='the files to process')
+
+  args = parser.parse_args()
+
+  for arg in args.files:
+    for path in glob.glob(arg):
+      for i, packet in enumerate(TcpdumpOutputToPackets(GetTcpdumpOutput(path))):
+        print '\n'.join(packet.Test(
+          args.name % i, args.link_type))
+
+if __name__ == '__main__':
+    main()
diff --git a/vendor/github.com/google/gopacket/layers/tls.go b/vendor/github.com/google/gopacket/layers/tls.go
new file mode 100644
index 0000000..5a155d4
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls.go
@@ -0,0 +1,283 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// TLSType defines the type of data after the TLS Record
+type TLSType uint8
+
+// TLSType known values.
+const (
+	TLSChangeCipherSpec TLSType = 20
+	TLSAlert            TLSType = 21
+	TLSHandshake        TLSType = 22
+	TLSApplicationData  TLSType = 23
+	TLSUnknown          TLSType = 255
+)
+
+// String shows the record type nicely formatted
+func (tt TLSType) String() string {
+	switch tt {
+	default:
+		return "Unknown"
+	case TLSChangeCipherSpec:
+		return "Change Cipher Spec"
+	case TLSAlert:
+		return "Alert"
+	case TLSHandshake:
+		return "Handshake"
+	case TLSApplicationData:
+		return "Application Data"
+	}
+}
+
+// TLSVersion represents the TLS version in numeric format
+type TLSVersion uint16
+
+// String shows the TLS version nicely formatted
+func (tv TLSVersion) String() string {
+	switch tv {
+	default:
+		return "Unknown"
+	case 0x0200:
+		return "SSL 2.0"
+	case 0x0300:
+		return "SSL 3.0"
+	case 0x0301:
+		return "TLS 1.0"
+	case 0x0302:
+		return "TLS 1.1"
+	case 0x0303:
+		return "TLS 1.2"
+	case 0x0304:
+		return "TLS 1.3"
+	}
+}
+
+// TLS is specified in RFC 5246
+//
+//  TLS Record Protocol
+//  0  1  2  3  4  5  6  7  8
+//  +--+--+--+--+--+--+--+--+
+//  |     Content Type      |
+//  +--+--+--+--+--+--+--+--+
+//  |    Version (major)    |
+//  +--+--+--+--+--+--+--+--+
+//  |    Version (minor)    |
+//  +--+--+--+--+--+--+--+--+
+//  |        Length         |
+//  +--+--+--+--+--+--+--+--+
+//  |        Length         |
+//  +--+--+--+--+--+--+--+--+
+
+// TLS is actually a slice of TLSRecord structures
+type TLS struct {
+	BaseLayer
+
+	// TLS Records
+	ChangeCipherSpec []TLSChangeCipherSpecRecord
+	Handshake        []TLSHandshakeRecord
+	AppData          []TLSAppDataRecord
+	Alert            []TLSAlertRecord
+}
+
+// TLSRecordHeader contains all the information that each TLS Record type should have
+type TLSRecordHeader struct {
+	ContentType TLSType
+	Version     TLSVersion
+	Length      uint16
+}
+
+// LayerType returns gopacket.LayerTypeTLS.
+func (t *TLS) LayerType() gopacket.LayerType { return LayerTypeTLS }
+
+// decodeTLS decodes the byte slice into a TLS type. It also
+// sets up the application layer in the PacketBuilder.
+func decodeTLS(data []byte, p gopacket.PacketBuilder) error {
+	t := &TLS{}
+	err := t.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+	p.AddLayer(t)
+	p.SetApplicationLayer(t)
+	return nil
+}
+
+// DecodeFromBytes decodes the slice into the TLS struct.
+func (t *TLS) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	t.BaseLayer.Contents = data
+	t.BaseLayer.Payload = nil
+
+	t.ChangeCipherSpec = t.ChangeCipherSpec[:0]
+	t.Handshake = t.Handshake[:0]
+	t.AppData = t.AppData[:0]
+	t.Alert = t.Alert[:0]
+
+	return t.decodeTLSRecords(data, df)
+}
+
+func (t *TLS) decodeTLSRecords(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 5 {
+		df.SetTruncated()
+		return errors.New("TLS record too short")
+	}
+
+	// since there are no further layers, the baselayer's content is
+	// pointing to this layer
+	// TODO: Consider removing this
+	t.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+
+	var h TLSRecordHeader
+	h.ContentType = TLSType(data[0])
+	h.Version = TLSVersion(binary.BigEndian.Uint16(data[1:3]))
+	h.Length = binary.BigEndian.Uint16(data[3:5])
+
+	if h.ContentType.String() == "Unknown" {
+		return errors.New("Unknown TLS record type")
+	}
+
+	hl := 5 // header length
+	tl := hl + int(h.Length)
+	if len(data) < tl {
+		df.SetTruncated()
+		return errors.New("TLS packet length mismatch")
+	}
+
+	switch h.ContentType {
+	default:
+		return errors.New("Unknown TLS record type")
+	case TLSChangeCipherSpec:
+		var r TLSChangeCipherSpecRecord
+		e := r.decodeFromBytes(h, data[hl:tl], df)
+		if e != nil {
+			return e
+		}
+		t.ChangeCipherSpec = append(t.ChangeCipherSpec, r)
+	case TLSAlert:
+		var r TLSAlertRecord
+		e := r.decodeFromBytes(h, data[hl:tl], df)
+		if e != nil {
+			return e
+		}
+		t.Alert = append(t.Alert, r)
+	case TLSHandshake:
+		var r TLSHandshakeRecord
+		e := r.decodeFromBytes(h, data[hl:tl], df)
+		if e != nil {
+			return e
+		}
+		t.Handshake = append(t.Handshake, r)
+	case TLSApplicationData:
+		var r TLSAppDataRecord
+		e := r.decodeFromBytes(h, data[hl:tl], df)
+		if e != nil {
+			return e
+		}
+		t.AppData = append(t.AppData, r)
+	}
+
+	if len(data) == tl {
+		return nil
+	}
+	return t.decodeTLSRecords(data[tl:len(data)], df)
+}
+
+// CanDecode implements gopacket.DecodingLayer.
+func (t *TLS) CanDecode() gopacket.LayerClass {
+	return LayerTypeTLS
+}
+
+// NextLayerType implements gopacket.DecodingLayer.
+func (t *TLS) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// Payload returns nil, since TLS encrypted payload is inside TLSAppDataRecord
+func (t *TLS) Payload() []byte {
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+func (t *TLS) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	totalLength := 0
+	for _, record := range t.ChangeCipherSpec {
+		if opts.FixLengths {
+			record.Length = 1
+		}
+		totalLength += 5 + 1 // length of header + record
+	}
+	for range t.Handshake {
+		totalLength += 5
+		// TODO
+	}
+	for _, record := range t.AppData {
+		if opts.FixLengths {
+			record.Length = uint16(len(record.Payload))
+		}
+		totalLength += 5 + len(record.Payload)
+	}
+	for _, record := range t.Alert {
+		if len(record.EncryptedMsg) == 0 {
+			if opts.FixLengths {
+				record.Length = 2
+			}
+			totalLength += 5 + 2
+		} else {
+			if opts.FixLengths {
+				record.Length = uint16(len(record.EncryptedMsg))
+			}
+			totalLength += 5 + len(record.EncryptedMsg)
+		}
+	}
+	data, err := b.PrependBytes(totalLength)
+	if err != nil {
+		return err
+	}
+	off := 0
+	for _, record := range t.ChangeCipherSpec {
+		off = encodeHeader(record.TLSRecordHeader, data, off)
+		data[off] = byte(record.Message)
+		off++
+	}
+	for _, record := range t.Handshake {
+		off = encodeHeader(record.TLSRecordHeader, data, off)
+		// TODO
+	}
+	for _, record := range t.AppData {
+		off = encodeHeader(record.TLSRecordHeader, data, off)
+		copy(data[off:], record.Payload)
+		off += len(record.Payload)
+	}
+	for _, record := range t.Alert {
+		off = encodeHeader(record.TLSRecordHeader, data, off)
+		if len(record.EncryptedMsg) == 0 {
+			data[off] = byte(record.Level)
+			data[off+1] = byte(record.Description)
+			off += 2
+		} else {
+			copy(data[off:], record.EncryptedMsg)
+			off += len(record.EncryptedMsg)
+		}
+	}
+	return nil
+}
+
+func encodeHeader(header TLSRecordHeader, data []byte, offset int) int {
+	data[offset] = byte(header.ContentType)
+	binary.BigEndian.PutUint16(data[offset+1:], uint16(header.Version))
+	binary.BigEndian.PutUint16(data[offset+3:], header.Length)
+
+	return offset + 5
+}
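// A decoding sketch (illustrative, not part of the vendored source): TCP only hands
// off to port-based decoders such as TLS when DecodeStreamsAsDatagrams is enabled.
// This assumes the library maps TCP port 443 to the TLS decoder and that a full TLS
// record starts at the beginning of the TCP payload.
package sketch

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// countTLSRecords decodes an Ethernet frame and reports how many TLS records of
// each kind were parsed into the TLS layer's slices.
func countTLSRecords(frame []byte) {
	opts := gopacket.DecodeOptions{DecodeStreamsAsDatagrams: true}
	pkt := gopacket.NewPacket(frame, layers.LayerTypeEthernet, opts)
	if l := pkt.Layer(layers.LayerTypeTLS); l != nil {
		tls := l.(*layers.TLS)
		fmt.Printf("handshake=%d appdata=%d alert=%d ccs=%d\n",
			len(tls.Handshake), len(tls.AppData), len(tls.Alert), len(tls.ChangeCipherSpec))
	}
}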
diff --git a/vendor/github.com/google/gopacket/layers/tls_alert.go b/vendor/github.com/google/gopacket/layers/tls_alert.go
new file mode 100644
index 0000000..0c5aee0
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_alert.go
@@ -0,0 +1,165 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// TLSAlertLevel defines the alert level data type
+type TLSAlertLevel uint8
+
+// TLSAlertDescr defines the alert description data type
+type TLSAlertDescr uint8
+
+const (
+	TLSAlertWarning      TLSAlertLevel = 1
+	TLSAlertFatal        TLSAlertLevel = 2
+	TLSAlertUnknownLevel TLSAlertLevel = 255
+
+	TLSAlertCloseNotify               TLSAlertDescr = 0
+	TLSAlertUnexpectedMessage         TLSAlertDescr = 10
+	TLSAlertBadRecordMac              TLSAlertDescr = 20
+	TLSAlertDecryptionFailedRESERVED  TLSAlertDescr = 21
+	TLSAlertRecordOverflow            TLSAlertDescr = 22
+	TLSAlertDecompressionFailure      TLSAlertDescr = 30
+	TLSAlertHandshakeFailure          TLSAlertDescr = 40
+	TLSAlertNoCertificateRESERVED     TLSAlertDescr = 41
+	TLSAlertBadCertificate            TLSAlertDescr = 42
+	TLSAlertUnsupportedCertificate    TLSAlertDescr = 43
+	TLSAlertCertificateRevoked        TLSAlertDescr = 44
+	TLSAlertCertificateExpired        TLSAlertDescr = 45
+	TLSAlertCertificateUnknown        TLSAlertDescr = 46
+	TLSAlertIllegalParameter          TLSAlertDescr = 47
+	TLSAlertUnknownCa                 TLSAlertDescr = 48
+	TLSAlertAccessDenied              TLSAlertDescr = 49
+	TLSAlertDecodeError               TLSAlertDescr = 50
+	TLSAlertDecryptError              TLSAlertDescr = 51
+	TLSAlertExportRestrictionRESERVED TLSAlertDescr = 60
+	TLSAlertProtocolVersion           TLSAlertDescr = 70
+	TLSAlertInsufficientSecurity      TLSAlertDescr = 71
+	TLSAlertInternalError             TLSAlertDescr = 80
+	TLSAlertUserCanceled              TLSAlertDescr = 90
+	TLSAlertNoRenegotiation           TLSAlertDescr = 100
+	TLSAlertUnsupportedExtension      TLSAlertDescr = 110
+	TLSAlertUnknownDescription        TLSAlertDescr = 255
+)
+
+//  TLS Alert
+//  0  1  2  3  4  5  6  7  8
+//  +--+--+--+--+--+--+--+--+
+//  |         Level         |
+//  +--+--+--+--+--+--+--+--+
+//  |      Description      |
+//  +--+--+--+--+--+--+--+--+
+
+// TLSAlertRecord contains all the information that each Alert Record type should have
+type TLSAlertRecord struct {
+	TLSRecordHeader
+
+	Level       TLSAlertLevel
+	Description TLSAlertDescr
+
+	EncryptedMsg []byte
+}
+
+// decodeFromBytes decodes the record body into the TLSAlertRecord struct.
+func (t *TLSAlertRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+	// TLS Record Header
+	t.ContentType = h.ContentType
+	t.Version = h.Version
+	t.Length = h.Length
+
+	if len(data) < 2 {
+		df.SetTruncated()
+		return errors.New("TLS Alert packet too short")
+	}
+
+	if t.Length == 2 {
+		t.Level = TLSAlertLevel(data[0])
+		t.Description = TLSAlertDescr(data[1])
+	} else {
+		t.Level = TLSAlertUnknownLevel
+		t.Description = TLSAlertUnknownDescription
+		t.EncryptedMsg = data
+	}
+
+	return nil
+}
+
+// String shows the TLS alert level nicely formatted
+func (al TLSAlertLevel) String() string {
+	switch al {
+	default:
+		return fmt.Sprintf("Unknown(%d)", al)
+	case TLSAlertWarning:
+		return "Warning"
+	case TLSAlertFatal:
+		return "Fatal"
+	}
+}
+
+// String shows the TLS alert description nicely formatted
+func (ad TLSAlertDescr) String() string {
+	switch ad {
+	default:
+		return "Unknown"
+	case TLSAlertCloseNotify:
+		return "close_notify"
+	case TLSAlertUnexpectedMessage:
+		return "unexpected_message"
+	case TLSAlertBadRecordMac:
+		return "bad_record_mac"
+	case TLSAlertDecryptionFailedRESERVED:
+		return "decryption_failed_RESERVED"
+	case TLSAlertRecordOverflow:
+		return "record_overflow"
+	case TLSAlertDecompressionFailure:
+		return "decompression_failure"
+	case TLSAlertHandshakeFailure:
+		return "handshake_failure"
+	case TLSAlertNoCertificateRESERVED:
+		return "no_certificate_RESERVED"
+	case TLSAlertBadCertificate:
+		return "bad_certificate"
+	case TLSAlertUnsupportedCertificate:
+		return "unsupported_certificate"
+	case TLSAlertCertificateRevoked:
+		return "certificate_revoked"
+	case TLSAlertCertificateExpired:
+		return "certificate_expired"
+	case TLSAlertCertificateUnknown:
+		return "certificate_unknown"
+	case TLSAlertIllegalParameter:
+		return "illegal_parameter"
+	case TLSAlertUnknownCa:
+		return "unknown_ca"
+	case TLSAlertAccessDenied:
+		return "access_denied"
+	case TLSAlertDecodeError:
+		return "decode_error"
+	case TLSAlertDecryptError:
+		return "decrypt_error"
+	case TLSAlertExportRestrictionRESERVED:
+		return "export_restriction_RESERVED"
+	case TLSAlertProtocolVersion:
+		return "protocol_version"
+	case TLSAlertInsufficientSecurity:
+		return "insufficient_security"
+	case TLSAlertInternalError:
+		return "internal_error"
+	case TLSAlertUserCanceled:
+		return "user_canceled"
+	case TLSAlertNoRenegotiation:
+		return "no_renegotiation"
+	case TLSAlertUnsupportedExtension:
+		return "unsupported_extension"
+	}
+}
diff --git a/vendor/github.com/google/gopacket/layers/tls_appdata.go b/vendor/github.com/google/gopacket/layers/tls_appdata.go
new file mode 100644
index 0000000..dedd1d5
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_appdata.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// TLSAppDataRecord contains all the information that each AppData record type should have
+type TLSAppDataRecord struct {
+	TLSRecordHeader
+	Payload []byte
+}
+
+// decodeFromBytes decodes the record body into the TLSAppDataRecord struct.
+func (t *TLSAppDataRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+	// TLS Record Header
+	t.ContentType = h.ContentType
+	t.Version = h.Version
+	t.Length = h.Length
+
+	if len(data) != int(t.Length) {
+		return errors.New("TLS Application Data length mismatch")
+	}
+
+	t.Payload = data
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/tls_cipherspec.go b/vendor/github.com/google/gopacket/layers/tls_cipherspec.go
new file mode 100644
index 0000000..8f3dc62
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_cipherspec.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"errors"
+
+	"github.com/google/gopacket"
+)
+
+// TLSchangeCipherSpec defines the message value inside ChangeCipherSpec Record
+type TLSchangeCipherSpec uint8
+
+const (
+	TLSChangecipherspecMessage TLSchangeCipherSpec = 1
+	TLSChangecipherspecUnknown TLSchangeCipherSpec = 255
+)
+
+//  TLS Change Cipher Spec
+//  0  1  2  3  4  5  6  7  8
+//  +--+--+--+--+--+--+--+--+
+//  |        Message        |
+//  +--+--+--+--+--+--+--+--+
+
+// TLSChangeCipherSpecRecord defines the type of data inside ChangeCipherSpec Record
+type TLSChangeCipherSpecRecord struct {
+	TLSRecordHeader
+
+	Message TLSchangeCipherSpec
+}
+
+// decodeFromBytes decodes the record body into the TLSChangeCipherSpecRecord struct.
+func (t *TLSChangeCipherSpecRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+	// TLS Record Header
+	t.ContentType = h.ContentType
+	t.Version = h.Version
+	t.Length = h.Length
+
+	if len(data) != 1 {
+		df.SetTruncated()
+		return errors.New("TLS Change Cipher Spec record incorrect length")
+	}
+
+	t.Message = TLSchangeCipherSpec(data[0])
+	if t.Message != TLSChangecipherspecMessage {
+		t.Message = TLSChangecipherspecUnknown
+	}
+
+	return nil
+}
+
+// String shows the message value nicely formatted
+func (ccs TLSchangeCipherSpec) String() string {
+	switch ccs {
+	default:
+		return "Unknown"
+	case TLSChangecipherspecMessage:
+		return "Change Cipher Spec Message"
+	}
+}
diff --git a/vendor/github.com/google/gopacket/layers/tls_handshake.go b/vendor/github.com/google/gopacket/layers/tls_handshake.go
new file mode 100644
index 0000000..e45e2c7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/tls_handshake.go
@@ -0,0 +1,28 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"github.com/google/gopacket"
+)
+
+// TLSHandshakeRecord defines the structure of a Handshake Record
+type TLSHandshakeRecord struct {
+	TLSRecordHeader
+}
+
+// decodeFromBytes decodes the record body into the TLSHandshakeRecord struct.
+func (t *TLSHandshakeRecord) decodeFromBytes(h TLSRecordHeader, data []byte, df gopacket.DecodeFeedback) error {
+	// TLS Record Header
+	t.ContentType = h.ContentType
+	t.Version = h.Version
+	t.Length = h.Length
+
+	// TODO
+
+	return nil
+}
diff --git a/vendor/github.com/google/gopacket/layers/udp.go b/vendor/github.com/google/gopacket/layers/udp.go
new file mode 100644
index 0000000..97e81c6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/udp.go
@@ -0,0 +1,133 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+// UDP is the layer for UDP headers.
+type UDP struct {
+	BaseLayer
+	SrcPort, DstPort UDPPort
+	Length           uint16
+	Checksum         uint16
+	sPort, dPort     []byte
+	tcpipchecksum
+}
+
+// LayerType returns gopacket.LayerTypeUDP
+func (u *UDP) LayerType() gopacket.LayerType { return LayerTypeUDP }
+
+func (udp *UDP) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		df.SetTruncated()
+		return fmt.Errorf("Invalid UDP header. Length %d less than 8", len(data))
+	}
+	udp.SrcPort = UDPPort(binary.BigEndian.Uint16(data[0:2]))
+	udp.sPort = data[0:2]
+	udp.DstPort = UDPPort(binary.BigEndian.Uint16(data[2:4]))
+	udp.dPort = data[2:4]
+	udp.Length = binary.BigEndian.Uint16(data[4:6])
+	udp.Checksum = binary.BigEndian.Uint16(data[6:8])
+	udp.BaseLayer = BaseLayer{Contents: data[:8]}
+	switch {
+	case udp.Length >= 8:
+		hlen := int(udp.Length)
+		if hlen > len(data) {
+			df.SetTruncated()
+			hlen = len(data)
+		}
+		udp.Payload = data[8:hlen]
+	case udp.Length == 0: // Jumbogram, use entire rest of data
+		udp.Payload = data[8:]
+	default:
+		return fmt.Errorf("UDP packet too small: %d bytes", udp.Length)
+	}
+	return nil
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (u *UDP) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	var jumbo bool
+
+	payload := b.Bytes()
+	if _, ok := u.pseudoheader.(*IPv6); ok {
+		if len(payload)+8 > 65535 {
+			jumbo = true
+		}
+	}
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+	binary.BigEndian.PutUint16(bytes, uint16(u.SrcPort))
+	binary.BigEndian.PutUint16(bytes[2:], uint16(u.DstPort))
+	if opts.FixLengths {
+		if jumbo {
+			u.Length = 0
+		} else {
+			u.Length = uint16(len(payload)) + 8
+		}
+	}
+	binary.BigEndian.PutUint16(bytes[4:], u.Length)
+	if opts.ComputeChecksums {
+		// zero out checksum bytes
+		bytes[6] = 0
+		bytes[7] = 0
+		csum, err := u.computeChecksum(b.Bytes(), IPProtocolUDP)
+		if err != nil {
+			return err
+		}
+		u.Checksum = csum
+	}
+	binary.BigEndian.PutUint16(bytes[6:], u.Checksum)
+	return nil
+}
+
+func (u *UDP) CanDecode() gopacket.LayerClass {
+	return LayerTypeUDP
+}
+
+// NextLayerType uses the destination port to select the
+// next decoder. It first tries to decode via the
+// destination port, then falls back to the source port.
+func (u *UDP) NextLayerType() gopacket.LayerType {
+	if lt := u.DstPort.LayerType(); lt != gopacket.LayerTypePayload {
+		return lt
+	}
+	return u.SrcPort.LayerType()
+}
+
+func decodeUDP(data []byte, p gopacket.PacketBuilder) error {
+	udp := &UDP{}
+	err := udp.DecodeFromBytes(data, p)
+	p.AddLayer(udp)
+	p.SetTransportLayer(udp)
+	if err != nil {
+		return err
+	}
+	return p.NextDecoder(udp.NextLayerType())
+}
+
+func (u *UDP) TransportFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointUDPPort, u.sPort, u.dPort)
+}
+
+// For testing only
+func (u *UDP) SetInternalPortsForTesting() {
+	u.sPort = make([]byte, 2)
+	u.dPort = make([]byte, 2)
+	binary.BigEndian.PutUint16(u.sPort, uint16(u.SrcPort))
+	binary.BigEndian.PutUint16(u.dPort, uint16(u.DstPort))
+}
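// A serialization sketch (illustrative, not part of the vendored source): UDP fixes
// its Length field and computes the checksum over the IPv4 pseudo-header when the
// corresponding SerializeOptions are set. Addresses and ports are assumed example
// values.
package sketch

import (
	"net"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// buildUDPDatagram serializes IPv4+UDP around an arbitrary payload.
func buildUDPDatagram(payload []byte) ([]byte, error) {
	ip := &layers.IPv4{
		Version: 4, TTL: 64, Protocol: layers.IPProtocolUDP,
		SrcIP: net.IP{192, 168, 1, 10}, DstIP: net.IP{192, 168, 1, 20},
	}
	udp := &layers.UDP{SrcPort: 5000, DstPort: 53}
	if err := udp.SetNetworkLayerForChecksum(ip); err != nil {
		return nil, err
	}
	buf := gopacket.NewSerializeBuffer()
	opts := gopacket.SerializeOptions{FixLengths: true, ComputeChecksums: true}
	if err := gopacket.SerializeLayers(buf, opts, ip, udp, gopacket.Payload(payload)); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}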
diff --git a/vendor/github.com/google/gopacket/layers/udplite.go b/vendor/github.com/google/gopacket/layers/udplite.go
new file mode 100644
index 0000000..7d84c51
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/udplite.go
@@ -0,0 +1,44 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+// Copyright 2009-2011 Andreas Krennmair. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"github.com/google/gopacket"
+)
+
+// UDPLite is the layer for UDP-Lite headers (rfc 3828).
+type UDPLite struct {
+	BaseLayer
+	SrcPort, DstPort UDPLitePort
+	ChecksumCoverage uint16
+	Checksum         uint16
+	sPort, dPort     []byte
+}
+
+// LayerType returns gopacket.LayerTypeUDPLite
+func (u *UDPLite) LayerType() gopacket.LayerType { return LayerTypeUDPLite }
+
+func decodeUDPLite(data []byte, p gopacket.PacketBuilder) error {
+	udp := &UDPLite{
+		SrcPort:          UDPLitePort(binary.BigEndian.Uint16(data[0:2])),
+		sPort:            data[0:2],
+		DstPort:          UDPLitePort(binary.BigEndian.Uint16(data[2:4])),
+		dPort:            data[2:4],
+		ChecksumCoverage: binary.BigEndian.Uint16(data[4:6]),
+		Checksum:         binary.BigEndian.Uint16(data[6:8]),
+		BaseLayer:        BaseLayer{data[:8], data[8:]},
+	}
+	p.AddLayer(udp)
+	p.SetTransportLayer(udp)
+	return p.NextDecoder(gopacket.LayerTypePayload)
+}
+
+func (u *UDPLite) TransportFlow() gopacket.Flow {
+	return gopacket.NewFlow(EndpointUDPLitePort, u.sPort, u.dPort)
+}
diff --git a/vendor/github.com/google/gopacket/layers/usb.go b/vendor/github.com/google/gopacket/layers/usb.go
new file mode 100644
index 0000000..d611e0f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/usb.go
@@ -0,0 +1,292 @@
+// Copyright 2014 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"github.com/google/gopacket"
+)
+
+type USBEventType uint8
+
+const (
+	USBEventTypeSubmit   USBEventType = 'S'
+	USBEventTypeComplete USBEventType = 'C'
+	USBEventTypeError    USBEventType = 'E'
+)
+
+func (a USBEventType) String() string {
+	switch a {
+	case USBEventTypeSubmit:
+		return "SUBMIT"
+	case USBEventTypeComplete:
+		return "COMPLETE"
+	case USBEventTypeError:
+		return "ERROR"
+	default:
+		return "Unknown event type"
+	}
+}
+
+type USBRequestBlockSetupRequest uint8
+
+const (
+	USBRequestBlockSetupRequestGetStatus        USBRequestBlockSetupRequest = 0x00
+	USBRequestBlockSetupRequestClearFeature     USBRequestBlockSetupRequest = 0x01
+	USBRequestBlockSetupRequestSetFeature       USBRequestBlockSetupRequest = 0x03
+	USBRequestBlockSetupRequestSetAddress       USBRequestBlockSetupRequest = 0x05
+	USBRequestBlockSetupRequestGetDescriptor    USBRequestBlockSetupRequest = 0x06
+	USBRequestBlockSetupRequestSetDescriptor    USBRequestBlockSetupRequest = 0x07
+	USBRequestBlockSetupRequestGetConfiguration USBRequestBlockSetupRequest = 0x08
+	USBRequestBlockSetupRequestSetConfiguration USBRequestBlockSetupRequest = 0x09
+	USBRequestBlockSetupRequestSetIdle          USBRequestBlockSetupRequest = 0x0a
+)
+
+func (a USBRequestBlockSetupRequest) String() string {
+	switch a {
+	case USBRequestBlockSetupRequestGetStatus:
+		return "GET_STATUS"
+	case USBRequestBlockSetupRequestClearFeature:
+		return "CLEAR_FEATURE"
+	case USBRequestBlockSetupRequestSetFeature:
+		return "SET_FEATURE"
+	case USBRequestBlockSetupRequestSetAddress:
+		return "SET_ADDRESS"
+	case USBRequestBlockSetupRequestGetDescriptor:
+		return "GET_DESCRIPTOR"
+	case USBRequestBlockSetupRequestSetDescriptor:
+		return "SET_DESCRIPTOR"
+	case USBRequestBlockSetupRequestGetConfiguration:
+		return "GET_CONFIGURATION"
+	case USBRequestBlockSetupRequestSetConfiguration:
+		return "SET_CONFIGURATION"
+	case USBRequestBlockSetupRequestSetIdle:
+		return "SET_IDLE"
+	default:
+		return "UNKNOWN"
+	}
+}
+
+type USBTransportType uint8
+
+const (
+	USBTransportTypeTransferIn  USBTransportType = 0x80 // Indicates send or receive
+	USBTransportTypeIsochronous USBTransportType = 0x00 // Isochronous transfers occur continuously and periodically. They typically contain time sensitive information, such as an audio or video stream.
+	USBTransportTypeInterrupt   USBTransportType = 0x01 // Interrupt transfers are typically non-periodic, small device "initiated" communication requiring bounded latency, such as pointing devices or keyboards.
+	USBTransportTypeControl     USBTransportType = 0x02 // Control transfers are typically used for command and status operations.
+	USBTransportTypeBulk        USBTransportType = 0x03 // Bulk transfers can be used for large bursty data, using all remaining available bandwidth, no guarantees on bandwidth or latency, such as file transfers.
+)
+
+type USBDirectionType uint8
+
+const (
+	USBDirectionTypeUnknown USBDirectionType = iota
+	USBDirectionTypeIn
+	USBDirectionTypeOut
+)
+
+func (a USBDirectionType) String() string {
+	switch a {
+	case USBDirectionTypeIn:
+		return "In"
+	case USBDirectionTypeOut:
+		return "Out"
+	default:
+		return "Unknown direction type"
+	}
+}
+
+// The reference at http://www.beyondlogic.org/usbnutshell/usb1.shtml contains more information about the protocol.
+type USB struct {
+	BaseLayer
+	ID             uint64
+	EventType      USBEventType
+	TransferType   USBTransportType
+	Direction      USBDirectionType
+	EndpointNumber uint8
+	DeviceAddress  uint8
+	BusID          uint16
+	TimestampSec   int64
+	TimestampUsec  int32
+	Setup          bool
+	Data           bool
+	Status         int32
+	UrbLength      uint32
+	UrbDataLength  uint32
+
+	UrbInterval            uint32
+	UrbStartFrame          uint32
+	UrbCopyOfTransferFlags uint32
+	IsoNumDesc             uint32
+}
+
+func (u *USB) LayerType() gopacket.LayerType { return LayerTypeUSB }
+
+func (m *USB) NextLayerType() gopacket.LayerType {
+	if m.Setup {
+		return LayerTypeUSBRequestBlockSetup
+	} else if m.Data {
+	}
+
+	return m.TransferType.LayerType()
+}
+
+func decodeUSB(data []byte, p gopacket.PacketBuilder) error {
+	d := &USB{}
+
+	return decodingLayerDecoder(d, data, p)
+}
+
+func (m *USB) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 40 {
+		df.SetTruncated()
+		return errors.New("USB < 40 bytes")
+	}
+	m.ID = binary.LittleEndian.Uint64(data[0:8])
+	m.EventType = USBEventType(data[8])
+	m.TransferType = USBTransportType(data[9])
+
+	m.EndpointNumber = data[10] & 0x7f
+	if data[10]&uint8(USBTransportTypeTransferIn) > 0 {
+		m.Direction = USBDirectionTypeIn
+	} else {
+		m.Direction = USBDirectionTypeOut
+	}
+
+	m.DeviceAddress = data[11]
+	m.BusID = binary.LittleEndian.Uint16(data[12:14])
+
+	if uint(data[14]) == 0 {
+		m.Setup = true
+	}
+
+	if uint(data[15]) == 0 {
+		m.Data = true
+	}
+
+	m.TimestampSec = int64(binary.LittleEndian.Uint64(data[16:24]))
+	m.TimestampUsec = int32(binary.LittleEndian.Uint32(data[24:28]))
+	m.Status = int32(binary.LittleEndian.Uint32(data[28:32]))
+	m.UrbLength = binary.LittleEndian.Uint32(data[32:36])
+	m.UrbDataLength = binary.LittleEndian.Uint32(data[36:40])
+
+	m.Contents = data[:40]
+	m.Payload = data[40:]
+
+	if m.Setup {
+		m.Payload = data[40:]
+	} else if m.Data {
+		m.Payload = data[uint32(len(data))-m.UrbDataLength:]
+	}
+
+	// if 64 bit, dissect_linux_usb_pseudo_header_ext
+	if false {
+		m.UrbInterval = binary.LittleEndian.Uint32(data[40:44])
+		m.UrbStartFrame = binary.LittleEndian.Uint32(data[44:48])
+		m.UrbDataLength = binary.LittleEndian.Uint32(data[48:52])
+		m.IsoNumDesc = binary.LittleEndian.Uint32(data[52:56])
+		m.Contents = data[:56]
+		m.Payload = data[56:]
+	}
+
+	// crc5 or crc16
+	// eop (end of packet)
+
+	return nil
+}
+
+type USBRequestBlockSetup struct {
+	BaseLayer
+	RequestType uint8
+	Request     USBRequestBlockSetupRequest
+	Value       uint16
+	Index       uint16
+	Length      uint16
+}
+
+func (u *USBRequestBlockSetup) LayerType() gopacket.LayerType { return LayerTypeUSBRequestBlockSetup }
+
+func (m *USBRequestBlockSetup) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func (m *USBRequestBlockSetup) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.RequestType = data[0]
+	m.Request = USBRequestBlockSetupRequest(data[1])
+	m.Value = binary.LittleEndian.Uint16(data[2:4])
+	m.Index = binary.LittleEndian.Uint16(data[4:6])
+	m.Length = binary.LittleEndian.Uint16(data[6:8])
+	m.Contents = data[:8]
+	m.Payload = data[8:]
+	return nil
+}
+
+func decodeUSBRequestBlockSetup(data []byte, p gopacket.PacketBuilder) error {
+	d := &USBRequestBlockSetup{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+type USBControl struct {
+	BaseLayer
+}
+
+func (u *USBControl) LayerType() gopacket.LayerType { return LayerTypeUSBControl }
+
+func (m *USBControl) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func (m *USBControl) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Contents = data
+	return nil
+}
+
+func decodeUSBControl(data []byte, p gopacket.PacketBuilder) error {
+	d := &USBControl{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+type USBInterrupt struct {
+	BaseLayer
+}
+
+func (u *USBInterrupt) LayerType() gopacket.LayerType { return LayerTypeUSBInterrupt }
+
+func (m *USBInterrupt) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func (m *USBInterrupt) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Contents = data
+	return nil
+}
+
+func decodeUSBInterrupt(data []byte, p gopacket.PacketBuilder) error {
+	d := &USBInterrupt{}
+	return decodingLayerDecoder(d, data, p)
+}
+
+type USBBulk struct {
+	BaseLayer
+}
+
+func (u *USBBulk) LayerType() gopacket.LayerType { return LayerTypeUSBBulk }
+
+func (m *USBBulk) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypePayload
+}
+
+func (m *USBBulk) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	m.Contents = data
+	return nil
+}
+
+func decodeUSBBulk(data []byte, p gopacket.PacketBuilder) error {
+	d := &USBBulk{}
+	return decodingLayerDecoder(d, data, p)
+}
diff --git a/vendor/github.com/google/gopacket/layers/vrrp.go b/vendor/github.com/google/gopacket/layers/vrrp.go
new file mode 100644
index 0000000..ffaafe6
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/vrrp.go
@@ -0,0 +1,156 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"net"
+
+	"github.com/google/gopacket"
+)
+
+/*
+	This layer provides decoding for Virtual Router Redundancy Protocol (VRRP) v2.
+	https://tools.ietf.org/html/rfc3768#section-5
+    0                   1                   2                   3
+    0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |Version| Type  | Virtual Rtr ID|   Priority    | Count IP Addrs|
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |   Auth Type   |   Adver Int   |          Checksum             |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |                         IP Address (1)                        |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |                            .                                  |
+   |                            .                                  |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |                         IP Address (n)                        |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |                     Authentication Data (1)                   |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+   |                     Authentication Data (2)                   |
+   +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+*/
+
+type VRRPv2Type uint8
+type VRRPv2AuthType uint8
+
+const (
+	VRRPv2Advertisement VRRPv2Type = 0x01 // router advertisement
+)
+
+// String conversions for VRRP message types
+func (v VRRPv2Type) String() string {
+	switch v {
+	case VRRPv2Advertisement:
+		return "VRRPv2 Advertisement"
+	default:
+		return ""
+	}
+}
+
+const (
+	VRRPv2AuthNoAuth    VRRPv2AuthType = 0x00 // No Authentication
+	VRRPv2AuthReserved1 VRRPv2AuthType = 0x01 // Reserved field 1
+	VRRPv2AuthReserved2 VRRPv2AuthType = 0x02 // Reserved field 2
+)
+
+func (v VRRPv2AuthType) String() string {
+	switch v {
+	case VRRPv2AuthNoAuth:
+		return "No Authentication"
+	case VRRPv2AuthReserved1:
+		return "Reserved"
+	case VRRPv2AuthReserved2:
+		return "Reserved"
+	default:
+		return ""
+	}
+}
+
+// VRRPv2 represents a VRRP v2 message.
+type VRRPv2 struct {
+	BaseLayer
+	Version      uint8          // The version field specifies the VRRP protocol version of this packet (v2)
+	Type         VRRPv2Type     // The type field specifies the type of this VRRP packet.  The only type defined in v2 is ADVERTISEMENT
+	VirtualRtrID uint8          // identifies the virtual router this packet is reporting status for
+	Priority     uint8          // specifies the sending VRRP router's priority for the virtual router (100 = default)
+	CountIPAddr  uint8          // The number of IP addresses contained in this VRRP advertisement.
+	AuthType     VRRPv2AuthType // identifies the authentication method being utilized
+	AdverInt     uint8          // The Advertisement interval indicates the time interval (in seconds) between ADVERTISEMENTS.  The default is 1 second
+	Checksum     uint16         // used to detect data corruption in the VRRP message.
+	IPAddress    []net.IP       // one or more IP addresses associated with the virtual router. Specified in the CountIPAddr field.
+}
+
+// LayerType returns LayerTypeVRRP for VRRP v2 message.
+func (v *VRRPv2) LayerType() gopacket.LayerType { return LayerTypeVRRP }
+
+func (v *VRRPv2) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+
+	v.BaseLayer = BaseLayer{Contents: data[:len(data)]}
+	v.Version = data[0] >> 4 // high nibble == VRRP version. We're expecting v2
+
+	v.Type = VRRPv2Type(data[0] & 0x0F) // low nibble == VRRP type. Expecting 1 (advertisement)
+	if v.Type != 1 {
+		// rfc3768: A packet with unknown type MUST be discarded.
+		return errors.New("Unrecognized VRRPv2 type field.")
+	}
+
+	v.VirtualRtrID = data[1]
+	v.Priority = data[2]
+
+	v.CountIPAddr = data[3]
+	if v.CountIPAddr < 1 {
+		return errors.New("VRRPv2 number of IP addresses is not valid.")
+	}
+
+	v.AuthType = VRRPv2AuthType(data[4])
+	v.AdverInt = uint8(data[5])
+	v.Checksum = binary.BigEndian.Uint16(data[6:8])
+
+	// populate the IPAddress field. The number of addresses is specified in the v.CountIPAddr field
+	// offset references the starting byte containing the list of ip addresses
+	offset := 8
+	for i := uint8(0); i < v.CountIPAddr; i++ {
+		v.IPAddress = append(v.IPAddress, data[offset:offset+4])
+		offset += 4
+	}
+
+	//	any trailing packets here may be authentication data and *should* be ignored in v2 as per RFC
+	//
+	//			5.3.10.  Authentication Data
+	//
+	//			The authentication string is currently only used to maintain
+	//			backwards compatibility with RFC 2338.  It SHOULD be set to zero on
+	//	   		transmission and ignored on reception.
+	return nil
+}
+
+// CanDecode specifies the layer type that this layer is able to decode.
+func (v *VRRPv2) CanDecode() gopacket.LayerClass {
+	return LayerTypeVRRP
+}
+
+// NextLayerType specifies the next layer that should be decoded. VRRP does not contain any further payload, so we return LayerTypeZero
+func (v *VRRPv2) NextLayerType() gopacket.LayerType {
+	return gopacket.LayerTypeZero
+}
+
+// Payload returns nil; the VRRP packet does not carry any payload data.
+func (v *VRRPv2) Payload() []byte {
+	return nil
+}
+
+// decodeVRRP will parse VRRP v2
+func decodeVRRP(data []byte, p gopacket.PacketBuilder) error {
+	if len(data) < 8 {
+		return errors.New("Not a valid VRRP packet. Packet length is too small.")
+	}
+	v := &VRRPv2{}
+	return decodingLayerDecoder(v, data, p)
+}
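// A read-out sketch (illustrative, not part of the vendored source): once a packet
// carrying VRRP (IP protocol 112) has been decoded, the advertised virtual-router
// parameters and the IP address list populated above can be read directly.
package sketch

import (
	"fmt"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// printVRRP prints the virtual router ID, priority, interval and virtual IPs.
func printVRRP(pkt gopacket.Packet) {
	if l := pkt.Layer(layers.LayerTypeVRRP); l != nil {
		v := l.(*layers.VRRPv2)
		fmt.Printf("VRID=%d priority=%d interval=%ds\n", v.VirtualRtrID, v.Priority, v.AdverInt)
		for _, ip := range v.IPAddress {
			fmt.Println("  virtual IP:", ip)
		}
	}
}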
diff --git a/vendor/github.com/google/gopacket/layers/vxlan.go b/vendor/github.com/google/gopacket/layers/vxlan.go
new file mode 100644
index 0000000..e479cd8
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers/vxlan.go
@@ -0,0 +1,123 @@
+// Copyright 2016 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package layers
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+
+	"github.com/google/gopacket"
+)
+
+//  VXLAN is specified in RFC 7348 https://tools.ietf.org/html/rfc7348
+//  G, D, A, Group Policy ID from https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+//  0                   1                   2                   3
+//  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+//  0             8               16              24              32
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |G|R|R|R|I|R|R|R|R|D|R|R|A|R|R|R|       Group Policy ID         |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |     24 bit VXLAN Network Identifier           |   Reserved    |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+// VXLAN is a VXLAN packet header
+type VXLAN struct {
+	BaseLayer
+	ValidIDFlag      bool   // 'I' bit per RFC 7348
+	VNI              uint32 // 'VXLAN Network Identifier' 24 bits per RFC 7348
+	GBPExtension     bool   // 'G' bit per Group Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+	GBPDontLearn     bool   // 'D' bit per Group Policy
+	GBPApplied       bool   // 'A' bit per Group Policy
+	GBPGroupPolicyID uint16 // 'Group Policy ID' 16 bits per Group Policy
+}
+
+// LayerType returns LayerTypeVXLAN
+func (vx *VXLAN) LayerType() gopacket.LayerType { return LayerTypeVXLAN }
+
+// CanDecode returns the layer type this DecodingLayer can decode
+func (vx *VXLAN) CanDecode() gopacket.LayerClass {
+	return LayerTypeVXLAN
+}
+
+// NextLayerType returns the next layer we should see after vxlan
+func (vx *VXLAN) NextLayerType() gopacket.LayerType {
+	return LayerTypeEthernet
+}
+
+// DecodeFromBytes takes a byte buffer and decodes
+func (vx *VXLAN) DecodeFromBytes(data []byte, df gopacket.DecodeFeedback) error {
+	if len(data) < 8 {
+		return errors.New("vxlan packet too small")
+	}
+	// VNI is a 24bit number, Uint32 requires 32 bits
+	var buf [4]byte
+	copy(buf[1:], data[4:7])
+
+	// RFC 7348 https://tools.ietf.org/html/rfc7348
+	vx.ValidIDFlag = data[0]&0x08 > 0        // 'I' bit per RFC7348
+	vx.VNI = binary.BigEndian.Uint32(buf[:]) // VXLAN Network Identifier per RFC7348
+
+	// Group Based Policy https://tools.ietf.org/html/draft-smith-vxlan-group-policy-00
+	vx.GBPExtension = data[0]&0x80 > 0                       // 'G' bit per the group policy draft
+	vx.GBPDontLearn = data[1]&0x40 > 0                       // 'D' bit - the egress VTEP MUST NOT learn the source address of the encapsulated frame.
+	vx.GBPApplied = data[1]&0x80 > 0                         // 'A' bit - indicates that the group policy has already been applied to this packet.
+	vx.GBPGroupPolicyID = binary.BigEndian.Uint16(data[2:4]) // Policy ID as per the group policy draft
+
+	// Layer information
+	const vxlanLength = 8
+	vx.Contents = data[:vxlanLength]
+	vx.Payload = data[vxlanLength:]
+
+	return nil
+
+}
+
+func decodeVXLAN(data []byte, p gopacket.PacketBuilder) error {
+	vx := &VXLAN{}
+	err := vx.DecodeFromBytes(data, p)
+	if err != nil {
+		return err
+	}
+
+	p.AddLayer(vx)
+	return p.NextDecoder(LinkTypeEthernet)
+}
+
+// SerializeTo writes the serialized form of this layer into the
+// SerializationBuffer, implementing gopacket.SerializableLayer.
+// See the docs for gopacket.SerializableLayer for more info.
+func (vx *VXLAN) SerializeTo(b gopacket.SerializeBuffer, opts gopacket.SerializeOptions) error {
+	bytes, err := b.PrependBytes(8)
+	if err != nil {
+		return err
+	}
+
+	// PrependBytes does not guarantee that bytes are zeroed.  Setting flags via OR requires that they start off at zero
+	bytes[0] = 0
+	bytes[1] = 0
+
+	if vx.ValidIDFlag {
+		bytes[0] |= 0x08
+	}
+	if vx.GBPExtension {
+		bytes[0] |= 0x80
+	}
+	if vx.GBPDontLearn {
+		bytes[1] |= 0x40
+	}
+	if vx.GBPApplied {
+		bytes[1] |= 0x80
+	}
+
+	binary.BigEndian.PutUint16(bytes[2:4], vx.GBPGroupPolicyID)
+	if vx.VNI >= 1<<24 {
+		return fmt.Errorf("Virtual Network Identifier = %x exceeds max for 24-bit uint", vx.VNI)
+	}
+	binary.BigEndian.PutUint32(bytes[4:8], vx.VNI<<8)
+	return nil
+}
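// A serialization sketch (illustrative, not part of the vendored source): SerializeTo
// above rejects any VNI that does not fit in 24 bits, so a small VNI is used here;
// the inner Ethernet frame is passed through as an opaque payload.
package sketch

import (
	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// buildVXLANHeader prepends an 8-byte VXLAN header to an already-serialized frame.
func buildVXLANHeader(innerFrame []byte) ([]byte, error) {
	vx := &layers.VXLAN{ValidIDFlag: true, VNI: 42}
	buf := gopacket.NewSerializeBuffer()
	if err := gopacket.SerializeLayers(buf, gopacket.SerializeOptions{}, vx, gopacket.Payload(innerFrame)); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}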
diff --git a/vendor/github.com/google/gopacket/layers_decoder.go b/vendor/github.com/google/gopacket/layers_decoder.go
new file mode 100644
index 0000000..8c1f108
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layers_decoder.go
@@ -0,0 +1,101 @@
+// Copyright 2019 The GoPacket Authors. All rights reserved.
+
+package gopacket
+
+// Created by gen.go, don't edit manually
+// Generated at 2019-06-18 11:37:31.308731293 +0600 +06 m=+0.000842599
+
+// LayersDecoder returns DecodingLayerFunc for specified
+// DecodingLayerContainer, LayerType value to start decoding with and
+// some DecodeFeedback.
+func LayersDecoder(dl DecodingLayerContainer, first LayerType, df DecodeFeedback) DecodingLayerFunc {
+	firstDec, ok := dl.Decoder(first)
+	if !ok {
+		return func([]byte, *[]LayerType) (LayerType, error) {
+			return first, nil
+		}
+	}
+	if dlc, ok := dl.(DecodingLayerSparse); ok {
+		return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+			*decoded = (*decoded)[:0] // Truncated decoded layers.
+			typ := first
+			decoder := firstDec
+			for {
+				if err := decoder.DecodeFromBytes(data, df); err != nil {
+					return LayerTypeZero, err
+				}
+				*decoded = append(*decoded, typ)
+				typ = decoder.NextLayerType()
+				if data = decoder.LayerPayload(); len(data) == 0 {
+					break
+				}
+				if decoder, ok = dlc.Decoder(typ); !ok {
+					return typ, nil
+				}
+			}
+			return LayerTypeZero, nil
+		}
+	}
+	if dlc, ok := dl.(DecodingLayerArray); ok {
+		return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+			*decoded = (*decoded)[:0] // Truncated decoded layers.
+			typ := first
+			decoder := firstDec
+			for {
+				if err := decoder.DecodeFromBytes(data, df); err != nil {
+					return LayerTypeZero, err
+				}
+				*decoded = append(*decoded, typ)
+				typ = decoder.NextLayerType()
+				if data = decoder.LayerPayload(); len(data) == 0 {
+					break
+				}
+				if decoder, ok = dlc.Decoder(typ); !ok {
+					return typ, nil
+				}
+			}
+			return LayerTypeZero, nil
+		}
+	}
+	if dlc, ok := dl.(DecodingLayerMap); ok {
+		return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+			*decoded = (*decoded)[:0] // Truncated decoded layers.
+			typ := first
+			decoder := firstDec
+			for {
+				if err := decoder.DecodeFromBytes(data, df); err != nil {
+					return LayerTypeZero, err
+				}
+				*decoded = append(*decoded, typ)
+				typ = decoder.NextLayerType()
+				if data = decoder.LayerPayload(); len(data) == 0 {
+					break
+				}
+				if decoder, ok = dlc.Decoder(typ); !ok {
+					return typ, nil
+				}
+			}
+			return LayerTypeZero, nil
+		}
+	}
+	dlc := dl
+	return func(data []byte, decoded *[]LayerType) (LayerType, error) {
+		*decoded = (*decoded)[:0] // Truncated decoded layers.
+		typ := first
+		decoder := firstDec
+		for {
+			if err := decoder.DecodeFromBytes(data, df); err != nil {
+				return LayerTypeZero, err
+			}
+			*decoded = append(*decoded, typ)
+			typ = decoder.NextLayerType()
+			if data = decoder.LayerPayload(); len(data) == 0 {
+				break
+			}
+			if decoder, ok = dlc.Decoder(typ); !ok {
+				return typ, nil
+			}
+		}
+		return LayerTypeZero, nil
+	}
+}
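// A usage sketch (illustrative, not part of the generated file): the decode loop in
// LayersDecoder mirrors what gopacket.DecodingLayerParser, defined elsewhere in the
// library, does for a fixed set of layers; that parser is shown here instead of
// hand-building a DecodingLayerContainer.
package sketch

import (
	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// decodeFast reuses preallocated layer structs to decode Ethernet/IPv4/TCP frames
// without per-packet allocations.
func decodeFast(data []byte) ([]gopacket.LayerType, error) {
	var (
		eth     layers.Ethernet
		ip4     layers.IPv4
		tcp     layers.TCP
		payload gopacket.Payload
	)
	parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &tcp, &payload)
	decoded := make([]gopacket.LayerType, 0, 4)
	err := parser.DecodeLayers(data, &decoded)
	return decoded, err
}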
diff --git a/vendor/github.com/google/gopacket/layertype.go b/vendor/github.com/google/gopacket/layertype.go
new file mode 100644
index 0000000..3abfee1
--- /dev/null
+++ b/vendor/github.com/google/gopacket/layertype.go
@@ -0,0 +1,111 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+	"strconv"
+)
+
+// LayerType is a unique identifier for each type of layer.  This enumeration
+// does not match with any externally available numbering scheme... it's solely
+// usable/useful within this library as a means for requesting layer types
+// (see Packet.Layer) and determining which types of layers have been decoded.
+//
+// New LayerTypes may be created by calling gopacket.RegisterLayerType.
+type LayerType int64
+
+// LayerTypeMetadata contains metadata associated with each LayerType.
+type LayerTypeMetadata struct {
+	// Name is the string returned by each layer type's String method.
+	Name string
+	// Decoder is the decoder to use when the layer type is passed in as a
+	// Decoder.
+	Decoder Decoder
+}
+
+type layerTypeMetadata struct {
+	inUse bool
+	LayerTypeMetadata
+}
+
+// DecodersByLayerName maps layer names to decoders for those layers.
+// This allows users to specify decoders by name to a program and have that
+// program pick the correct decoder accordingly.
+var DecodersByLayerName = map[string]Decoder{}
+
+const maxLayerType = 2000
+
+var ltMeta [maxLayerType]layerTypeMetadata
+var ltMetaMap = map[LayerType]layerTypeMetadata{}
+
+// RegisterLayerType creates a new layer type and registers it globally.
+// The number passed in must be unique, or a runtime panic will occur.  Numbers
+// 0-999 are reserved for the gopacket library.  Numbers 1000-1999 should be
+// used for common application-specific types, and are very fast.  Any other
+// number (negative or >= 2000) may be used for uncommon application-specific
+// types, and are somewhat slower (they require a map lookup over an array
+// index).
+func RegisterLayerType(num int, meta LayerTypeMetadata) LayerType {
+	if 0 <= num && num < maxLayerType {
+		if ltMeta[num].inUse {
+			panic("Layer type already exists")
+		}
+	} else {
+		if ltMetaMap[LayerType(num)].inUse {
+			panic("Layer type already exists")
+		}
+	}
+	return OverrideLayerType(num, meta)
+}
+
+// OverrideLayerType acts like RegisterLayerType, except that if the layer type
+// has already been registered, it overrides the metadata with the passed-in
+// metadata instead of panicking.
+func OverrideLayerType(num int, meta LayerTypeMetadata) LayerType {
+	if 0 <= num && num < maxLayerType {
+		ltMeta[num] = layerTypeMetadata{
+			inUse:             true,
+			LayerTypeMetadata: meta,
+		}
+	} else {
+		ltMetaMap[LayerType(num)] = layerTypeMetadata{
+			inUse:             true,
+			LayerTypeMetadata: meta,
+		}
+	}
+	DecodersByLayerName[meta.Name] = meta.Decoder
+	return LayerType(num)
+}
+
+// Decode decodes the given data using the decoder registered with the layer
+// type.
+func (t LayerType) Decode(data []byte, c PacketBuilder) error {
+	var d Decoder
+	if 0 <= int(t) && int(t) < maxLayerType {
+		d = ltMeta[int(t)].Decoder
+	} else {
+		d = ltMetaMap[t].Decoder
+	}
+	if d != nil {
+		return d.Decode(data, c)
+	}
+	return fmt.Errorf("Layer type %v has no associated decoder", t)
+}
+
+// String returns the string associated with this layer type.
+func (t LayerType) String() (s string) {
+	if 0 <= int(t) && int(t) < maxLayerType {
+		s = ltMeta[int(t)].Name
+	} else {
+		s = ltMetaMap[t].Name
+	}
+	if s == "" {
+		s = strconv.Itoa(int(t))
+	}
+	return
+}
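// A registration sketch (illustrative, not part of the vendored source): application
// specific layer types in the 1000-1999 range take the fast array path described
// above. The layer name and decoder below are hypothetical placeholders.
package sketch

import "github.com/google/gopacket"

// decodeMyProto is a placeholder decoder; a real one would parse data and add layers.
func decodeMyProto(data []byte, p gopacket.PacketBuilder) error {
	return nil
}

// LayerTypeMyProto registers a new, application-specific layer type.
var LayerTypeMyProto = gopacket.RegisterLayerType(1050, gopacket.LayerTypeMetadata{
	Name:    "MyProto",
	Decoder: gopacket.DecodeFunc(decodeMyProto),
})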
diff --git a/vendor/github.com/google/gopacket/packet.go b/vendor/github.com/google/gopacket/packet.go
new file mode 100644
index 0000000..3a7c4b3
--- /dev/null
+++ b/vendor/github.com/google/gopacket/packet.go
@@ -0,0 +1,864 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"bytes"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"os"
+	"reflect"
+	"runtime/debug"
+	"strings"
+	"syscall"
+	"time"
+)
+
+// CaptureInfo provides standardized information about a packet captured off
+// the wire or read from a file.
+type CaptureInfo struct {
+	// Timestamp is the time the packet was captured, if that is known.
+	Timestamp time.Time
+	// CaptureLength is the total number of bytes read off of the wire.
+	CaptureLength int
+	// Length is the size of the original packet.  Should always be >=
+	// CaptureLength.
+	Length int
+	// InterfaceIndex is the index of the interface on which this packet was captured, if known.
+	InterfaceIndex int
+	// The packet source can place ancillary data of various types here.
+	// For example, the afpacket source can report the VLAN of captured
+	// packets this way.
+	AncillaryData []interface{}
+}
+
+// PacketMetadata contains metadata for a packet.
+type PacketMetadata struct {
+	CaptureInfo
+	// Truncated is true if packet decoding logic detects that there are fewer
+	// bytes in the packet than are detailed in various headers (for example, if
+	// the number of bytes in the IPv4 contents/payload is less than IPv4.Length).
+	// This is also set automatically for packets captured off the wire if
+	// CaptureInfo.CaptureLength < CaptureInfo.Length.
+	Truncated bool
+}
+
+// Packet is the primary object used by gopacket.  Packets are created by a
+// Decoder's Decode call.  A packet is made up of a set of Data, which
+// is broken into a number of Layers as it is decoded.
+type Packet interface {
+	//// Functions for outputting the packet as a human-readable string:
+	//// ------------------------------------------------------------------
+	// String returns a human-readable string representation of the packet.
+	// It uses LayerString on each layer to output the layer.
+	String() string
+	// Dump returns a verbose human-readable string representation of the packet,
+	// including a hex dump of all layers.  It uses LayerDump on each layer to
+	// output the layer.
+	Dump() string
+
+	//// Functions for accessing arbitrary packet layers:
+	//// ------------------------------------------------------------------
+	// Layers returns all layers in this packet, computing them as necessary
+	Layers() []Layer
+	// Layer returns the first layer in this packet of the given type, or nil
+	Layer(LayerType) Layer
+	// LayerClass returns the first layer in this packet of the given class,
+	// or nil.
+	LayerClass(LayerClass) Layer
+
+	//// Functions for accessing specific types of packet layers.  These functions
+	//// return the first layer of each type found within the packet.
+	//// ------------------------------------------------------------------
+	// LinkLayer returns the first link layer in the packet
+	LinkLayer() LinkLayer
+	// NetworkLayer returns the first network layer in the packet
+	NetworkLayer() NetworkLayer
+	// TransportLayer returns the first transport layer in the packet
+	TransportLayer() TransportLayer
+	// ApplicationLayer returns the first application layer in the packet
+	ApplicationLayer() ApplicationLayer
+	// ErrorLayer is particularly useful, since it returns nil if the packet
+	// was fully decoded successfully, and non-nil if an error was encountered
+	// in decoding and the packet was only partially decoded.  Thus, its output
+	// can be used to determine if the entire packet was able to be decoded.
+	ErrorLayer() ErrorLayer
+
+	//// Functions for accessing data specific to the packet:
+	//// ------------------------------------------------------------------
+	// Data returns the set of bytes that make up this entire packet.
+	Data() []byte
+	// Metadata returns packet metadata associated with this packet.
+	Metadata() *PacketMetadata
+}
+
+// packet contains all the information we need to fulfill the Packet interface,
+// and its two "subclasses" (yes, no such thing in Go, bear with me),
+// eagerPacket and lazyPacket, provide eager and lazy decoding logic around the
+// various functions needed to access this information.
+type packet struct {
+	// data contains the entire packet data for a packet
+	data []byte
+	// initialLayers is space for an initial set of layers already created inside
+	// the packet.
+	initialLayers [6]Layer
+	// layers contains each layer we've already decoded
+	layers []Layer
+	// last is the last layer added to the packet
+	last Layer
+	// metadata is the PacketMetadata for this packet
+	metadata PacketMetadata
+
+	decodeOptions DecodeOptions
+
+	// Pointers to the various important layers
+	link        LinkLayer
+	network     NetworkLayer
+	transport   TransportLayer
+	application ApplicationLayer
+	failure     ErrorLayer
+}
+
+func (p *packet) SetTruncated() {
+	p.metadata.Truncated = true
+}
+
+func (p *packet) SetLinkLayer(l LinkLayer) {
+	if p.link == nil {
+		p.link = l
+	}
+}
+
+func (p *packet) SetNetworkLayer(l NetworkLayer) {
+	if p.network == nil {
+		p.network = l
+	}
+}
+
+func (p *packet) SetTransportLayer(l TransportLayer) {
+	if p.transport == nil {
+		p.transport = l
+	}
+}
+
+func (p *packet) SetApplicationLayer(l ApplicationLayer) {
+	if p.application == nil {
+		p.application = l
+	}
+}
+
+func (p *packet) SetErrorLayer(l ErrorLayer) {
+	if p.failure == nil {
+		p.failure = l
+	}
+}
+
+func (p *packet) AddLayer(l Layer) {
+	p.layers = append(p.layers, l)
+	p.last = l
+}
+
+func (p *packet) DumpPacketData() {
+	fmt.Fprint(os.Stderr, p.packetDump())
+	os.Stderr.Sync()
+}
+
+func (p *packet) Metadata() *PacketMetadata {
+	return &p.metadata
+}
+
+func (p *packet) Data() []byte {
+	return p.data
+}
+
+func (p *packet) DecodeOptions() *DecodeOptions {
+	return &p.decodeOptions
+}
+
+func (p *packet) addFinalDecodeError(err error, stack []byte) {
+	fail := &DecodeFailure{err: err, stack: stack}
+	if p.last == nil {
+		fail.data = p.data
+	} else {
+		fail.data = p.last.LayerPayload()
+	}
+	p.AddLayer(fail)
+	p.SetErrorLayer(fail)
+}
+
+func (p *packet) recoverDecodeError() {
+	if !p.decodeOptions.SkipDecodeRecovery {
+		if r := recover(); r != nil {
+			p.addFinalDecodeError(fmt.Errorf("%v", r), debug.Stack())
+		}
+	}
+}
+
+// LayerString outputs an individual layer as a string.  The layer is output
+// in a single line, with no trailing newline.  This function is specifically
+// designed to do the right thing for most layers... it follows the following
+// rules:
+//  * If the Layer has a String function, just output that.
+//  * Otherwise, output all exported fields in the layer, recursing into
+//    exported slices and structs.
+// NOTE:  This is NOT THE SAME AS fmt's "%#v".  %#v will output both exported
+// and unexported fields... many times packet layers contain unexported stuff
+// that would just mess up the output of the layer; see, for example, the
+// Payload layer and its internal 'data' field, which contains a large byte
+// array that would really mess up formatting.
+func LayerString(l Layer) string {
+	return fmt.Sprintf("%v\t%s", l.LayerType(), layerString(reflect.ValueOf(l), false, false))
+}
+
+// Dumper dumps verbose information on a value.  If a layer type implements
+// Dumper, then its LayerDump() string will include the results in its output.
+type Dumper interface {
+	Dump() string
+}
+
+// LayerDump outputs a very verbose string representation of a layer.  Its
+// output is a concatenation of LayerString(l) and hex.Dump(l.LayerContents()).
+// It contains newlines and ends with a newline.
+func LayerDump(l Layer) string {
+	var b bytes.Buffer
+	b.WriteString(LayerString(l))
+	b.WriteByte('\n')
+	if d, ok := l.(Dumper); ok {
+		dump := d.Dump()
+		if dump != "" {
+			b.WriteString(dump)
+			if dump[len(dump)-1] != '\n' {
+				b.WriteByte('\n')
+			}
+		}
+	}
+	b.WriteString(hex.Dump(l.LayerContents()))
+	return b.String()
+}
+
+// layerString outputs, recursively, a layer in a "smart" way.  See docs for
+// LayerString for more details.
+//
+// Params:
+//   i - value to write out
+//   anonymous:  if we're currently recursing an anonymous member of a struct
+//   writeSpace:  if we've already written a value in a struct, and need to
+//     write a space before writing more.  This happens when we write various
+//     anonymous values, and need to keep writing more.
+func layerString(v reflect.Value, anonymous bool, writeSpace bool) string {
+	// Let String() functions take precedence.
+	if v.CanInterface() {
+		if s, ok := v.Interface().(fmt.Stringer); ok {
+			return s.String()
+		}
+	}
+	// Reflect, and spit out all the exported fields as key=value.
+	switch v.Type().Kind() {
+	case reflect.Interface, reflect.Ptr:
+		if v.IsNil() {
+			return "nil"
+		}
+		r := v.Elem()
+		return layerString(r, anonymous, writeSpace)
+	case reflect.Struct:
+		var b bytes.Buffer
+		typ := v.Type()
+		if !anonymous {
+			b.WriteByte('{')
+		}
+		for i := 0; i < v.NumField(); i++ {
+			// Check if this is upper-case.
+			ftype := typ.Field(i)
+			f := v.Field(i)
+			if ftype.Anonymous {
+				anonStr := layerString(f, true, writeSpace)
+				writeSpace = writeSpace || anonStr != ""
+				b.WriteString(anonStr)
+			} else if ftype.PkgPath == "" { // exported
+				if writeSpace {
+					b.WriteByte(' ')
+				}
+				writeSpace = true
+				fmt.Fprintf(&b, "%s=%s", typ.Field(i).Name, layerString(f, false, writeSpace))
+			}
+		}
+		if !anonymous {
+			b.WriteByte('}')
+		}
+		return b.String()
+	case reflect.Slice:
+		var b bytes.Buffer
+		b.WriteByte('[')
+		if v.Len() > 4 {
+			fmt.Fprintf(&b, "..%d..", v.Len())
+		} else {
+			for j := 0; j < v.Len(); j++ {
+				if j != 0 {
+					b.WriteString(", ")
+				}
+				b.WriteString(layerString(v.Index(j), false, false))
+			}
+		}
+		b.WriteByte(']')
+		return b.String()
+	}
+	return fmt.Sprintf("%v", v.Interface())
+}
+
+const (
+	longBytesLength = 128
+)
+
+// LongBytesGoString returns a string representation of the byte slice shortened
+// using the format '<type>{<truncated slice> ... (<n> bytes)}' if it
+// exceeds a predetermined length. Can be used to avoid filling the display with
+// very long byte strings.
+func LongBytesGoString(buf []byte) string {
+	if len(buf) < longBytesLength {
+		return fmt.Sprintf("%#v", buf)
+	}
+	s := fmt.Sprintf("%#v", buf[:longBytesLength-1])
+	s = strings.TrimSuffix(s, "}")
+	return fmt.Sprintf("%s ... (%d bytes)}", s, len(buf))
+}
+
+func baseLayerString(value reflect.Value) string {
+	t := value.Type()
+	content := value.Field(0)
+	c := make([]byte, content.Len())
+	for i := range c {
+		c[i] = byte(content.Index(i).Uint())
+	}
+	payload := value.Field(1)
+	p := make([]byte, payload.Len())
+	for i := range p {
+		p[i] = byte(payload.Index(i).Uint())
+	}
+	return fmt.Sprintf("%s{Contents:%s, Payload:%s}", t.String(),
+		LongBytesGoString(c),
+		LongBytesGoString(p))
+}
+
+func layerGoString(i interface{}, b *bytes.Buffer) {
+	if s, ok := i.(fmt.GoStringer); ok {
+		b.WriteString(s.GoString())
+		return
+	}
+
+	var v reflect.Value
+	var ok bool
+	if v, ok = i.(reflect.Value); !ok {
+		v = reflect.ValueOf(i)
+	}
+	switch v.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		if v.Kind() == reflect.Ptr {
+			b.WriteByte('&')
+		}
+		layerGoString(v.Elem().Interface(), b)
+	case reflect.Struct:
+		t := v.Type()
+		b.WriteString(t.String())
+		b.WriteByte('{')
+		for i := 0; i < v.NumField(); i++ {
+			if i > 0 {
+				b.WriteString(", ")
+			}
+			if t.Field(i).Name == "BaseLayer" {
+				fmt.Fprintf(b, "BaseLayer:%s", baseLayerString(v.Field(i)))
+			} else if v.Field(i).Kind() == reflect.Struct {
+				fmt.Fprintf(b, "%s:", t.Field(i).Name)
+				layerGoString(v.Field(i), b)
+			} else if v.Field(i).Kind() == reflect.Ptr {
+				b.WriteByte('&')
+				layerGoString(v.Field(i), b)
+			} else {
+				fmt.Fprintf(b, "%s:%#v", t.Field(i).Name, v.Field(i))
+			}
+		}
+		b.WriteByte('}')
+	default:
+		fmt.Fprintf(b, "%#v", i)
+	}
+}
+
+// LayerGoString returns a representation of the layer in Go syntax,
+// taking care to shorten "very long" BaseLayer byte slices
+func LayerGoString(l Layer) string {
+	b := new(bytes.Buffer)
+	layerGoString(l, b)
+	return b.String()
+}
+
+func (p *packet) packetString() string {
+	var b bytes.Buffer
+	fmt.Fprintf(&b, "PACKET: %d bytes", len(p.Data()))
+	if p.metadata.Truncated {
+		b.WriteString(", truncated")
+	}
+	if p.metadata.Length > 0 {
+		fmt.Fprintf(&b, ", wire length %d cap length %d", p.metadata.Length, p.metadata.CaptureLength)
+	}
+	if !p.metadata.Timestamp.IsZero() {
+		fmt.Fprintf(&b, " @ %v", p.metadata.Timestamp)
+	}
+	b.WriteByte('\n')
+	for i, l := range p.layers {
+		fmt.Fprintf(&b, "- Layer %d (%02d bytes) = %s\n", i+1, len(l.LayerContents()), LayerString(l))
+	}
+	return b.String()
+}
+
+func (p *packet) packetDump() string {
+	var b bytes.Buffer
+	fmt.Fprintf(&b, "-- FULL PACKET DATA (%d bytes) ------------------------------------\n%s", len(p.data), hex.Dump(p.data))
+	for i, l := range p.layers {
+		fmt.Fprintf(&b, "--- Layer %d ---\n%s", i+1, LayerDump(l))
+	}
+	return b.String()
+}
+
+// eagerPacket is a packet implementation that does eager decoding.  Upon
+// initial construction, it decodes all the layers it can from packet data.
+// eagerPacket implements Packet and PacketBuilder.
+type eagerPacket struct {
+	packet
+}
+
+var errNilDecoder = errors.New("NextDecoder passed nil decoder, probably an unsupported decode type")
+
+func (p *eagerPacket) NextDecoder(next Decoder) error {
+	if next == nil {
+		return errNilDecoder
+	}
+	if p.last == nil {
+		return errors.New("NextDecoder called, but no layers added yet")
+	}
+	d := p.last.LayerPayload()
+	if len(d) == 0 {
+		return nil
+	}
+	// Since we're eager, immediately call the next decoder.
+	return next.Decode(d, p)
+}
+func (p *eagerPacket) initialDecode(dec Decoder) {
+	defer p.recoverDecodeError()
+	err := dec.Decode(p.data, p)
+	if err != nil {
+		p.addFinalDecodeError(err, nil)
+	}
+}
+func (p *eagerPacket) LinkLayer() LinkLayer {
+	return p.link
+}
+func (p *eagerPacket) NetworkLayer() NetworkLayer {
+	return p.network
+}
+func (p *eagerPacket) TransportLayer() TransportLayer {
+	return p.transport
+}
+func (p *eagerPacket) ApplicationLayer() ApplicationLayer {
+	return p.application
+}
+func (p *eagerPacket) ErrorLayer() ErrorLayer {
+	return p.failure
+}
+func (p *eagerPacket) Layers() []Layer {
+	return p.layers
+}
+func (p *eagerPacket) Layer(t LayerType) Layer {
+	for _, l := range p.layers {
+		if l.LayerType() == t {
+			return l
+		}
+	}
+	return nil
+}
+func (p *eagerPacket) LayerClass(lc LayerClass) Layer {
+	for _, l := range p.layers {
+		if lc.Contains(l.LayerType()) {
+			return l
+		}
+	}
+	return nil
+}
+func (p *eagerPacket) String() string { return p.packetString() }
+func (p *eagerPacket) Dump() string   { return p.packetDump() }
+
+// lazyPacket does lazy decoding on its packet data.  On construction it does
+// no initial decoding.  For each function call, it decodes only as many layers
+// as are necessary to compute the return value for that function.
+// lazyPacket implements Packet and PacketBuilder.
+type lazyPacket struct {
+	packet
+	next Decoder
+}
+
+func (p *lazyPacket) NextDecoder(next Decoder) error {
+	if next == nil {
+		return errNilDecoder
+	}
+	p.next = next
+	return nil
+}
+func (p *lazyPacket) decodeNextLayer() {
+	if p.next == nil {
+		return
+	}
+	d := p.data
+	if p.last != nil {
+		d = p.last.LayerPayload()
+	}
+	next := p.next
+	p.next = nil
+	// We've just set p.next to nil, so if we see we have no data, this should be
+	// the final call we get to decodeNextLayer if we return here.
+	if len(d) == 0 {
+		return
+	}
+	defer p.recoverDecodeError()
+	err := next.Decode(d, p)
+	if err != nil {
+		p.addFinalDecodeError(err, nil)
+	}
+}
+func (p *lazyPacket) LinkLayer() LinkLayer {
+	for p.link == nil && p.next != nil {
+		p.decodeNextLayer()
+	}
+	return p.link
+}
+func (p *lazyPacket) NetworkLayer() NetworkLayer {
+	for p.network == nil && p.next != nil {
+		p.decodeNextLayer()
+	}
+	return p.network
+}
+func (p *lazyPacket) TransportLayer() TransportLayer {
+	for p.transport == nil && p.next != nil {
+		p.decodeNextLayer()
+	}
+	return p.transport
+}
+func (p *lazyPacket) ApplicationLayer() ApplicationLayer {
+	for p.application == nil && p.next != nil {
+		p.decodeNextLayer()
+	}
+	return p.application
+}
+func (p *lazyPacket) ErrorLayer() ErrorLayer {
+	for p.failure == nil && p.next != nil {
+		p.decodeNextLayer()
+	}
+	return p.failure
+}
+func (p *lazyPacket) Layers() []Layer {
+	for p.next != nil {
+		p.decodeNextLayer()
+	}
+	return p.layers
+}
+func (p *lazyPacket) Layer(t LayerType) Layer {
+	for _, l := range p.layers {
+		if l.LayerType() == t {
+			return l
+		}
+	}
+	numLayers := len(p.layers)
+	for p.next != nil {
+		p.decodeNextLayer()
+		for _, l := range p.layers[numLayers:] {
+			if l.LayerType() == t {
+				return l
+			}
+		}
+		numLayers = len(p.layers)
+	}
+	return nil
+}
+func (p *lazyPacket) LayerClass(lc LayerClass) Layer {
+	for _, l := range p.layers {
+		if lc.Contains(l.LayerType()) {
+			return l
+		}
+	}
+	numLayers := len(p.layers)
+	for p.next != nil {
+		p.decodeNextLayer()
+		for _, l := range p.layers[numLayers:] {
+			if lc.Contains(l.LayerType()) {
+				return l
+			}
+		}
+		numLayers = len(p.layers)
+	}
+	return nil
+}
+func (p *lazyPacket) String() string { p.Layers(); return p.packetString() }
+func (p *lazyPacket) Dump() string   { p.Layers(); return p.packetDump() }
+
+// DecodeOptions tells gopacket how to decode a packet.
+type DecodeOptions struct {
+	// Lazy decoding decodes the minimum number of layers needed to return data
+	// for a packet at each function call.  Be careful using this with concurrent
+	// packet processors, as each call to packet.* could mutate the packet, and
+	// two concurrent function calls could interact poorly.
+	Lazy bool
+	// NoCopy decoding doesn't copy its input buffer into storage that's owned by
+	// the packet.  If you can guarantee that the bytes underlying the slice
+	// passed into NewPacket aren't going to be modified, this can be faster.  If
+	// there's any chance that those bytes WILL be changed, this will invalidate
+	// your packets.
+	NoCopy bool
+	// SkipDecodeRecovery skips over panic recovery during packet decoding.
+	// Normally, when packets decode, if a panic occurs, that panic is captured
+	// by a recover(), and a DecodeFailure layer is added to the packet detailing
+	// the issue.  If this flag is set, panics are instead allowed to continue up
+	// the stack.
+	SkipDecodeRecovery bool
+	// DecodeStreamsAsDatagrams enables routing of application-level layers in the TCP
+	// decoder. If true, we should try to decode layers after TCP in single packets.
+	// This is disabled by default because the reassembly package drives the decoding
+	// of TCP payload data after reassembly.
+	DecodeStreamsAsDatagrams bool
+}
+
+// Default decoding provides the safest (but slowest) method for decoding
+// packets.  It eagerly processes all layers (so it's concurrency-safe) and it
+// copies its input buffer upon creation of the packet (so the packet remains
+// valid if the underlying slice is modified).  Both of these take time,
+// though, so beware.  If you can guarantee that the packet will only be used
+// by one goroutine at a time, set Lazy decoding.  If you can guarantee that
+// the underlying slice won't change, set NoCopy decoding.
+var Default = DecodeOptions{}
+
+// Lazy is a DecodeOptions with just Lazy set.
+var Lazy = DecodeOptions{Lazy: true}
+
+// NoCopy is a DecodeOptions with just NoCopy set.
+var NoCopy = DecodeOptions{NoCopy: true}
+
+// DecodeStreamsAsDatagrams is a DecodeOptions with just DecodeStreamsAsDatagrams set.
+var DecodeStreamsAsDatagrams = DecodeOptions{DecodeStreamsAsDatagrams: true}
+
+// NewPacket creates a new Packet object from a set of bytes.  The
+// firstLayerDecoder tells it how to interpret the first layer from the bytes,
+// future layers will be generated from that first layer automatically.
+func NewPacket(data []byte, firstLayerDecoder Decoder, options DecodeOptions) Packet {
+	if !options.NoCopy {
+		dataCopy := make([]byte, len(data))
+		copy(dataCopy, data)
+		data = dataCopy
+	}
+	if options.Lazy {
+		p := &lazyPacket{
+			packet: packet{data: data, decodeOptions: options},
+			next:   firstLayerDecoder,
+		}
+		p.layers = p.initialLayers[:0]
+		// Crazy craziness:
+		// If the following return statement is REMOVED, and Lazy is FALSE, then
+		// eager packet processing becomes 17% FASTER.  No, there is no logical
+		// explanation for this.  However, it's such a hacky micro-optimization that
+		// we really can't rely on it.  It appears to have to do with the size the
+		// compiler guesses for this function's stack space, since one symptom is
+		// that with the return statement in place, we more than double calls to
+		// runtime.morestack/runtime.lessstack.  We'll hope the compiler gets better
+		// over time and we get this optimization for free.  Until then, we'll have
+		// to live with slower packet processing.
+		return p
+	}
+	p := &eagerPacket{
+		packet: packet{data: data, decodeOptions: options},
+	}
+	p.layers = p.initialLayers[:0]
+	p.initialDecode(firstLayerDecoder)
+	return p
+}
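
A short usage sketch of NewPacket with the option presets above; it assumes the companion github.com/google/gopacket/layers package for the Ethernet and TCP layer types, and a caller-supplied raw frame. The packetdemo package and decodeFrame helper are illustrative, not part of the library.

```
package packetdemo

import (
	"log"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// decodeFrame decodes one raw Ethernet frame twice: once with the safe
// defaults, once with the faster Lazy+NoCopy combination described above.
func decodeFrame(raw []byte) {
	// Eager, copying decode: concurrency-safe and robust to later changes of raw.
	pkt := gopacket.NewPacket(raw, layers.LayerTypeEthernet, gopacket.Default)
	if errLayer := pkt.ErrorLayer(); errLayer != nil {
		log.Println("decode error:", errLayer.Error())
	}

	// Lazy + NoCopy: faster, but raw must not be modified afterwards and the
	// packet must be used from a single goroutine.
	fast := gopacket.NewPacket(raw, layers.LayerTypeEthernet,
		gopacket.DecodeOptions{Lazy: true, NoCopy: true})
	if tcp := fast.Layer(layers.LayerTypeTCP); tcp != nil {
		log.Println("TCP layer found, contents:", len(tcp.LayerContents()), "bytes")
	}
}
```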
+
+// PacketDataSource is an interface for some source of packet data.  Users may
+// create their own implementations, or use the existing implementations in
+// gopacket/pcap (libpcap, allows reading from live interfaces or from
+// pcap files) or gopacket/pfring (PF_RING, allows reading from live
+// interfaces).
+type PacketDataSource interface {
+	// ReadPacketData returns the next packet available from this data source.
+	// It returns:
+	//  data:  The bytes of an individual packet.
+	//  ci:  Metadata about the capture
+	//  err:  An error encountered while reading packet data.  If err != nil,
+	//    then data/ci will be ignored.
+	ReadPacketData() (data []byte, ci CaptureInfo, err error)
+}
+
+// ConcatFinitePacketDataSources returns a PacketDataSource that wraps a set
+// of internal PacketDataSources, each of which will stop with io.EOF after
+// reading a finite number of packets.  The returned PacketDataSource will
+// return all packets from the first finite source, followed by all packets from
+// the second, etc.  Once all finite sources have returned io.EOF, the returned
+// source will as well.
+func ConcatFinitePacketDataSources(pds ...PacketDataSource) PacketDataSource {
+	c := concat(pds)
+	return &c
+}
+
+type concat []PacketDataSource
+
+func (c *concat) ReadPacketData() (data []byte, ci CaptureInfo, err error) {
+	for len(*c) > 0 {
+		data, ci, err = (*c)[0].ReadPacketData()
+		if err == io.EOF {
+			*c = (*c)[1:]
+			continue
+		}
+		return
+	}
+	return nil, CaptureInfo{}, io.EOF
+}
+
+// ZeroCopyPacketDataSource is an interface to pull packet data from sources
+// that allow data to be returned without copying to a user-controlled buffer.
+// It's very similar to PacketDataSource, except that the caller must be more
+// careful in how the returned buffer is handled.
+type ZeroCopyPacketDataSource interface {
+	// ZeroCopyReadPacketData returns the next packet available from this data source.
+	// It returns:
+	//  data:  The bytes of an individual packet.  Unlike with
+	//    PacketDataSource's ReadPacketData, the slice returned here points
+	//    to a buffer owned by the data source.  In particular, the bytes in
+	//    this buffer may be changed by future calls to
+	//    ZeroCopyReadPacketData.  Do not use the returned buffer after
+	//    subsequent ZeroCopyReadPacketData calls.
+	//  ci:  Metadata about the capture
+	//  err:  An error encountered while reading packet data.  If err != nil,
+	//    then data/ci will be ignored.
+	ZeroCopyReadPacketData() (data []byte, ci CaptureInfo, err error)
+}
+
+// PacketSource reads in packets from a PacketDataSource, decodes them, and
+// returns them.
+//
+// There are currently two different methods for reading packets in through
+// a PacketSource:
+//
+// Reading With Packets Function
+//
+// This method is the most convenient and easiest to code, but lacks
+// flexibility.  Packets returns a 'chan Packet', then asynchronously writes
+// packets into that channel.  Packets uses a blocking channel, and closes
+// it if an io.EOF is returned by the underlying PacketDataSource.  All other
+// PacketDataSource errors are ignored and discarded.
+//  for packet := range packetSource.Packets() {
+//    ...
+//  }
+//
+// Reading With NextPacket Function
+//
+// This method is the most flexible, and exposes errors that may be
+// encountered by the underlying PacketDataSource.  It's also the fastest
+// in a tight loop, since it doesn't have the overhead of a channel
+// read/write.  However, it requires the user to handle errors, most
+// importantly the io.EOF error in cases where packets are being read from
+// a file.
+//  for {
+//    packet, err := packetSource.NextPacket()
+//    if err == io.EOF {
+//      break
+//    } else if err != nil {
+//      log.Println("Error:", err)
+//      continue
+//    }
+//    handlePacket(packet)  // Do something with each packet.
+//  }
+type PacketSource struct {
+	source  PacketDataSource
+	decoder Decoder
+	// DecodeOptions is the set of options to use for decoding each piece
+	// of packet data.  This can/should be changed by the user to reflect the
+	// way packets should be decoded.
+	DecodeOptions
+	c chan Packet
+}
+
+// NewPacketSource creates a packet data source.
+func NewPacketSource(source PacketDataSource, decoder Decoder) *PacketSource {
+	return &PacketSource{
+		source:  source,
+		decoder: decoder,
+	}
+}
+
+// NextPacket returns the next decoded packet from the PacketSource.  On error,
+// it returns a nil packet and a non-nil error.
+func (p *PacketSource) NextPacket() (Packet, error) {
+	data, ci, err := p.source.ReadPacketData()
+	if err != nil {
+		return nil, err
+	}
+	packet := NewPacket(data, p.decoder, p.DecodeOptions)
+	m := packet.Metadata()
+	m.CaptureInfo = ci
+	m.Truncated = m.Truncated || ci.CaptureLength < ci.Length
+	return packet, nil
+}
+
+// packetsToChannel reads in all packets from the packet source and sends them
+// to the given channel.  This routine retries temporary errors and terminates
+// only when NextPacket returns a known unrecoverable error, such as io.EOF.
+func (p *PacketSource) packetsToChannel() {
+	defer close(p.c)
+	for {
+		packet, err := p.NextPacket()
+		if err == nil {
+			p.c <- packet
+			continue
+		}
+
+		// Immediately retry for temporary network errors
+		if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
+			continue
+		}
+
+		// Immediately retry for EAGAIN
+		if err == syscall.EAGAIN {
+			continue
+		}
+
+		// Immediately break for known unrecoverable errors
+		if err == io.EOF || err == io.ErrUnexpectedEOF ||
+			err == io.ErrNoProgress || err == io.ErrClosedPipe || err == io.ErrShortBuffer ||
+			err == syscall.EBADF ||
+			strings.Contains(err.Error(), "use of closed file") {
+			break
+		}
+
+		// Sleep briefly and try again
+		time.Sleep(time.Millisecond * time.Duration(5))
+	}
+}
+
+// Packets returns a channel of packets, allowing easy iterating over
+// packets.  Packets will be asynchronously read in from the underlying
+// PacketDataSource and written to the returned channel.  If the underlying
+// PacketDataSource returns an io.EOF error, the channel will be closed.
+// If any other error is encountered, it is ignored.
+//
+//  for packet := range packetSource.Packets() {
+//    handlePacket(packet)  // Do something with each packet.
+//  }
+//
+// If called more than once, returns the same channel.
+func (p *PacketSource) Packets() chan Packet {
+	if p.c == nil {
+		p.c = make(chan Packet, 1000)
+		go p.packetsToChannel()
+	}
+	return p.c
+}
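
To make the PacketDataSource/PacketSource contract above concrete, here is a hedged sketch of an in-memory data source that replays a slice of frames and then reports io.EOF (which closes the channel returned by Packets). The packetdemo package name, sliceSource type, and Replay helper are hypothetical; github.com/google/gopacket/layers is assumed for the first-layer decoder.

```
package packetdemo

import (
	"io"
	"time"

	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// sliceSource replays a fixed set of raw frames and then returns io.EOF,
// so it is a finite source usable with ConcatFinitePacketDataSources.
type sliceSource struct {
	frames [][]byte
}

func (s *sliceSource) ReadPacketData() ([]byte, gopacket.CaptureInfo, error) {
	if len(s.frames) == 0 {
		return nil, gopacket.CaptureInfo{}, io.EOF
	}
	data := s.frames[0]
	s.frames = s.frames[1:]
	return data, gopacket.CaptureInfo{
		Timestamp:     time.Now(),
		CaptureLength: len(data),
		Length:        len(data),
	}, nil
}

// Replay decodes every frame as Ethernet and returns the resulting packets.
func Replay(frames [][]byte) []gopacket.Packet {
	src := gopacket.NewPacketSource(&sliceSource{frames: frames}, layers.LayerTypeEthernet)
	var out []gopacket.Packet
	for pkt := range src.Packets() {
		out = append(out, pkt)
	}
	return out
}
```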
diff --git a/vendor/github.com/google/gopacket/parser.go b/vendor/github.com/google/gopacket/parser.go
new file mode 100644
index 0000000..4a4676f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/parser.go
@@ -0,0 +1,350 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+)
+
+// A container for a single LayerType->DecodingLayer mapping.
+type decodingLayerElem struct {
+	typ LayerType
+	dec DecodingLayer
+}
+
+// DecodingLayer is an interface for packet layers that can decode themselves.
+//
+// The important part of DecodingLayer is that they decode themselves in-place.
+// Calling DecodeFromBytes on a DecodingLayer totally resets the entire layer to
+// the new state defined by the data passed in.  A returned error leaves the
+// DecodingLayer in an unknown intermediate state, thus its fields should not be
+// trusted.
+//
+// Because the DecodingLayer is resetting its own fields, a call to
+// DecodeFromBytes should normally not require any memory allocation.
+type DecodingLayer interface {
+	// DecodeFromBytes resets the internal state of this layer to the state
+	// defined by the passed-in bytes.  Slices in the DecodingLayer may
+	// reference the passed-in data, so care should be taken to copy it
+	// first should later modification of data be required before the
+	// DecodingLayer is discarded.
+	DecodeFromBytes(data []byte, df DecodeFeedback) error
+	// CanDecode returns the set of LayerTypes this DecodingLayer can
+	// decode.  For Layers that are also DecodingLayers, this will most
+	// often be that Layer's LayerType().
+	CanDecode() LayerClass
+	// NextLayerType returns the LayerType which should be used to decode
+	// the LayerPayload.
+	NextLayerType() LayerType
+	// LayerPayload is the set of bytes remaining to decode after a call to
+	// DecodeFromBytes.
+	LayerPayload() []byte
+}
+
+// DecodingLayerFunc decodes the given packet and stores the decoded LayerType
+// values into the specified slice. Returns either the first encountered
+// unsupported LayerType value or a decoding error. In case of success,
+// returns (LayerTypeZero, nil).
+type DecodingLayerFunc func([]byte, *[]LayerType) (LayerType, error)
+
+// DecodingLayerContainer stores all DecodingLayer-s and serves as a
+// searching tool for DecodingLayerParser.
+type DecodingLayerContainer interface {
+	// Put adds a new DecodingLayer to the container. The new instance of
+	// the same DecodingLayerContainer is returned so it may be
+	// implemented as a value receiver.
+	Put(DecodingLayer) DecodingLayerContainer
+	// Decoder returns the DecodingLayer registered to decode the given LayerType
+	// and true if it was found. If no decoder is found, it returns false.
+	Decoder(LayerType) (DecodingLayer, bool)
+	// LayersDecoder returns DecodingLayerFunc which decodes given
+	// packet, starting with specified LayerType and DecodeFeedback.
+	LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc
+}
+
+// DecodingLayerSparse is a sparse array-based implementation of
+// DecodingLayerContainer. Each DecodingLayer is addressed in an
+// allocated slice by LayerType value itself. Though this is the
+// fastest container, it may be memory-consuming if used with big
+// LayerType values.
+type DecodingLayerSparse []DecodingLayer
+
+// Put implements DecodingLayerContainer interface.
+func (dl DecodingLayerSparse) Put(d DecodingLayer) DecodingLayerContainer {
+	maxLayerType := LayerType(len(dl) - 1)
+	for _, typ := range d.CanDecode().LayerTypes() {
+		if typ > maxLayerType {
+			maxLayerType = typ
+		}
+	}
+
+	if extra := maxLayerType - LayerType(len(dl)) + 1; extra > 0 {
+		dl = append(dl, make([]DecodingLayer, extra)...)
+	}
+
+	for _, typ := range d.CanDecode().LayerTypes() {
+		dl[typ] = d
+	}
+	return dl
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerSparse) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+	return LayersDecoder(dl, first, df)
+}
+
+// Decoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerSparse) Decoder(typ LayerType) (DecodingLayer, bool) {
+	if int64(typ) < int64(len(dl)) {
+		decoder := dl[typ]
+		return decoder, decoder != nil
+	}
+	return nil, false
+}
+
+// DecodingLayerArray is an array-based implementation of
+// DecodingLayerContainer. Each DecodingLayer is searched for linearly in
+// an allocated slice, one element at a time.
+type DecodingLayerArray []decodingLayerElem
+
+// Put implements DecodingLayerContainer interface.
+func (dl DecodingLayerArray) Put(d DecodingLayer) DecodingLayerContainer {
+TYPES:
+	for _, typ := range d.CanDecode().LayerTypes() {
+		for i := range dl {
+			if dl[i].typ == typ {
+				dl[i].dec = d
+				continue TYPES
+			}
+		}
+		dl = append(dl, decodingLayerElem{typ, d})
+	}
+	return dl
+}
+
+// Decoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerArray) Decoder(typ LayerType) (DecodingLayer, bool) {
+	for i := range dl {
+		if dl[i].typ == typ {
+			return dl[i].dec, true
+		}
+	}
+	return nil, false
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerArray) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+	return LayersDecoder(dl, first, df)
+}
+
+// DecodingLayerMap is a map-based implementation of
+// DecodingLayerContainer. Each DecodingLayer is searched in a map
+// hashed by LayerType value.
+type DecodingLayerMap map[LayerType]DecodingLayer
+
+// Put implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) Put(d DecodingLayer) DecodingLayerContainer {
+	for _, typ := range d.CanDecode().LayerTypes() {
+		if dl == nil {
+			dl = make(map[LayerType]DecodingLayer)
+		}
+		dl[typ] = d
+	}
+	return dl
+}
+
+// Decoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) Decoder(typ LayerType) (DecodingLayer, bool) {
+	d, ok := dl[typ]
+	return d, ok
+}
+
+// LayersDecoder implements DecodingLayerContainer interface.
+func (dl DecodingLayerMap) LayersDecoder(first LayerType, df DecodeFeedback) DecodingLayerFunc {
+	return LayersDecoder(dl, first, df)
+}
+
+// Static code check.
+var (
+	_ = []DecodingLayerContainer{
+		DecodingLayerSparse(nil),
+		DecodingLayerMap(nil),
+		DecodingLayerArray(nil),
+	}
+)
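
Because the containers above are value types, Put returns the (possibly reallocated) container and callers must keep that return value. Below is a small sketch under that assumption; the packetdemo package, the newSparseContainer helper, and the use of github.com/google/gopacket/layers for two DecodingLayer implementations are illustrative.

```
package packetdemo

import (
	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// newSparseContainer registers two decoding layers in a DecodingLayerSparse.
// Note that the result of every Put call is kept: the backing slice may be
// reallocated as the container grows to cover larger LayerType values.
func newSparseContainer() gopacket.DecodingLayerContainer {
	var (
		eth layers.Ethernet
		ip4 layers.IPv4
	)
	dlc := gopacket.DecodingLayerContainer(gopacket.DecodingLayerSparse(nil))
	dlc = dlc.Put(&eth)
	dlc = dlc.Put(&ip4)
	return dlc
}
```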
+
+// DecodingLayerParser parses a given set of layer types.  See DecodeLayers for
+// more information on how DecodingLayerParser should be used.
+type DecodingLayerParser struct {
+	// DecodingLayerParserOptions is the set of options available to the
+	// user to define the parser's behavior.
+	DecodingLayerParserOptions
+	dlc   DecodingLayerContainer
+	first LayerType
+	df    DecodeFeedback
+
+	decodeFunc DecodingLayerFunc
+
+	// Truncated is set when a decode layer detects that the packet has been
+	// truncated.
+	Truncated bool
+}
+
+// AddDecodingLayer adds a decoding layer to the parser.  This adds support for
+// the decoding layer's CanDecode layers to the parser... should they be
+// encountered, they'll be parsed.
+func (l *DecodingLayerParser) AddDecodingLayer(d DecodingLayer) {
+	l.SetDecodingLayerContainer(l.dlc.Put(d))
+}
+
+// SetTruncated is used by DecodingLayers to set the Truncated boolean in the
+// DecodingLayerParser.  Users should simply read Truncated after calling
+// DecodeLayers.
+func (l *DecodingLayerParser) SetTruncated() {
+	l.Truncated = true
+}
+
+// NewDecodingLayerParser creates a new DecodingLayerParser and adds in all
+// of the given DecodingLayers with AddDecodingLayer.
+//
+// Each call to DecodeLayers will attempt to decode the given bytes first by
+// treating them as a 'first'-type layer, then by using NextLayerType on
+// subsequently decoded layers to find the next relevant decoder.  Should a
+// decoder not be available for the layer type returned by NextLayerType,
+// decoding will stop.
+//
+// NewDecodingLayerParser uses DecodingLayerMap container by
+// default.
+func NewDecodingLayerParser(first LayerType, decoders ...DecodingLayer) *DecodingLayerParser {
+	dlp := &DecodingLayerParser{first: first}
+	dlp.df = dlp // Cast this once to the interface
+	// default container
+	dlc := DecodingLayerContainer(DecodingLayerMap(make(map[LayerType]DecodingLayer)))
+	for _, d := range decoders {
+		dlc = dlc.Put(d)
+	}
+
+	dlp.SetDecodingLayerContainer(dlc)
+	return dlp
+}
+
+// SetDecodingLayerContainer specifies the container holding the decoders. This
+// call replaces all decoders already registered in the given instance of
+// DecodingLayerParser.
+func (l *DecodingLayerParser) SetDecodingLayerContainer(dlc DecodingLayerContainer) {
+	l.dlc = dlc
+	l.decodeFunc = l.dlc.LayersDecoder(l.first, l.df)
+}
+
+// DecodeLayers decodes as many layers as possible from the given data.  It
+// initially treats the data as layer type 'typ', then uses NextLayerType on
+// each subsequent decoded layer until it gets to a layer type it doesn't know
+// how to parse.
+//
+// For each layer successfully decoded, DecodeLayers appends the layer type to
+// the decoded slice.  DecodeLayers truncates the 'decoded' slice initially, so
+// there's no need to empty it yourself.
+//
+// This decoding method is about an order of magnitude faster than packet
+// decoding, because it only decodes known layers that have already been
+// allocated.  This means it doesn't need to allocate each layer it returns...
+// instead it overwrites the layers that already exist.
+//
+// Example usage:
+//    func main() {
+//      var eth layers.Ethernet
+//      var ip4 layers.IPv4
+//      var ip6 layers.IPv6
+//      var tcp layers.TCP
+//      var udp layers.UDP
+//      var payload gopacket.Payload
+//      parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet, &eth, &ip4, &ip6, &tcp, &udp, &payload)
+//      var source gopacket.PacketDataSource = getMyDataSource()
+//      decodedLayers := make([]gopacket.LayerType, 0, 10)
+//      for {
+//        data, _, err := source.ReadPacketData()
+//        if err != nil {
+//          fmt.Println("Error reading packet data: ", err)
+//          continue
+//        }
+//        fmt.Println("Decoding packet")
+//        err = parser.DecodeLayers(data, &decodedLayers)
+//        for _, typ := range decodedLayers {
+//          fmt.Println("  Successfully decoded layer type", typ)
+//          switch typ {
+//            case layers.LayerTypeEthernet:
+//              fmt.Println("    Eth ", eth.SrcMAC, eth.DstMAC)
+//            case layers.LayerTypeIPv4:
+//              fmt.Println("    IP4 ", ip4.SrcIP, ip4.DstIP)
+//            case layers.LayerTypeIPv6:
+//              fmt.Println("    IP6 ", ip6.SrcIP, ip6.DstIP)
+//            case layers.LayerTypeTCP:
+//              fmt.Println("    TCP ", tcp.SrcPort, tcp.DstPort)
+//            case layers.LayerTypeUDP:
+//              fmt.Println("    UDP ", udp.SrcPort, udp.DstPort)
+//          }
+//        }
+//        if parser.Truncated {
+//          fmt.Println("  Packet has been truncated")
+//        }
+//        if err != nil {
+//          fmt.Println("  Error encountered:", err)
+//        }
+//      }
+//    }
+//
+// If DecodeLayers is unable to decode the next layer type, it will return the
+// error UnsupportedLayerType.
+func (l *DecodingLayerParser) DecodeLayers(data []byte, decoded *[]LayerType) (err error) {
+	l.Truncated = false
+	if !l.IgnorePanic {
+		defer panicToError(&err)
+	}
+	typ, err := l.decodeFunc(data, decoded)
+	if typ != LayerTypeZero {
+		// no decoder
+		if l.IgnoreUnsupported {
+			return nil
+		}
+		return UnsupportedLayerType(typ)
+	}
+	return err
+}
+
+// UnsupportedLayerType is returned by DecodingLayerParser if DecodeLayers
+// encounters a layer type that the DecodingLayerParser has no decoder for.
+type UnsupportedLayerType LayerType
+
+// Error implements the error interface, returning a string to say that the
+// given layer type is unsupported.
+func (e UnsupportedLayerType) Error() string {
+	return fmt.Sprintf("No decoder for layer type %v", LayerType(e))
+}
+
+func panicToError(e *error) {
+	if r := recover(); r != nil {
+		*e = fmt.Errorf("panic: %v", r)
+	}
+}
+
+// DecodingLayerParserOptions provides options to affect the behavior of a given
+// DecodingLayerParser.
+type DecodingLayerParserOptions struct {
+	// IgnorePanic determines whether a DecodingLayerParser should stop
+	// panics on its own (by returning them as an error from DecodeLayers)
+	// or should allow them to raise up the stack.  Handling errors does add
+	// latency to the process of decoding layers, but is much safer for
+	// callers.  IgnorePanic defaults to false; thus, if the caller does
+	// nothing, decode panics will be returned as errors.
+	IgnorePanic bool
+	// IgnoreUnsupported will stop parsing and return a nil error when it
+	// encounters a layer it doesn't have a parser for, instead of returning an
+	// UnsupportedLayerType error.  If this is true, it's up to the caller to make
+	// sure that all expected layers have been parsed (by checking the decoded
+	// slice).
+	IgnoreUnsupported bool
+}
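
Putting the pieces together, here is a hedged sketch of a DecodingLayerParser that swaps in the sparse container and tolerates unknown layers via IgnoreUnsupported. The layers package, the packetdemo package name, and the parseFast helper are assumptions for illustration only.

```
package packetdemo

import (
	"github.com/google/gopacket"
	"github.com/google/gopacket/layers"
)

// parseFast decodes Ethernet/IPv4/TCP in place and stops quietly at the
// first layer it has no decoder for.
func parseFast(data []byte) ([]gopacket.LayerType, error) {
	var (
		eth layers.Ethernet
		ip4 layers.IPv4
		tcp layers.TCP
	)
	parser := gopacket.NewDecodingLayerParser(layers.LayerTypeEthernet)
	// Replace the default map container with the sparse one, then register
	// the layers: SetDecodingLayerContainer discards earlier registrations.
	parser.SetDecodingLayerContainer(gopacket.DecodingLayerSparse(nil))
	parser.AddDecodingLayer(&eth)
	parser.AddDecodingLayer(&ip4)
	parser.AddDecodingLayer(&tcp)
	parser.IgnoreUnsupported = true

	decoded := make([]gopacket.LayerType, 0, 4)
	err := parser.DecodeLayers(data, &decoded)
	return decoded, err
}
```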
diff --git a/vendor/github.com/google/gopacket/time.go b/vendor/github.com/google/gopacket/time.go
new file mode 100644
index 0000000..6d116cd
--- /dev/null
+++ b/vendor/github.com/google/gopacket/time.go
@@ -0,0 +1,72 @@
+// Copyright 2018 The GoPacket Authors. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+	"math"
+	"time"
+)
+
+// TimestampResolution represents the resolution of timestamps in Base^Exponent.
+type TimestampResolution struct {
+	Base, Exponent int
+}
+
+func (t TimestampResolution) String() string {
+	return fmt.Sprintf("%d^%d", t.Base, t.Exponent)
+}
+
+// ToDuration returns the smallest representable time difference as a time.Duration
+func (t TimestampResolution) ToDuration() time.Duration {
+	if t.Base == 0 {
+		return 0
+	}
+	if t.Exponent == 0 {
+		return time.Second
+	}
+	switch t.Base {
+	case 10:
+		return time.Duration(math.Pow10(t.Exponent + 9))
+	case 2:
+		if t.Exponent < 0 {
+			return time.Second >> uint(-t.Exponent)
+		}
+		return time.Second << uint(t.Exponent)
+	default:
+		// this might lose precision
+		return time.Duration(float64(time.Second) * math.Pow(float64(t.Base), float64(t.Exponent)))
+	}
+}
+
+// TimestampResolutionInvalid represents an invalid timestamp resolution
+var TimestampResolutionInvalid = TimestampResolution{}
+
+// TimestampResolutionMillisecond is a resolution of 10^-3s
+var TimestampResolutionMillisecond = TimestampResolution{10, -3}
+
+// TimestampResolutionMicrosecond is a resolution of 10^-6s
+var TimestampResolutionMicrosecond = TimestampResolution{10, -6}
+
+// TimestampResolutionNanosecond is a resolution of 10^-9s
+var TimestampResolutionNanosecond = TimestampResolution{10, -9}
+
+// TimestampResolutionNTP is the resolution of NTP timestamps which is 2^-32 ≈ 233 picoseconds
+var TimestampResolutionNTP = TimestampResolution{2, -32}
+
+// TimestampResolutionCaptureInfo is the resolution used in CaptureInfo, which is currently nanosecond
+var TimestampResolutionCaptureInfo = TimestampResolutionNanosecond
+
+// PacketSourceResolution is an interface for packet data sources that
+// support reporting the timestamp resolution of the acquired timestamps.
+// Returned timestamps will always have NanosecondTimestampResolution due
+// to the use of time.Time, but scaling might have occurred if acquired
+// timestamps have a different resolution.
+type PacketSourceResolution interface {
+	// Resolution returns the timestamp resolution of acquired timestamps before scaling to NanosecondTimestampResolution.
+	Resolution() TimestampResolution
+}
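
A brief sketch of what ToDuration produces for the predefined resolutions above; note that the NTP resolution (2^-32 s, roughly 233 ps) truncates to zero because time.Duration cannot represent sub-nanosecond intervals. The packetdemo package and printResolutions helper are illustrative.

```
package packetdemo

import (
	"fmt"

	"github.com/google/gopacket"
)

// printResolutions shows the smallest representable difference for a few
// predefined timestamp resolutions.
func printResolutions() {
	fmt.Println(gopacket.TimestampResolutionMicrosecond.ToDuration()) // 1µs
	fmt.Println(gopacket.TimestampResolutionNanosecond.ToDuration())  // 1ns
	fmt.Println(gopacket.TimestampResolutionNTP.ToDuration())         // 0s (below 1ns)
}
```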
diff --git a/vendor/github.com/google/gopacket/writer.go b/vendor/github.com/google/gopacket/writer.go
new file mode 100644
index 0000000..0e94b5b
--- /dev/null
+++ b/vendor/github.com/google/gopacket/writer.go
@@ -0,0 +1,256 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package gopacket
+
+import (
+	"fmt"
+)
+
+// SerializableLayer allows its implementations to be written out as a set of bytes,
+// so those bytes may be sent on the wire or otherwise used by the caller.
+// SerializableLayer is implemented by certain Layer types, and can be encoded to
+// bytes using the LayerWriter object.
+type SerializableLayer interface {
+	// SerializeTo writes this layer to a slice, growing that slice if necessary
+	// to make it fit the layer's data.
+	//  Args:
+	//   b:  SerializeBuffer to write this layer on to.  When called, b.Bytes()
+	//     is the payload this layer should wrap, if any.  Note that this
+	//     layer can either prepend itself (common), append itself
+	//     (uncommon), or both (sometimes padding or footers are required at
+	//     the end of packet data). It's also possible (though probably very
+	//     rarely needed) to overwrite any bytes in the current payload.
+	//     After this call, b.Bytes() should return the byte encoding of
+	//     this layer wrapping the original b.Bytes() payload.
+	//   opts:  options to use while writing out data.
+	//  Returns:
+	//   error if a problem was encountered during encoding.  If an error is
+	//   returned, the bytes in data should be considered invalidated, and
+	//   not used.
+	//
+	// SerializeTo calls SHOULD entirely ignore LayerContents and
+	// LayerPayload.  It just serializes based on struct fields, neither
+	// modifying nor using contents/payload.
+	SerializeTo(b SerializeBuffer, opts SerializeOptions) error
+	// LayerType returns the type of the layer that is being serialized to the buffer
+	LayerType() LayerType
+}
+
+// SerializeOptions provides options for behaviors that SerializableLayers may want to
+// implement.
+type SerializeOptions struct {
+	// FixLengths determines whether, during serialization, layers should fix
+	// the values for any length field that depends on the payload.
+	FixLengths bool
+	// ComputeChecksums determines whether, during serialization, layers
+	// should recompute checksums based on their payloads.
+	ComputeChecksums bool
+}
+
+// SerializeBuffer is a helper used by gopacket for writing out packet layers.
+// SerializeBuffer starts off as an empty []byte.  Subsequent calls to PrependBytes
+// return byte slices before the current Bytes(), AppendBytes returns byte
+// slices after.
+//
+// Byte slices returned by PrependBytes/AppendBytes are NOT zero'd out, so if
+// you want to make sure they're all zeros, set them as such.
+//
+// SerializeBuffer is specifically designed to handle packet writing, where unlike
+// with normal writes it's easier to start writing at the inner-most layer and
+// work out, meaning that we often need to prepend bytes.  This runs counter to
+// typical writes to byte slices using append(), where we only write at the end
+// of the buffer.
+//
+// It can be reused via Clear.  Note, however, that a Clear call will invalidate the
+// byte slices returned by any previous Bytes() call (the same buffer is
+// reused).
+//
+//  1) Reusing a write buffer is generally much faster than creating a new one,
+//     and with the default implementation it avoids additional memory allocations.
+//  2) If a byte slice from a previous Bytes() call will continue to be used,
+//     it's better to create a new SerializeBuffer.
+//
+// The Clear method is specifically designed to minimize memory allocations for
+// similar later workloads on the SerializeBuffer.  IE: if you make a set of
+// Prepend/Append calls, then clear, then make the same calls with the same
+// sizes, the second round (and all future similar rounds) shouldn't allocate
+// any new memory.
+type SerializeBuffer interface {
+	// Bytes returns the contiguous set of bytes collected so far by Prepend/Append
+	// calls.  The slice returned by Bytes will be modified by future Clear calls,
+	// so if you're planning on clearing this SerializeBuffer, you may want to copy
+	// Bytes somewhere safe first.
+	Bytes() []byte
+	// PrependBytes returns a set of bytes which prepends the current bytes in this
+	// buffer.  These bytes start in an indeterminate state, so they should be
+	// overwritten by the caller.  The caller must only call PrependBytes if they
+	// know they're going to immediately overwrite all bytes returned.
+	PrependBytes(num int) ([]byte, error)
+	// AppendBytes returns a set of bytes which appends the current bytes in this
+	// buffer.  These bytes start in an indeterminate state, so they should be
+	// overwritten by the caller.  The caller must only call AppendBytes if they
+	// know they're going to immediately overwrite all bytes returned.
+	AppendBytes(num int) ([]byte, error)
+	// Clear resets the SerializeBuffer to a new, empty buffer.  After a call to clear,
+	// the byte slice returned by any previous call to Bytes() for this buffer
+	// should be considered invalidated.
+	Clear() error
+	// Layers returns all the Layers that have been successfully serialized into this buffer
+	// already.
+	Layers() []LayerType
+	// PushLayer adds the current Layer to the list of Layers that have been serialized
+	// into this buffer.
+	PushLayer(LayerType)
+}
+
+type serializeBuffer struct {
+	data                []byte
+	start               int
+	prepended, appended int
+	layers              []LayerType
+}
+
+// NewSerializeBuffer creates a new instance of the default implementation of
+// the SerializeBuffer interface.
+func NewSerializeBuffer() SerializeBuffer {
+	return &serializeBuffer{}
+}
+
+// NewSerializeBufferExpectedSize creates a new buffer for serialization, optimized for an
+// expected number of bytes prepended/appended.  This tends to decrease the
+// number of memory allocations made by the buffer during writes.
+func NewSerializeBufferExpectedSize(expectedPrependLength, expectedAppendLength int) SerializeBuffer {
+	return &serializeBuffer{
+		data:      make([]byte, expectedPrependLength, expectedPrependLength+expectedAppendLength),
+		start:     expectedPrependLength,
+		prepended: expectedPrependLength,
+		appended:  expectedAppendLength,
+	}
+}
+
+func (w *serializeBuffer) Bytes() []byte {
+	return w.data[w.start:]
+}
+
+func (w *serializeBuffer) PrependBytes(num int) ([]byte, error) {
+	if num < 0 {
+		panic("num < 0")
+	}
+	if w.start < num {
+		toPrepend := w.prepended
+		if toPrepend < num {
+			toPrepend = num
+		}
+		w.prepended += toPrepend
+		length := cap(w.data) + toPrepend
+		newData := make([]byte, length)
+		newStart := w.start + toPrepend
+		copy(newData[newStart:], w.data[w.start:])
+		w.start = newStart
+		w.data = newData[:toPrepend+len(w.data)]
+	}
+	w.start -= num
+	return w.data[w.start : w.start+num], nil
+}
+
+func (w *serializeBuffer) AppendBytes(num int) ([]byte, error) {
+	if num < 0 {
+		panic("num < 0")
+	}
+	initialLength := len(w.data)
+	if cap(w.data)-initialLength < num {
+		toAppend := w.appended
+		if toAppend < num {
+			toAppend = num
+		}
+		w.appended += toAppend
+		newData := make([]byte, cap(w.data)+toAppend)
+		copy(newData[w.start:], w.data[w.start:])
+		w.data = newData[:initialLength]
+	}
+	// Grow the buffer.  We know it'll be under capacity given above.
+	w.data = w.data[:initialLength+num]
+	return w.data[initialLength:], nil
+}
+
+func (w *serializeBuffer) Clear() error {
+	w.start = w.prepended
+	w.data = w.data[:w.start]
+	w.layers = w.layers[:0]
+	return nil
+}
+
+func (w *serializeBuffer) Layers() []LayerType {
+	return w.layers
+}
+
+func (w *serializeBuffer) PushLayer(l LayerType) {
+	w.layers = append(w.layers, l)
+}
+
+// SerializeLayers clears the given write buffer, then writes all layers into it so
+// they correctly wrap each other.  Note that by clearing the buffer, it
+// invalidates all slices previously returned by w.Bytes()
+//
+// Example:
+//   buf := gopacket.NewSerializeBuffer()
+//   opts := gopacket.SerializeOptions{}
+//   gopacket.SerializeLayers(buf, opts, a, b, c)
+//   firstPayload := buf.Bytes()  // contains byte representation of a(b(c))
+//   gopacket.SerializeLayers(buf, opts, d, e, f)
+//   secondPayload := buf.Bytes()  // contains byte representation of d(e(f)). firstPayload is now invalidated, since the SerializeLayers call Clears buf.
+func SerializeLayers(w SerializeBuffer, opts SerializeOptions, layers ...SerializableLayer) error {
+	w.Clear()
+	for i := len(layers) - 1; i >= 0; i-- {
+		layer := layers[i]
+		err := layer.SerializeTo(w, opts)
+		if err != nil {
+			return err
+		}
+		w.PushLayer(layer.LayerType())
+	}
+	return nil
+}
+
+// SerializeMultiLayers clears the given write buffer, then writes all layers into it so
+// they correctly wrap each other.  Note that by clearing the buffer, it
+// invalidates all slices previously returned by w.Bytes()
+//
+// Example:
+//   buf := gopacket.NewSerializeBuffer()
+//   opts := gopacket.SerializeOptions{}
+//   gopacket.SerializeMultiLayers(buf, opts, []gopacket.SerializableLayer{a, b, c})
+//   firstPayload := buf.Bytes()  // contains byte representation of a(b(c))
+//   gopacket.SerializeMultiLayers(buf, opts, []gopacket.SerializableLayer{d, e, f})
+//   secondPayload := buf.Bytes()  // contains byte representation of d(e(f)). firstPayload is now invalidated, since the SerializeMultiLayers call Clears buf.
+func SerializeMultiLayers(w SerializeBuffer, opts SerializeOptions, layers []SerializableLayer) error {
+	w.Clear()
+	for i := len(layers) - 1; i >= 0; i-- {
+		layer := layers[i]
+		err := layer.SerializeTo(w, opts)
+		if err != nil {
+			return err
+		}
+		w.PushLayer(layer.LayerType())
+	}
+	return nil
+}
+
+// SerializePacket is a convenience function that calls SerializeLayers
+// on the packet's Layers().
+// It returns an error if one of the packet layers is not a SerializableLayer.
+func SerializePacket(buf SerializeBuffer, opts SerializeOptions, packet Packet) error {
+	sls := []SerializableLayer{}
+	for _, layer := range packet.Layers() {
+		sl, ok := layer.(SerializableLayer)
+		if !ok {
+			return fmt.Errorf("layer %s is not serializable", layer.LayerType().String())
+		}
+		sls = append(sls, sl)
+	}
+	return SerializeLayers(buf, opts, sls...)
+}
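
The inside-out write pattern described for SerializeBuffer can be exercised directly with PrependBytes and AppendBytes; a minimal sketch follows (the packetdemo package and the buildFrame helper with its arguments are hypothetical).

```
package packetdemo

import "github.com/google/gopacket"

// buildFrame writes the payload first, then prepends the header in front of
// it, mirroring how layers serialize from the innermost layer outwards.
func buildFrame(header, payload []byte) ([]byte, error) {
	buf := gopacket.NewSerializeBuffer()

	// Append the innermost bytes; the returned slice must be overwritten.
	b, err := buf.AppendBytes(len(payload))
	if err != nil {
		return nil, err
	}
	copy(b, payload)

	// Prepend the header in front of what is already in the buffer.
	b, err = buf.PrependBytes(len(header))
	if err != nil {
		return nil, err
	}
	copy(b, header)

	// Bytes() now returns header followed by payload.
	return buf.Bytes(), nil
}
```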
diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml
new file mode 100644
index 0000000..d8156a6
--- /dev/null
+++ b/vendor/github.com/google/uuid/.travis.yml
@@ -0,0 +1,9 @@
+language: go
+
+go:
+  - 1.4.3
+  - 1.5.3
+  - tip
+
+script:
+  - go test -v ./...
diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md
new file mode 100644
index 0000000..04fdf09
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTING.md
@@ -0,0 +1,10 @@
+# How to contribute
+
+We definitely welcome patches and contribution to this project!
+
+### Legal requirements
+
+In order to protect both you and ourselves, you will need to sign the
+[Contributor License Agreement](https://cla.developers.google.com/clas).
+
+You may have already signed it for other Google projects.
diff --git a/vendor/github.com/google/uuid/CONTRIBUTORS b/vendor/github.com/google/uuid/CONTRIBUTORS
new file mode 100644
index 0000000..b4bb97f
--- /dev/null
+++ b/vendor/github.com/google/uuid/CONTRIBUTORS
@@ -0,0 +1,9 @@
+Paul Borman <borman@google.com>
+bmatsuo
+shawnps
+theory
+jboverfelt
+dsymonds
+cd1
+wallclockbuilder
+dansouza
diff --git a/vendor/github.com/google/uuid/LICENSE b/vendor/github.com/google/uuid/LICENSE
new file mode 100644
index 0000000..5dc6826
--- /dev/null
+++ b/vendor/github.com/google/uuid/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009,2014 Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+   * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md
new file mode 100644
index 0000000..f765a46
--- /dev/null
+++ b/vendor/github.com/google/uuid/README.md
@@ -0,0 +1,19 @@
+# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master)
+The uuid package generates and inspects UUIDs based on
+[RFC 4122](http://tools.ietf.org/html/rfc4122)
+and DCE 1.1: Authentication and Security Services. 
+
+This package is based on the github.com/pborman/uuid package (previously named
+code.google.com/p/go-uuid).  It differs from these earlier packages in that
+a UUID is a 16 byte array rather than a byte slice.  One loss due to this
+change is the ability to represent an invalid UUID (vs a NIL UUID).
+
+###### Install
+`go get github.com/google/uuid`
+
+###### Documentation 
+[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid)
+
+Full `go doc` style documentation for the package can be viewed online without
+installing this package by using the GoDoc site here: 
+http://pkg.go.dev/github.com/google/uuid
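+
+###### Example
+
+A minimal usage sketch (illustrative only; it exercises the `New`, `Parse`, and
+`NewSHA1` functions provided by this package):
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/google/uuid"
+)
+
+func main() {
+	// Generate a random (Version 4) UUID.
+	id := uuid.New()
+	fmt.Println(id)
+
+	// Parse an existing UUID from its canonical string form.
+	parsed, err := uuid.Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
+	if err != nil {
+		panic(err)
+	}
+	fmt.Println(parsed.Version(), parsed.Variant())
+
+	// Derive a deterministic, name-based (Version 5) UUID.
+	fmt.Println(uuid.NewSHA1(uuid.NameSpaceDNS, []byte("example.com")))
+}
+```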
diff --git a/vendor/github.com/google/uuid/dce.go b/vendor/github.com/google/uuid/dce.go
new file mode 100644
index 0000000..fa820b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/dce.go
@@ -0,0 +1,80 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"fmt"
+	"os"
+)
+
+// A Domain represents a Version 2 domain
+type Domain byte
+
+// Domain constants for DCE Security (Version 2) UUIDs.
+const (
+	Person = Domain(0)
+	Group  = Domain(1)
+	Org    = Domain(2)
+)
+
+// NewDCESecurity returns a DCE Security (Version 2) UUID.
+//
+// The domain should be one of Person, Group or Org.
+// On a POSIX system the id should be the user's UID for the Person
+// domain and the user's GID for the Group.  The meaning of id for
+// the domain Org or on non-POSIX systems is site defined.
+//
+// For a given domain/id pair the same token may be returned for up to
+// 7 minutes and 10 seconds.
+func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
+	uuid, err := NewUUID()
+	if err == nil {
+		uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
+		uuid[9] = byte(domain)
+		binary.BigEndian.PutUint32(uuid[0:], id)
+	}
+	return uuid, err
+}
+
+// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
+// domain with the id returned by os.Getuid.
+//
+//  NewDCESecurity(Person, uint32(os.Getuid()))
+func NewDCEPerson() (UUID, error) {
+	return NewDCESecurity(Person, uint32(os.Getuid()))
+}
+
+// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
+// domain with the id returned by os.Getgid.
+//
+//  NewDCESecurity(Group, uint32(os.Getgid()))
+func NewDCEGroup() (UUID, error) {
+	return NewDCESecurity(Group, uint32(os.Getgid()))
+}
+
+// Domain returns the domain for a Version 2 UUID.  Domains are only defined
+// for Version 2 UUIDs.
+func (uuid UUID) Domain() Domain {
+	return Domain(uuid[9])
+}
+
+// ID returns the id for a Version 2 UUID. IDs are only defined for Version 2
+// UUIDs.
+func (uuid UUID) ID() uint32 {
+	return binary.BigEndian.Uint32(uuid[0:4])
+}
+
+func (d Domain) String() string {
+	switch d {
+	case Person:
+		return "Person"
+	case Group:
+		return "Group"
+	case Org:
+		return "Org"
+	}
+	return fmt.Sprintf("Domain%d", int(d))
+}
diff --git a/vendor/github.com/google/uuid/doc.go b/vendor/github.com/google/uuid/doc.go
new file mode 100644
index 0000000..5b8a4b9
--- /dev/null
+++ b/vendor/github.com/google/uuid/doc.go
@@ -0,0 +1,12 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package uuid generates and inspects UUIDs.
+//
+// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security
+// Services.
+//
+// A UUID is a 16 byte (128 bit) array.  UUIDs may be used as keys to
+// maps or compared directly.
+package uuid
diff --git a/vendor/github.com/google/uuid/go.mod b/vendor/github.com/google/uuid/go.mod
new file mode 100644
index 0000000..fc84cd7
--- /dev/null
+++ b/vendor/github.com/google/uuid/go.mod
@@ -0,0 +1 @@
+module github.com/google/uuid
diff --git a/vendor/github.com/google/uuid/hash.go b/vendor/github.com/google/uuid/hash.go
new file mode 100644
index 0000000..b404f4b
--- /dev/null
+++ b/vendor/github.com/google/uuid/hash.go
@@ -0,0 +1,53 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"crypto/md5"
+	"crypto/sha1"
+	"hash"
+)
+
+// Well known namespace IDs and UUIDs
+var (
+	NameSpaceDNS  = Must(Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceURL  = Must(Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceOID  = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
+	NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
+	Nil           UUID // empty UUID, all zeros
+)
+
+// NewHash returns a new UUID derived from the hash of space concatenated with
+// data generated by h.  The hash should be at least 16 bytes in length.  The
+// first 16 bytes of the hash are used to form the UUID.  The version of the
+// UUID will be the lower 4 bits of version.  NewHash is used to implement
+// NewMD5 and NewSHA1.
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
+	h.Reset()
+	h.Write(space[:]) //nolint:errcheck
+	h.Write(data)     //nolint:errcheck
+	s := h.Sum(nil)
+	var uuid UUID
+	copy(uuid[:], s)
+	uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
+	return uuid
+}
+
+// NewMD5 returns a new MD5 (Version 3) UUID based on the
+// supplied name space and data.  It is the same as calling:
+//
+//  NewHash(md5.New(), space, data, 3)
+func NewMD5(space UUID, data []byte) UUID {
+	return NewHash(md5.New(), space, data, 3)
+}
+
+// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
+// supplied name space and data.  It is the same as calling:
+//
+//  NewHash(sha1.New(), space, data, 5)
+func NewSHA1(space UUID, data []byte) UUID {
+	return NewHash(sha1.New(), space, data, 5)
+}
diff --git a/vendor/github.com/google/uuid/marshal.go b/vendor/github.com/google/uuid/marshal.go
new file mode 100644
index 0000000..14bd340
--- /dev/null
+++ b/vendor/github.com/google/uuid/marshal.go
@@ -0,0 +1,38 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "fmt"
+
+// MarshalText implements encoding.TextMarshaler.
+func (uuid UUID) MarshalText() ([]byte, error) {
+	var js [36]byte
+	encodeHex(js[:], uuid)
+	return js[:], nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (uuid *UUID) UnmarshalText(data []byte) error {
+	id, err := ParseBytes(data)
+	if err != nil {
+		return err
+	}
+	*uuid = id
+	return nil
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (uuid UUID) MarshalBinary() ([]byte, error) {
+	return uuid[:], nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (uuid *UUID) UnmarshalBinary(data []byte) error {
+	if len(data) != 16 {
+		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+	}
+	copy(uuid[:], data)
+	return nil
+}
diff --git a/vendor/github.com/google/uuid/node.go b/vendor/github.com/google/uuid/node.go
new file mode 100644
index 0000000..d651a2b
--- /dev/null
+++ b/vendor/github.com/google/uuid/node.go
@@ -0,0 +1,90 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"sync"
+)
+
+var (
+	nodeMu sync.Mutex
+	ifname string  // name of interface being used
+	nodeID [6]byte // hardware for version 1 UUIDs
+	zeroID [6]byte // nodeID with only 0's
+)
+
+// NodeInterface returns the name of the interface from which the NodeID was
+// derived.  The interface "user" is returned if the NodeID was set by
+// SetNodeID.
+func NodeInterface() string {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return ifname
+}
+
+// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
+// If name is "" then the first usable interface found will be used or a random
+// Node ID will be generated.  If a named interface cannot be found then false
+// is returned.
+//
+// SetNodeInterface never fails when name is "".
+func SetNodeInterface(name string) bool {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	return setNodeInterface(name)
+}
+
+func setNodeInterface(name string) bool {
+	iname, addr := getHardwareInterface(name) // null implementation for js
+	if iname != "" && addr != nil {
+		ifname = iname
+		copy(nodeID[:], addr)
+		return true
+	}
+
+	// We found no interfaces with a valid hardware address.  If name
+	// does not specify a specific interface generate a random Node ID
+	// (section 4.1.6)
+	if name == "" {
+		ifname = "random"
+		randomBits(nodeID[:])
+		return true
+	}
+	return false
+}
+
+// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
+// if not already set.
+func NodeID() []byte {
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	nid := nodeID
+	return nid[:]
+}
+
+// SetNodeID sets the Node ID to be used for Version 1 UUIDs.  The first 6 bytes
+// of id are used.  If id is less than 6 bytes then false is returned and the
+// Node ID is not set.
+func SetNodeID(id []byte) bool {
+	if len(id) < 6 {
+		return false
+	}
+	defer nodeMu.Unlock()
+	nodeMu.Lock()
+	copy(nodeID[:], id)
+	ifname = "user"
+	return true
+}
+
+// NodeID returns the 6 byte node id encoded in uuid.  It returns nil if uuid is
+// not valid.  The NodeID is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) NodeID() []byte {
+	var node [6]byte
+	copy(node[:], uuid[10:])
+	return node[:]
+}
diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go
new file mode 100644
index 0000000..24b78ed
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_js.go
@@ -0,0 +1,12 @@
+// Copyright 2017 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build js
+
+package uuid
+
+// getHardwareInterface returns nil values for the JS version of the code.
+// This removes the "net" dependency, because it is not used in the browser.
+// Using the "net" library inflates the size of the transpiled JS code by 673k bytes.
+func getHardwareInterface(name string) (string, []byte) { return "", nil }
diff --git a/vendor/github.com/google/uuid/node_net.go b/vendor/github.com/google/uuid/node_net.go
new file mode 100644
index 0000000..0cbbcdd
--- /dev/null
+++ b/vendor/github.com/google/uuid/node_net.go
@@ -0,0 +1,33 @@
+// Copyright 2017 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !js
+
+package uuid
+
+import "net"
+
+var interfaces []net.Interface // cached list of interfaces
+
+// getHardwareInterface returns the name and hardware address of interface name.
+// If name is "" then the name and hardware address of one of the system's
+// interfaces is returned.  If no interfaces are found (name does not exist or
+// there are no interfaces) then "", nil is returned.
+//
+// Only addresses of at least 6 bytes are returned.
+func getHardwareInterface(name string) (string, []byte) {
+	if interfaces == nil {
+		var err error
+		interfaces, err = net.Interfaces()
+		if err != nil {
+			return "", nil
+		}
+	}
+	for _, ifs := range interfaces {
+		if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
+			return ifs.Name, ifs.HardwareAddr
+		}
+	}
+	return "", nil
+}
diff --git a/vendor/github.com/google/uuid/null.go b/vendor/github.com/google/uuid/null.go
new file mode 100644
index 0000000..d7fcbf2
--- /dev/null
+++ b/vendor/github.com/google/uuid/null.go
@@ -0,0 +1,118 @@
+// Copyright 2021 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"database/sql/driver"
+	"encoding/json"
+	"fmt"
+)
+
+var jsonNull = []byte("null")
+
+// NullUUID represents a UUID that may be null.
+// NullUUID implements the SQL driver.Scanner interface so
+// it can be used as a scan destination:
+//
+//  var u uuid.NullUUID
+//  err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
+//  ...
+//  if u.Valid {
+//     // use u.UUID
+//  } else {
+//     // NULL value
+//  }
+//
+type NullUUID struct {
+	UUID  UUID
+	Valid bool // Valid is true if UUID is not NULL
+}
+
+// Scan implements the SQL driver.Scanner interface.
+func (nu *NullUUID) Scan(value interface{}) error {
+	if value == nil {
+		nu.UUID, nu.Valid = Nil, false
+		return nil
+	}
+
+	err := nu.UUID.Scan(value)
+	if err != nil {
+		nu.Valid = false
+		return err
+	}
+
+	nu.Valid = true
+	return nil
+}
+
+// Value implements the driver Valuer interface.
+func (nu NullUUID) Value() (driver.Value, error) {
+	if !nu.Valid {
+		return nil, nil
+	}
+	// Delegate to UUID Value function
+	return nu.UUID.Value()
+}
+
+// MarshalBinary implements encoding.BinaryMarshaler.
+func (nu NullUUID) MarshalBinary() ([]byte, error) {
+	if nu.Valid {
+		return nu.UUID[:], nil
+	}
+
+	return []byte(nil), nil
+}
+
+// UnmarshalBinary implements encoding.BinaryUnmarshaler.
+func (nu *NullUUID) UnmarshalBinary(data []byte) error {
+	if len(data) != 16 {
+		return fmt.Errorf("invalid UUID (got %d bytes)", len(data))
+	}
+	copy(nu.UUID[:], data)
+	nu.Valid = true
+	return nil
+}
+
+// MarshalText implements encoding.TextMarshaler.
+func (nu NullUUID) MarshalText() ([]byte, error) {
+	if nu.Valid {
+		return nu.UUID.MarshalText()
+	}
+
+	return jsonNull, nil
+}
+
+// UnmarshalText implements encoding.TextUnmarshaler.
+func (nu *NullUUID) UnmarshalText(data []byte) error {
+	id, err := ParseBytes(data)
+	if err != nil {
+		nu.Valid = false
+		return err
+	}
+	nu.UUID = id
+	nu.Valid = true
+	return nil
+}
+
+// MarshalJSON implements json.Marshaler.
+func (nu NullUUID) MarshalJSON() ([]byte, error) {
+	if nu.Valid {
+		return json.Marshal(nu.UUID)
+	}
+
+	return jsonNull, nil
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (nu *NullUUID) UnmarshalJSON(data []byte) error {
+	if bytes.Equal(data, jsonNull) {
+		*nu = NullUUID{}
+		return nil // valid null UUID
+	}
+	err := json.Unmarshal(data, &nu.UUID)
+	nu.Valid = err == nil
+	return err
+}
diff --git a/vendor/github.com/google/uuid/sql.go b/vendor/github.com/google/uuid/sql.go
new file mode 100644
index 0000000..2e02ec0
--- /dev/null
+++ b/vendor/github.com/google/uuid/sql.go
@@ -0,0 +1,59 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"database/sql/driver"
+	"fmt"
+)
+
+// Scan implements sql.Scanner so UUIDs can be read from databases transparently.
+// Currently, database types that map to string and []byte are supported. Please
+// consult database-specific driver documentation for matching types.
+func (uuid *UUID) Scan(src interface{}) error {
+	switch src := src.(type) {
+	case nil:
+		return nil
+
+	case string:
+		// if an empty UUID comes from a table, we return a null UUID
+		if src == "" {
+			return nil
+		}
+
+		// see Parse for required string format
+		u, err := Parse(src)
+		if err != nil {
+			return fmt.Errorf("Scan: %v", err)
+		}
+
+		*uuid = u
+
+	case []byte:
+		// if an empty UUID comes from a table, we return a null UUID
+		if len(src) == 0 {
+			return nil
+		}
+
+		// assumes a simple slice of bytes if 16 bytes
+		// otherwise attempts to parse
+		if len(src) != 16 {
+			return uuid.Scan(string(src))
+		}
+		copy((*uuid)[:], src)
+
+	default:
+		return fmt.Errorf("Scan: unable to scan type %T into UUID", src)
+	}
+
+	return nil
+}
+
+// Value implements sql.Valuer so that UUIDs can be written to databases
+// transparently. Currently, UUIDs map to strings. Please consult
+// database-specific driver documentation for matching types.
+func (uuid UUID) Value() (driver.Value, error) {
+	return uuid.String(), nil
+}
diff --git a/vendor/github.com/google/uuid/time.go b/vendor/github.com/google/uuid/time.go
new file mode 100644
index 0000000..e6ef06c
--- /dev/null
+++ b/vendor/github.com/google/uuid/time.go
@@ -0,0 +1,123 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+	"sync"
+	"time"
+)
+
+// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
+// 1582.
+type Time int64
+
+const (
+	lillian    = 2299160          // Julian day of 15 Oct 1582
+	unix       = 2440587          // Julian day of 1 Jan 1970
+	epoch      = unix - lillian   // Days between epochs
+	g1582      = epoch * 86400    // seconds between epochs
+	g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
+)
+
+var (
+	timeMu   sync.Mutex
+	lasttime uint64 // last time we returned
+	clockSeq uint16 // clock sequence for this run
+
+	timeNow = time.Now // for testing
+)
+
+// UnixTime converts t to the number of seconds and nanoseconds using the Unix
+// epoch of 1 Jan 1970.
+func (t Time) UnixTime() (sec, nsec int64) {
+	sec = int64(t - g1582ns100)
+	nsec = (sec % 10000000) * 100
+	sec /= 10000000
+	return sec, nsec
+}
+
+// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
+// clock sequence as well as adjusting the clock sequence as needed.  An error
+// is returned if the current time cannot be determined.
+func GetTime() (Time, uint16, error) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return getTime()
+}
+
+func getTime() (Time, uint16, error) {
+	t := timeNow()
+
+	// If we don't have a clock sequence already, set one.
+	if clockSeq == 0 {
+		setClockSequence(-1)
+	}
+	now := uint64(t.UnixNano()/100) + g1582ns100
+
+	// If time has gone backwards with this clock sequence then we
+	// increment the clock sequence
+	if now <= lasttime {
+		clockSeq = ((clockSeq + 1) & 0x3fff) | 0x8000
+	}
+	lasttime = now
+	return Time(now), clockSeq, nil
+}
+
+// ClockSequence returns the current clock sequence, generating one if not
+// already set.  The clock sequence is only used for Version 1 UUIDs.
+//
+// The uuid package does not use global static storage for the clock sequence or
+// the last time a UUID was generated.  Unless SetClockSequence is used, a new
+// random clock sequence is generated the first time a clock sequence is
+// requested by ClockSequence, GetTime, or NewUUID.  (section 4.2.1.1)
+func ClockSequence() int {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	return clockSequence()
+}
+
+func clockSequence() int {
+	if clockSeq == 0 {
+		setClockSequence(-1)
+	}
+	return int(clockSeq & 0x3fff)
+}
+
+// SetClockSequence sets the clock sequence to the lower 14 bits of seq.  Setting to
+// -1 causes a new sequence to be generated.
+func SetClockSequence(seq int) {
+	defer timeMu.Unlock()
+	timeMu.Lock()
+	setClockSequence(seq)
+}
+
+func setClockSequence(seq int) {
+	if seq == -1 {
+		var b [2]byte
+		randomBits(b[:]) // clock sequence
+		seq = int(b[0])<<8 | int(b[1])
+	}
+	oldSeq := clockSeq
+	clockSeq = uint16(seq&0x3fff) | 0x8000 // Set our variant
+	if oldSeq != clockSeq {
+		lasttime = 0
+	}
+}
+
+// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
+// uuid.  The time is only defined for version 1 and 2 UUIDs.
+func (uuid UUID) Time() Time {
+	time := int64(binary.BigEndian.Uint32(uuid[0:4]))
+	time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
+	time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
+	return Time(time)
+}
+
+// ClockSequence returns the clock sequence encoded in uuid.
+// The clock sequence is only well defined for version 1 and 2 UUIDs.
+func (uuid UUID) ClockSequence() int {
+	return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff
+}
diff --git a/vendor/github.com/google/uuid/util.go b/vendor/github.com/google/uuid/util.go
new file mode 100644
index 0000000..5ea6c73
--- /dev/null
+++ b/vendor/github.com/google/uuid/util.go
@@ -0,0 +1,43 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"io"
+)
+
+// randomBits completely fills slice b with random data.
+func randomBits(b []byte) {
+	if _, err := io.ReadFull(rander, b); err != nil {
+		panic(err.Error()) // rand should never fail
+	}
+}
+
+// xvalues returns the value of a byte as a hexadecimal digit or 255.
+var xvalues = [256]byte{
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+	255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
+}
+
+// xtob converts hex characters x1 and x2 into a byte.
+func xtob(x1, x2 byte) (byte, bool) {
+	b1 := xvalues[x1]
+	b2 := xvalues[x2]
+	return (b1 << 4) | b2, b1 != 255 && b2 != 255
+}
diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go
new file mode 100644
index 0000000..a57207a
--- /dev/null
+++ b/vendor/github.com/google/uuid/uuid.go
@@ -0,0 +1,294 @@
+// Copyright 2018 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"bytes"
+	"crypto/rand"
+	"encoding/hex"
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"sync"
+)
+
+// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
+// 4122.
+type UUID [16]byte
+
+// A Version represents a UUID's version.
+type Version byte
+
+// A Variant represents a UUID's variant.
+type Variant byte
+
+// Constants returned by Variant.
+const (
+	Invalid   = Variant(iota) // Invalid UUID
+	RFC4122                   // The variant specified in RFC4122
+	Reserved                  // Reserved, NCS backward compatibility.
+	Microsoft                 // Reserved, Microsoft Corporation backward compatibility.
+	Future                    // Reserved for future definition.
+)
+
+const randPoolSize = 16 * 16
+
+var (
+	rander      = rand.Reader // random function
+	poolEnabled = false
+	poolMu      sync.Mutex
+	poolPos     = randPoolSize     // protected with poolMu
+	pool        [randPoolSize]byte // protected with poolMu
+)
+
+type invalidLengthError struct{ len int }
+
+func (err invalidLengthError) Error() string {
+	return fmt.Sprintf("invalid UUID length: %d", err.len)
+}
+
+// IsInvalidLengthError is a matcher function for the custom error invalidLengthError.
+func IsInvalidLengthError(err error) bool {
+	_, ok := err.(invalidLengthError)
+	return ok
+}
+
+// Parse decodes s into a UUID or returns an error.  Both the standard UUID
+// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the
+// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex
+// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx.
+func Parse(s string) (UUID, error) {
+	var uuid UUID
+	switch len(s) {
+	// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	case 36:
+
+	// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	case 36 + 9:
+		if strings.ToLower(s[:9]) != "urn:uuid:" {
+			return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9])
+		}
+		s = s[9:]
+
+	// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+	case 36 + 2:
+		s = s[1:]
+
+	// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+	case 32:
+		var ok bool
+		for i := range uuid {
+			uuid[i], ok = xtob(s[i*2], s[i*2+1])
+			if !ok {
+				return uuid, errors.New("invalid UUID format")
+			}
+		}
+		return uuid, nil
+	default:
+		return uuid, invalidLengthError{len(s)}
+	}
+	// s is now at least 36 bytes long
+	// it must be of the form  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
+		return uuid, errors.New("invalid UUID format")
+	}
+	for i, x := range [16]int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		v, ok := xtob(s[x], s[x+1])
+		if !ok {
+			return uuid, errors.New("invalid UUID format")
+		}
+		uuid[i] = v
+	}
+	return uuid, nil
+}
+
+// ParseBytes is like Parse, except it parses a byte slice instead of a string.
+func ParseBytes(b []byte) (UUID, error) {
+	var uuid UUID
+	switch len(b) {
+	case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+		if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) {
+			return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9])
+		}
+		b = b[9:]
+	case 36 + 2: // {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
+		b = b[1:]
+	case 32: // xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
+		var ok bool
+		for i := 0; i < 32; i += 2 {
+			uuid[i/2], ok = xtob(b[i], b[i+1])
+			if !ok {
+				return uuid, errors.New("invalid UUID format")
+			}
+		}
+		return uuid, nil
+	default:
+		return uuid, invalidLengthError{len(b)}
+	}
+	// s is now at least 36 bytes long
+	// it must be of the form  xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+	if b[8] != '-' || b[13] != '-' || b[18] != '-' || b[23] != '-' {
+		return uuid, errors.New("invalid UUID format")
+	}
+	for i, x := range [16]int{
+		0, 2, 4, 6,
+		9, 11,
+		14, 16,
+		19, 21,
+		24, 26, 28, 30, 32, 34} {
+		v, ok := xtob(b[x], b[x+1])
+		if !ok {
+			return uuid, errors.New("invalid UUID format")
+		}
+		uuid[i] = v
+	}
+	return uuid, nil
+}
+
+// MustParse is like Parse but panics if the string cannot be parsed.
+// It simplifies safe initialization of global variables holding compiled UUIDs.
+func MustParse(s string) UUID {
+	uuid, err := Parse(s)
+	if err != nil {
+		panic(`uuid: Parse(` + s + `): ` + err.Error())
+	}
+	return uuid
+}
+
+// FromBytes creates a new UUID from a byte slice. Returns an error if the slice
+// does not have a length of 16. The bytes are copied from the slice.
+func FromBytes(b []byte) (uuid UUID, err error) {
+	err = uuid.UnmarshalBinary(b)
+	return uuid, err
+}
+
+// Must returns uuid if err is nil and panics otherwise.
+func Must(uuid UUID, err error) UUID {
+	if err != nil {
+		panic(err)
+	}
+	return uuid
+}
+
+// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
+// , or "" if uuid is invalid.
+func (uuid UUID) String() string {
+	var buf [36]byte
+	encodeHex(buf[:], uuid)
+	return string(buf[:])
+}
+
+// URN returns the RFC 2141 URN form of uuid,
+// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,  or "" if uuid is invalid.
+func (uuid UUID) URN() string {
+	var buf [36 + 9]byte
+	copy(buf[:], "urn:uuid:")
+	encodeHex(buf[9:], uuid)
+	return string(buf[:])
+}
+
+func encodeHex(dst []byte, uuid UUID) {
+	hex.Encode(dst, uuid[:4])
+	dst[8] = '-'
+	hex.Encode(dst[9:13], uuid[4:6])
+	dst[13] = '-'
+	hex.Encode(dst[14:18], uuid[6:8])
+	dst[18] = '-'
+	hex.Encode(dst[19:23], uuid[8:10])
+	dst[23] = '-'
+	hex.Encode(dst[24:], uuid[10:])
+}
+
+// Variant returns the variant encoded in uuid.
+func (uuid UUID) Variant() Variant {
+	switch {
+	case (uuid[8] & 0xc0) == 0x80:
+		return RFC4122
+	case (uuid[8] & 0xe0) == 0xc0:
+		return Microsoft
+	case (uuid[8] & 0xe0) == 0xe0:
+		return Future
+	default:
+		return Reserved
+	}
+}
+
+// Version returns the version of uuid.
+func (uuid UUID) Version() Version {
+	return Version(uuid[6] >> 4)
+}
+
+func (v Version) String() string {
+	if v > 15 {
+		return fmt.Sprintf("BAD_VERSION_%d", v)
+	}
+	return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+	switch v {
+	case RFC4122:
+		return "RFC4122"
+	case Reserved:
+		return "Reserved"
+	case Microsoft:
+		return "Microsoft"
+	case Future:
+		return "Future"
+	case Invalid:
+		return "Invalid"
+	}
+	return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}
+
+// EnableRandPool enables internal randomness pool used for Random
+// (Version 4) UUID generation. The pool contains random bytes read from
+// the random number generator on demand in batches. Enabling the pool
+// may improve the UUID generation throughput significantly.
+//
+// Since the pool is stored on the Go heap, this feature may be a bad fit
+// for security sensitive applications.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func EnableRandPool() {
+	poolEnabled = true
+}
+
+// DisableRandPool disables the randomness pool if it was previously
+// enabled with EnableRandPool.
+//
+// Both EnableRandPool and DisableRandPool are not thread-safe and should
+// only be called when there is no possibility that New or any other
+// UUID Version 4 generation function will be called concurrently.
+func DisableRandPool() {
+	poolEnabled = false
+	defer poolMu.Unlock()
+	poolMu.Lock()
+	poolPos = randPoolSize
+}
diff --git a/vendor/github.com/google/uuid/version1.go b/vendor/github.com/google/uuid/version1.go
new file mode 100644
index 0000000..4631096
--- /dev/null
+++ b/vendor/github.com/google/uuid/version1.go
@@ -0,0 +1,44 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time.  If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically.  If the NodeID cannot
+// be set NewUUID returns nil.  If clock sequence has not been set by
+// SetClockSequence then it will be set automatically.  If GetTime fails to
+// return the current time, NewUUID returns nil and an error.
+//
+// In most cases, New should be used.
+func NewUUID() (UUID, error) {
+	var uuid UUID
+	now, seq, err := GetTime()
+	if err != nil {
+		return uuid, err
+	}
+
+	timeLow := uint32(now & 0xffffffff)
+	timeMid := uint16((now >> 32) & 0xffff)
+	timeHi := uint16((now >> 48) & 0x0fff)
+	timeHi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], timeLow)
+	binary.BigEndian.PutUint16(uuid[4:], timeMid)
+	binary.BigEndian.PutUint16(uuid[6:], timeHi)
+	binary.BigEndian.PutUint16(uuid[8:], seq)
+
+	nodeMu.Lock()
+	if nodeID == zeroID {
+		setNodeInterface("")
+	}
+	copy(uuid[10:], nodeID[:])
+	nodeMu.Unlock()
+
+	return uuid, nil
+}
diff --git a/vendor/github.com/google/uuid/version4.go b/vendor/github.com/google/uuid/version4.go
new file mode 100644
index 0000000..7697802
--- /dev/null
+++ b/vendor/github.com/google/uuid/version4.go
@@ -0,0 +1,76 @@
+// Copyright 2016 Google Inc.  All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import "io"
+
+// New creates a new random UUID or panics.  New is equivalent to
+// the expression
+//
+//    uuid.Must(uuid.NewRandom())
+func New() UUID {
+	return Must(NewRandom())
+}
+
+// NewString creates a new random UUID and returns it as a string or panics.
+// NewString is equivalent to the expression
+//
+//    uuid.New().String()
+func NewString() string {
+	return Must(NewRandom()).String()
+}
+
+// NewRandom returns a Random (Version 4) UUID.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// Uses the randomness pool if it was enabled with EnableRandPool.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+//  Randomly generated UUIDs have 122 random bits.  One's annual risk of being
+//  hit by a meteorite is estimated to be one chance in 17 billion, that
+//  means the probability is about 0.00000000006 (6 × 10−11),
+//  equivalent to the odds of creating a few tens of trillions of UUIDs in a
+//  year and having one duplicate.
+func NewRandom() (UUID, error) {
+	if !poolEnabled {
+		return NewRandomFromReader(rander)
+	}
+	return newRandomFromPool()
+}
+
+// NewRandomFromReader returns a UUID based on bytes read from a given io.Reader.
+func NewRandomFromReader(r io.Reader) (UUID, error) {
+	var uuid UUID
+	_, err := io.ReadFull(r, uuid[:])
+	if err != nil {
+		return Nil, err
+	}
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}
+
+func newRandomFromPool() (UUID, error) {
+	var uuid UUID
+	poolMu.Lock()
+	if poolPos == randPoolSize {
+		_, err := io.ReadFull(rander, pool[:])
+		if err != nil {
+			poolMu.Unlock()
+			return Nil, err
+		}
+		poolPos = 0
+	}
+	copy(uuid[:], pool[poolPos:(poolPos+16)])
+	poolPos += 16
+	poolMu.Unlock()
+
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid, nil
+}
diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS
new file mode 100644
index 0000000..b722392
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/AUTHORS
@@ -0,0 +1,8 @@
+# This is the official list of gorilla/mux authors for copyright purposes.
+#
+# Please keep the list sorted.
+
+Google LLC (https://opensource.google.com/)
+Kamil Kisielk <kamil@kamilkisiel.net>
+Matt Silverlock <matt@eatsleeprepeat.net>
+Rodrigo Moraes (https://github.com/moraes)
diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE
new file mode 100644
index 0000000..6903df6
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+	 * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+	 * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+	 * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md
new file mode 100644
index 0000000..35eea9f
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/README.md
@@ -0,0 +1,805 @@
+# gorilla/mux
+
+[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux)
+[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux)
+[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge)
+
+![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png)
+
+https://www.gorillatoolkit.org/pkg/mux
+
+Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to
+their respective handler.
+
+The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are:
+
+* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`.
+* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers.
+* URL hosts, paths and query values can have variables with an optional regular expression.
+* Registered URLs can be built, or "reversed", which helps maintaining references to resources.
+* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching.
+
+---
+
+* [Install](#install)
+* [Examples](#examples)
+* [Matching Routes](#matching-routes)
+* [Static Files](#static-files)
+* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.)
+* [Registered URLs](#registered-urls)
+* [Walking Routes](#walking-routes)
+* [Graceful Shutdown](#graceful-shutdown)
+* [Middleware](#middleware)
+* [Handling CORS Requests](#handling-cors-requests)
+* [Testing Handlers](#testing-handlers)
+* [Full Example](#full-example)
+
+---
+
+## Install
+
+With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain:
+
+```sh
+go get -u github.com/gorilla/mux
+```
+
+## Examples
+
+Let's start registering a couple of URL paths and handlers:
+
+```go
+func main() {
+    r := mux.NewRouter()
+    r.HandleFunc("/", HomeHandler)
+    r.HandleFunc("/products", ProductsHandler)
+    r.HandleFunc("/articles", ArticlesHandler)
+    http.Handle("/", r)
+}
+```
+
+Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters.
+
+Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/products/{key}", ProductHandler)
+r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`:
+
+```go
+func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) {
+    vars := mux.Vars(r)
+    w.WriteHeader(http.StatusOK)
+    fmt.Fprintf(w, "Category: %v\n", vars["category"])
+}
+```
+
+And this is all you need to know about the basic usage. More advanced options are explained below.
+
+### Matching Routes
+
+Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables:
+
+```go
+r := mux.NewRouter()
+// Only matches if domain is "www.example.com".
+r.Host("www.example.com")
+// Matches a dynamic subdomain.
+r.Host("{subdomain:[a-z]+}.example.com")
+```
+
+There are several other matchers that can be added. To match path prefixes:
+
+```go
+r.PathPrefix("/products/")
+```
+
+...or HTTP methods:
+
+```go
+r.Methods("GET", "POST")
+```
+
+...or URL schemes:
+
+```go
+r.Schemes("https")
+```
+
+...or header values:
+
+```go
+r.Headers("X-Requested-With", "XMLHttpRequest")
+```
+
+...or query values:
+
+```go
+r.Queries("key", "value")
+```
+
+...or to use a custom matcher function:
+
+```go
+r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+    return r.ProtoMajor == 0
+})
+```
+
+...and finally, it is possible to combine several matchers in a single route:
+
+```go
+r.HandleFunc("/products", ProductsHandler).
+  Host("www.example.com").
+  Methods("GET").
+  Schemes("http")
+```
+
+Routes are tested in the order they were added to the router. If two routes match, the first one wins:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/specific", specificHandler)
+r.PathPrefix("/").Handler(catchAllHandler)
+```
+
+Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the host is `www.example.com`. Create a route for that host and get a "subrouter" from it:
+
+```go
+r := mux.NewRouter()
+s := r.Host("www.example.com").Subrouter()
+```
+
+Then register routes in the subrouter:
+
+```go
+s.HandleFunc("/products/", ProductsHandler)
+s.HandleFunc("/products/{key}", ProductHandler)
+s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+```
+
+The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register their paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths:
+
+```go
+r := mux.NewRouter()
+s := r.PathPrefix("/products").Subrouter()
+// "/products/"
+s.HandleFunc("/", ProductsHandler)
+// "/products/{key}/"
+s.HandleFunc("/{key}/", ProductHandler)
+// "/products/{key}/details"
+s.HandleFunc("/{key}/details", ProductDetailsHandler)
+```
+
+
+### Static Files
+
+Note that the path provided to `PathPrefix()` represents a "wildcard": calling
+`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
+request that matches "/static/\*". This makes it easy to serve static files with mux:
+
+```go
+func main() {
+    var dir string
+
+    flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+    flag.Parse()
+    r := mux.NewRouter()
+
+    // This will serve files under http://localhost:8000/static/<filename>
+    r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+    srv := &http.Server{
+        Handler:      r,
+        Addr:         "127.0.0.1:8000",
+        // Good practice: enforce timeouts for servers you create!
+        WriteTimeout: 15 * time.Second,
+        ReadTimeout:  15 * time.Second,
+    }
+
+    log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Serving Single Page Applications
+
+Most of the time it makes sense to serve your SPA on a separate web server from your API,
+but sometimes it's desirable to serve them both from one place. It's possible to write a simple
+handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage
+mux's powerful routing for your API endpoints.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"log"
+	"net/http"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/gorilla/mux"
+)
+
+// spaHandler implements the http.Handler interface, so we can use it
+// to respond to HTTP requests. The path to the static directory and
+// path to the index file within that static directory are used to
+// serve the SPA in the given static directory.
+type spaHandler struct {
+	staticPath string
+	indexPath  string
+}
+
+// ServeHTTP inspects the URL path to locate a file within the static dir
+// on the SPA handler. If a file is found, it will be served. If not, the
+// file located at the index path on the SPA handler will be served. This
+// is suitable behavior for serving an SPA (single page application).
+func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+    // get the absolute path to prevent directory traversal
+	path, err := filepath.Abs(r.URL.Path)
+	if err != nil {
+        // if we failed to get the absolute path respond with a 400 bad request
+        // and stop
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+    // prepend the path with the path to the static directory
+	path = filepath.Join(h.staticPath, path)
+
+    // check whether a file exists at the given path
+	_, err = os.Stat(path)
+	if os.IsNotExist(err) {
+		// file does not exist, serve index.html
+		http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))
+		return
+	} else if err != nil {
+        // if we got an error (that wasn't that the file doesn't exist) stating the
+        // file, return a 500 internal server error and stop
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+    // otherwise, use http.FileServer to serve the static dir
+	http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r)
+}
+
+func main() {
+	router := mux.NewRouter()
+
+	router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) {
+		// an example API handler
+		json.NewEncoder(w).Encode(map[string]bool{"ok": true})
+	})
+
+	spa := spaHandler{staticPath: "build", indexPath: "index.html"}
+	router.PathPrefix("/").Handler(spa)
+
+	srv := &http.Server{
+		Handler: router,
+		Addr:    "127.0.0.1:8000",
+		// Good practice: enforce timeouts for servers you create!
+		WriteTimeout: 15 * time.Second,
+		ReadTimeout:  15 * time.Second,
+	}
+
+	log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Registered URLs
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+  Name("article")
+```
+
+To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
+
+```go
+url, err := r.Get("article").URL("category", "technology", "id", "42")
+```
+
+...and the result will be a `url.URL` with the following path:
+
+```
+"/articles/technology/42"
+```
+
+This also works for host and query value variables:
+
+```go
+r := mux.NewRouter()
+r.Host("{subdomain}.example.com").
+  Path("/articles/{category}/{id:[0-9]+}").
+  Queries("filter", "{filter}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42",
+                                 "filter", "gorilla")
+```
+
+All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
+
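+As a sketch of what that guarantee means in practice (reusing the `article` route registered above), passing a value that does not satisfy the `[0-9]+` pattern for `id` makes `URL()` return an error instead of a URL:
+
+```go
+// "id" is constrained to [0-9]+ by the route, so no URL is built here
+// and err is non-nil.
+_, err := r.Get("article").URL("category", "technology", "id", "forty-two")
+if err != nil {
+    // handle the non-conforming value
+}
+```
+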
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+```go
+r.HeadersRegexp("Content-Type", "application/(text|json)")
+```
+
+...and the route will match requests with a Content-Type of `application/json` as well as `application/text`.
+
+There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
+
+```go
+// "http://news.example.com/"
+host, err := r.Get("article").URLHost("subdomain", "news")
+
+// "/articles/technology/42"
+path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+```
+
+And if you use subrouters, host and path defined separately can be built as well:
+
+```go
+r := mux.NewRouter()
+s := r.Host("{subdomain}.example.com").Subrouter()
+s.Path("/articles/{category}/{id:[0-9]+}").
+  HandlerFunc(ArticleHandler).
+  Name("article")
+
+// "http://news.example.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+                                 "category", "technology",
+                                 "id", "42")
+```
+
+### Walking Routes
+
+The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
+the following prints all of the registered routes:
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	return
+}
+
+func main() {
+	r := mux.NewRouter()
+	r.HandleFunc("/", handler)
+	r.HandleFunc("/products", handler).Methods("POST")
+	r.HandleFunc("/articles", handler).Methods("GET")
+	r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
+	r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
+	err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+		pathTemplate, err := route.GetPathTemplate()
+		if err == nil {
+			fmt.Println("ROUTE:", pathTemplate)
+		}
+		pathRegexp, err := route.GetPathRegexp()
+		if err == nil {
+			fmt.Println("Path regexp:", pathRegexp)
+		}
+		queriesTemplates, err := route.GetQueriesTemplates()
+		if err == nil {
+			fmt.Println("Queries templates:", strings.Join(queriesTemplates, ","))
+		}
+		queriesRegexps, err := route.GetQueriesRegexp()
+		if err == nil {
+			fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ","))
+		}
+		methods, err := route.GetMethods()
+		if err == nil {
+			fmt.Println("Methods:", strings.Join(methods, ","))
+		}
+		fmt.Println()
+		return nil
+	})
+
+	if err != nil {
+		fmt.Println(err)
+	}
+
+	http.Handle("/", r)
+}
+```
+
+### Graceful Shutdown
+
+Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
+
+```go
+package main
+
+import (
+    "context"
+    "flag"
+    "log"
+    "net/http"
+    "os"
+    "os/signal"
+    "time"
+
+    "github.com/gorilla/mux"
+)
+
+func main() {
+    var wait time.Duration
+    flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully wait for existing connections to finish - e.g. 15s or 1m")
+    flag.Parse()
+
+    r := mux.NewRouter()
+    // Add your routes as needed
+
+    srv := &http.Server{
+        Addr:         "0.0.0.0:8080",
+        // Good practice to set timeouts to avoid Slowloris attacks.
+        WriteTimeout: time.Second * 15,
+        ReadTimeout:  time.Second * 15,
+        IdleTimeout:  time.Second * 60,
+        Handler: r, // Pass our instance of gorilla/mux in.
+    }
+
+    // Run our server in a goroutine so that it doesn't block.
+    go func() {
+        if err := srv.ListenAndServe(); err != nil {
+            log.Println(err)
+        }
+    }()
+
+    c := make(chan os.Signal, 1)
+    // We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C)
+    // SIGKILL, SIGQUIT or SIGTERM (Ctrl+/) will not be caught.
+    signal.Notify(c, os.Interrupt)
+
+    // Block until we receive our signal.
+    <-c
+
+    // Create a deadline to wait for.
+    ctx, cancel := context.WithTimeout(context.Background(), wait)
+    defer cancel()
+    // Doesn't block if no connections, but will otherwise wait
+    // until the timeout deadline.
+    srv.Shutdown(ctx)
+    // Optionally, you could run srv.Shutdown in a goroutine and block on
+    // <-ctx.Done() if your application should wait for other services
+    // to finalize based on context cancellation.
+    log.Println("shutting down")
+    os.Exit(0)
+}
+```
+
+### Middleware
+
+Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters.
+Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
+
+Mux middlewares are defined using the de facto standard type:
+
+```go
+type MiddlewareFunc func(http.Handler) http.Handler
+```
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as a parameter to the MiddlewareFunc. This takes advantage of closures being able to access variables from the context where they are created, while retaining the signature enforced by the receivers.
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+```go
+func loggingMiddleware(next http.Handler) http.Handler {
+    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        // Do stuff here
+        log.Println(r.RequestURI)
+        // Call the next handler, which can be another middleware in the chain, or the final handler.
+        next.ServeHTTP(w, r)
+    })
+}
+```
+
+Middlewares can be added to a router using `Router.Use()`:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+r.Use(loggingMiddleware)
+```
+
+A more complex authentication middleware, which maps session token to users, could be written as:
+
+```go
+// Define our struct
+type authenticationMiddleware struct {
+	tokenUsers map[string]string
+}
+
+// Initialize it somewhere
+func (amw *authenticationMiddleware) Populate() {
+	amw.tokenUsers["00000000"] = "user0"
+	amw.tokenUsers["aaaaaaaa"] = "userA"
+	amw.tokenUsers["05f717e5"] = "randomUser"
+	amw.tokenUsers["deadbeef"] = "user0"
+}
+
+// Middleware function, which will be called for each request
+func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+        token := r.Header.Get("X-Session-Token")
+
+        if user, found := amw.tokenUsers[token]; found {
+        	// We found the token in our map
+        	log.Printf("Authenticated user %s\n", user)
+        	// Pass down the request to the next middleware (or final handler)
+        	next.ServeHTTP(w, r)
+        } else {
+        	// Write an error and stop the handler chain
+        	http.Error(w, "Forbidden", http.StatusForbidden)
+        }
+    })
+}
+```
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+
+amw := authenticationMiddleware{}
+amw.Populate()
+
+r.Use(amw.Middleware)
+```
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
+
+### Handling CORS Requests
+
+[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header.
+
+* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin`
+* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route
+* If you do not specify any methods, then:
+> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers.
+
+Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers:
+
+```go
+package main
+
+import (
+	"net/http"
+	"github.com/gorilla/mux"
+)
+
+func main() {
+    r := mux.NewRouter()
+
+    // IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers
+    r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions)
+    r.Use(mux.CORSMethodMiddleware(r))
+    
+    http.ListenAndServe(":8080", r)
+}
+
+func fooHandler(w http.ResponseWriter, r *http.Request) {
+    w.Header().Set("Access-Control-Allow-Origin", "*")
+    if r.Method == http.MethodOptions {
+        return
+    }
+
+    w.Write([]byte("foo"))
+}
+```
+
+A request to `/foo` using something like:
+
+```bash
+curl localhost:8080/foo -v
+```
+
+Would look like:
+
+```bash
+*   Trying ::1...
+* TCP_NODELAY set
+* Connected to localhost (::1) port 8080 (#0)
+> GET /foo HTTP/1.1
+> Host: localhost:8080
+> User-Agent: curl/7.59.0
+> Accept: */*
+> 
+< HTTP/1.1 200 OK
+< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS
+< Access-Control-Allow-Origin: *
+< Date: Fri, 28 Jun 2019 20:13:30 GMT
+< Content-Length: 3
+< Content-Type: text/plain; charset=utf-8
+< 
+* Connection #0 to host localhost left intact
+foo
+```
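+
+The same behaviour can also be verified in a test rather than with curl. The sketch below is not part of the original example; it reuses the `fooHandler` and route registration from above and checks the header with `net/http/httptest`:
+
+```go
+package main
+
+import (
+    "net/http"
+    "net/http/httptest"
+    "testing"
+
+    "github.com/gorilla/mux"
+)
+
+func TestFooAllowedMethods(t *testing.T) {
+    r := mux.NewRouter()
+    r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions)
+    r.Use(mux.CORSMethodMiddleware(r))
+
+    req := httptest.NewRequest(http.MethodGet, "/foo", nil)
+    rr := httptest.NewRecorder()
+    r.ServeHTTP(rr, req)
+
+    // CORSMethodMiddleware advertises every method matcher registered on the route.
+    if got := rr.Header().Get("Access-Control-Allow-Methods"); got != "GET,PUT,PATCH,OPTIONS" {
+        t.Errorf("unexpected Access-Control-Allow-Methods: %q", got)
+    }
+}
+```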
+
+### Testing Handlers
+
+Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
+
+First, our simple HTTP handler:
+
+```go
+// endpoints.go
+package main
+
+import (
+    "io"
+    "log"
+    "net/http"
+
+    "github.com/gorilla/mux"
+)
+
+func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
+    // A very simple health check.
+    w.Header().Set("Content-Type", "application/json")
+    w.WriteHeader(http.StatusOK)
+
+    // In the future we could report back on the status of our DB, or our cache
+    // (e.g. Redis) by performing a simple PING, and include them in the response.
+    io.WriteString(w, `{"alive": true}`)
+}
+
+func main() {
+    r := mux.NewRouter()
+    r.HandleFunc("/health", HealthCheckHandler)
+
+    log.Fatal(http.ListenAndServe("localhost:8080", r))
+}
+```
+
+Our test code:
+
+```go
+// endpoints_test.go
+package main
+
+import (
+    "net/http"
+    "net/http/httptest"
+    "testing"
+)
+
+func TestHealthCheckHandler(t *testing.T) {
+    // Create a request to pass to our handler. We don't have any query parameters for now, so we'll
+    // pass 'nil' as the third parameter.
+    req, err := http.NewRequest("GET", "/health", nil)
+    if err != nil {
+        t.Fatal(err)
+    }
+
+    // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response.
+    rr := httptest.NewRecorder()
+    handler := http.HandlerFunc(HealthCheckHandler)
+
+    // Our handlers satisfy http.Handler, so we can call their ServeHTTP method
+    // directly and pass in our Request and ResponseRecorder.
+    handler.ServeHTTP(rr, req)
+
+    // Check the status code is what we expect.
+    if status := rr.Code; status != http.StatusOK {
+        t.Errorf("handler returned wrong status code: got %v want %v",
+            status, http.StatusOK)
+    }
+
+    // Check the response body is what we expect.
+    expected := `{"alive": true}`
+    if rr.Body.String() != expected {
+        t.Errorf("handler returned unexpected body: got %v want %v",
+            rr.Body.String(), expected)
+    }
+}
+```
+
+In the case that our routes have [variables](#examples), we can pass those in the request. We could write
+[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple
+possible route variables as needed.
+
+```go
+// endpoints.go
+func main() {
+    r := mux.NewRouter()
+    // A route with a route variable:
+    r.HandleFunc("/metrics/{type}", MetricsHandler)
+
+    log.Fatal(http.ListenAndServe("localhost:8080", r))
+}
+```
+
+Our test file, with a table-driven test of `routeVariables`:
+
+```go
+// endpoints_test.go
+func TestMetricsHandler(t *testing.T) {
+    tt := []struct{
+        routeVariable string
+        shouldPass bool
+    }{
+        {"goroutines", true},
+        {"heap", true},
+        {"counters", true},
+        {"queries", true},
+        {"adhadaeqm3k", false},
+    }
+
+    for _, tc := range tt {
+        path := fmt.Sprintf("/metrics/%s", tc.routeVariable)
+        req, err := http.NewRequest("GET", path, nil)
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        rr := httptest.NewRecorder()
+
+        // Need to create a router that we can pass the request through so that the vars will be added to the context
+        router := mux.NewRouter()
+        router.HandleFunc("/metrics/{type}", MetricsHandler)
+        router.ServeHTTP(rr, req)
+
+        // In this case, our MetricsHandler returns a non-200 response
+        // for a route variable it doesn't know about.
+        if rr.Code == http.StatusOK && !tc.shouldPass {
+            t.Errorf("handler should have failed on routeVariable %s: got %v want %v",
+                tc.routeVariable, rr.Code, http.StatusOK)
+        }
+    }
+}
+```
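+
+The example never shows `MetricsHandler` itself. A minimal sketch consistent with the test above might look like the following; the accepted metric names and the response body are assumptions for illustration, not part of the original:
+
+```go
+// endpoints.go (hypothetical sketch)
+package main
+
+import (
+    "io"
+    "net/http"
+
+    "github.com/gorilla/mux"
+)
+
+func MetricsHandler(w http.ResponseWriter, r *http.Request) {
+    // mux stores route variables on the request context; Vars retrieves them.
+    metricType := mux.Vars(r)["type"]
+
+    switch metricType {
+    case "goroutines", "heap", "counters", "queries":
+        w.Header().Set("Content-Type", "application/json")
+        io.WriteString(w, `{"metric": "`+metricType+`"}`)
+    default:
+        // Unknown metric types yield a non-200 response, which is what the test checks.
+        http.Error(w, "unknown metric type", http.StatusNotFound)
+    }
+}
+```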
+
+## Full Example
+
+Here's a complete, runnable example of a small `mux` based server:
+
+```go
+package main
+
+import (
+    "net/http"
+    "log"
+    "github.com/gorilla/mux"
+)
+
+func YourHandler(w http.ResponseWriter, r *http.Request) {
+    w.Write([]byte("Gorilla!\n"))
+}
+
+func main() {
+    r := mux.NewRouter()
+    // Routes consist of a path and a handler function.
+    r.HandleFunc("/", YourHandler)
+
+    // Bind to a port and pass our router in
+    log.Fatal(http.ListenAndServe(":8000", r))
+}
+```
+
+## License
+
+BSD licensed. See the LICENSE file for details.
diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go
new file mode 100644
index 0000000..bd5a38b
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/doc.go
@@ -0,0 +1,306 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package mux implements a request router and dispatcher.
+
+The name mux stands for "HTTP request multiplexer". Like the standard
+http.ServeMux, mux.Router matches incoming requests against a list of
+registered routes and calls a handler for the route that matches the URL
+or other conditions. The main features are:
+
+	* Requests can be matched based on URL host, path, path prefix, schemes,
+	  header and query values, HTTP methods or using custom matchers.
+	* URL hosts, paths and query values can have variables with an optional
+	  regular expression.
+	* Registered URLs can be built, or "reversed", which helps maintaining
+	  references to resources.
+	* Routes can be used as subrouters: nested routes are only tested if the
+	  parent route matches. This is useful to define groups of routes that
+	  share common conditions like a host, a path prefix or other repeated
+	  attributes. As a bonus, this optimizes request matching.
+	* It implements the http.Handler interface so it is compatible with the
+	  standard http.ServeMux.
+
+Let's start registering a couple of URL paths and handlers:
+
+	func main() {
+		r := mux.NewRouter()
+		r.HandleFunc("/", HomeHandler)
+		r.HandleFunc("/products", ProductsHandler)
+		r.HandleFunc("/articles", ArticlesHandler)
+		http.Handle("/", r)
+	}
+
+Here we register three routes mapping URL paths to handlers. This is
+equivalent to how http.HandleFunc() works: if an incoming request URL matches
+one of the paths, the corresponding handler is called passing
+(http.ResponseWriter, *http.Request) as parameters.
+
+Paths can have variables. They are defined using the format {name} or
+{name:pattern}. If a regular expression pattern is not defined, the matched
+variable will be anything until the next slash. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/products/{key}", ProductHandler)
+	r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler)
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+Groups can be used inside patterns, as long as they are non-capturing (?:re). For example:
+
+	r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler)
+
+The names are used to create a map of route variables which can be retrieved
+calling mux.Vars():
+
+	vars := mux.Vars(request)
+	category := vars["category"]
+
+Note that if any capturing groups are present, mux will panic() during parsing. To prevent
+this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to
+"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably
+when capturing groups were present.
+
+And this is all you need to know about the basic usage. More advanced options
+are explained below.
+
+Routes can also be restricted to a domain or subdomain. Just define a host
+pattern to be matched. They can also have variables:
+
+	r := mux.NewRouter()
+	// Only matches if domain is "www.example.com".
+	r.Host("www.example.com")
+	// Matches a dynamic subdomain.
+	r.Host("{subdomain:[a-z]+}.domain.com")
+
+There are several other matchers that can be added. To match path prefixes:
+
+	r.PathPrefix("/products/")
+
+...or HTTP methods:
+
+	r.Methods("GET", "POST")
+
+...or URL schemes:
+
+	r.Schemes("https")
+
+...or header values:
+
+	r.Headers("X-Requested-With", "XMLHttpRequest")
+
+...or query values:
+
+	r.Queries("key", "value")
+
+...or to use a custom matcher function:
+
+	r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool {
+		return r.ProtoMajor == 0
+	})
+
+...and finally, it is possible to combine several matchers in a single route:
+
+	r.HandleFunc("/products", ProductsHandler).
+	  Host("www.example.com").
+	  Methods("GET").
+	  Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as base for their paths:
+
+	r := mux.NewRouter()
+	s := r.PathPrefix("/products").Subrouter()
+	// "/products/"
+	s.HandleFunc("/", ProductsHandler)
+	// "/products/{key}/"
+	s.HandleFunc("/{key}/", ProductHandler)
+	// "/products/{key}/details"
+	s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+	func main() {
+		var dir string
+
+		flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+		flag.Parse()
+		r := mux.NewRouter()
+
+		// This will serve files under http://localhost:8000/static/<filename>
+		r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+		srv := &http.Server{
+			Handler:      r,
+			Addr:         "127.0.0.1:8000",
+			// Good practice: enforce timeouts for servers you create!
+			WriteTimeout: 15 * time.Second,
+			ReadTimeout:  15 * time.Second,
+		}
+
+		log.Fatal(srv.ListenAndServe())
+	}
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+	  Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+	url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+	"/articles/technology/42"
+
+This also works for host and query value variables:
+
+	r := mux.NewRouter()
+	r.Host("{subdomain}.domain.com").
+	  Path("/articles/{category}/{id:[0-9]+}").
+	  Queries("filter", "{filter}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42",
+	                                 "filter", "gorilla")
+
+All variables defined in the route are required, and their values must
+conform to the corresponding patterns. These requirements guarantee that a
+generated URL will always match a registered route -- the only exception is
+for explicitly defined "build-only" routes which never match.
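+
+For illustration (reusing the "article" route defined above), building with a
+value that does not satisfy its pattern returns an error instead of a URL:
+
+	// err is non-nil: "abc" does not match the {id:[0-9]+} pattern.
+	_, err := r.Get("article").URL("category", "technology", "id", "abc")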
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+	r.HeadersRegexp("Content-Type", "application/(text|json)")
+
+...and the route will match requests with a Content-Type of `application/json` as well as
+`application/text`.
+
+There's also a way to build only the URL host or path for a route:
+use the methods URLHost() or URLPath() instead. For the previous route,
+we would do:
+
+	// "http://news.domain.com/"
+	host, err := r.Get("article").URLHost("subdomain", "news")
+
+	// "/articles/technology/42"
+	path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+
+And if you use subrouters, host and path defined separately can be built
+as well:
+
+	r := mux.NewRouter()
+	s := r.Host("{subdomain}.domain.com").Subrouter()
+	s.Path("/articles/{category}/{id:[0-9]+}").
+	  HandlerFunc(ArticleHandler).
+	  Name("article")
+
+	// "http://news.domain.com/articles/technology/42"
+	url, err := r.Get("article").URL("subdomain", "news",
+	                                 "category", "technology",
+	                                 "id", "42")
+
+Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking.
+
+	type MiddlewareFunc func(http.Handler) http.Handler
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created).
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+	func simpleMw(next http.Handler) http.Handler {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			// Do stuff here
+			log.Println(r.RequestURI)
+			// Call the next handler, which can be another middleware in the chain, or the final handler.
+			next.ServeHTTP(w, r)
+		})
+	}
+
+Middlewares can be added to a router using `Router.Use()`:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/", handler)
+	r.Use(simpleMw)
+
+A more complex authentication middleware, which maps session token to users, could be written as:
+
+	// Define our struct
+	type authenticationMiddleware struct {
+		tokenUsers map[string]string
+	}
+
+	// Initialize it somewhere
+	func (amw *authenticationMiddleware) Populate() {
+		amw.tokenUsers["00000000"] = "user0"
+		amw.tokenUsers["aaaaaaaa"] = "userA"
+		amw.tokenUsers["05f717e5"] = "randomUser"
+		amw.tokenUsers["deadbeef"] = "user0"
+	}
+
+	// Middleware function, which will be called for each request
+	func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+			token := r.Header.Get("X-Session-Token")
+
+			if user, found := amw.tokenUsers[token]; found {
+				// We found the token in our map
+				log.Printf("Authenticated user %s\n", user)
+				next.ServeHTTP(w, r)
+			} else {
+				http.Error(w, "Forbidden", http.StatusForbidden)
+			}
+		})
+	}
+
+	r := mux.NewRouter()
+	r.HandleFunc("/", handler)
+
+	amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
+	amw.Populate()
+
+	r.Use(amw.Middleware)
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to.
+
+*/
+package mux
diff --git a/vendor/github.com/gorilla/mux/go.mod b/vendor/github.com/gorilla/mux/go.mod
new file mode 100644
index 0000000..df170a3
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/go.mod
@@ -0,0 +1,3 @@
+module github.com/gorilla/mux
+
+go 1.12
diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go
new file mode 100644
index 0000000..cb51c56
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/middleware.go
@@ -0,0 +1,74 @@
+package mux
+
+import (
+	"net/http"
+	"strings"
+)
+
+// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler.
+// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed
+// to it, and then calls the handler passed as parameter to the MiddlewareFunc.
+type MiddlewareFunc func(http.Handler) http.Handler
+
+// middleware interface is anything which implements a MiddlewareFunc named Middleware.
+type middleware interface {
+	Middleware(handler http.Handler) http.Handler
+}
+
+// Middleware allows MiddlewareFunc to implement the middleware interface.
+func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler {
+	return mw(handler)
+}
+
+// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) Use(mwf ...MiddlewareFunc) {
+	for _, fn := range mwf {
+		r.middlewares = append(r.middlewares, fn)
+	}
+}
+
+// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router.
+func (r *Router) useInterface(mw middleware) {
+	r.middlewares = append(r.middlewares, mw)
+}
+
+// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header
+// on requests for routes that have an OPTIONS method matcher to all the method matchers on
+// the route. Routes that do not explicitly handle OPTIONS requests will not be processed
+// by the middleware. See examples for usage.
+func CORSMethodMiddleware(r *Router) MiddlewareFunc {
+	return func(next http.Handler) http.Handler {
+		return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+			allMethods, err := getAllMethodsForRoute(r, req)
+			if err == nil {
+				for _, v := range allMethods {
+					if v == http.MethodOptions {
+						w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ","))
+					}
+				}
+			}
+
+			next.ServeHTTP(w, req)
+		})
+	}
+}
+
+// getAllMethodsForRoute returns all the methods from method matchers matching a given
+// request.
+func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) {
+	var allMethods []string
+
+	for _, route := range r.routes {
+		var match RouteMatch
+		if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch {
+			methods, err := route.GetMethods()
+			if err != nil {
+				return nil, err
+			}
+
+			allMethods = append(allMethods, methods...)
+		}
+	}
+
+	return allMethods, nil
+}
diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go
new file mode 100644
index 0000000..782a34b
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/mux.go
@@ -0,0 +1,606 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"net/http"
+	"path"
+	"regexp"
+)
+
+var (
+	// ErrMethodMismatch is returned when the method in the request does not match
+	// the method defined against the route.
+	ErrMethodMismatch = errors.New("method is not allowed")
+	// ErrNotFound is returned when no route match is found.
+	ErrNotFound = errors.New("no matching route was found")
+)
+
+// NewRouter returns a new router instance.
+func NewRouter() *Router {
+	return &Router{namedRoutes: make(map[string]*Route)}
+}
+
+// Router registers routes to be matched and dispatches a handler.
+//
+// It implements the http.Handler interface, so it can be registered to serve
+// requests:
+//
+//     var router = mux.NewRouter()
+//
+//     func main() {
+//         http.Handle("/", router)
+//     }
+//
+// Or, for Google App Engine, register it in a init() function:
+//
+//     func init() {
+//         http.Handle("/", router)
+//     }
+//
+// This will send all incoming requests to the router.
+type Router struct {
+	// Configurable Handler to be used when no route matches.
+	NotFoundHandler http.Handler
+
+	// Configurable Handler to be used when the request method does not match the route.
+	MethodNotAllowedHandler http.Handler
+
+	// Routes to be matched, in order.
+	routes []*Route
+
+	// Routes by name for URL building.
+	namedRoutes map[string]*Route
+
+	// If true, do not clear the request context after handling the request.
+	//
+	// Deprecated: No effect, since the context is stored on the request itself.
+	KeepContext bool
+
+	// Slice of middlewares to be called after a match is found
+	middlewares []middleware
+
+	// configuration shared with `Route`
+	routeConf
+}
+
+// common route configuration shared between `Router` and `Route`
+type routeConf struct {
+	// If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to"
+	useEncodedPath bool
+
+	// If true, when the path pattern is "/path/", accessing "/path" will
+	// redirect to the former and vice versa.
+	strictSlash bool
+
+	// If true, when the path pattern is "/path//to", accessing "/path//to"
+	// will not redirect
+	skipClean bool
+
+	// Manager for the variables from host and path.
+	regexp routeRegexpGroup
+
+	// List of matchers.
+	matchers []matcher
+
+	// The scheme used when building URLs.
+	buildScheme string
+
+	buildVarsFunc BuildVarsFunc
+}
+
+// returns an effective deep copy of `routeConf`
+func copyRouteConf(r routeConf) routeConf {
+	c := r
+
+	if r.regexp.path != nil {
+		c.regexp.path = copyRouteRegexp(r.regexp.path)
+	}
+
+	if r.regexp.host != nil {
+		c.regexp.host = copyRouteRegexp(r.regexp.host)
+	}
+
+	c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries))
+	for _, q := range r.regexp.queries {
+		c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
+	}
+
+	c.matchers = make([]matcher, len(r.matchers))
+	copy(c.matchers, r.matchers)
+
+	return c
+}
+
+func copyRouteRegexp(r *routeRegexp) *routeRegexp {
+	c := *r
+	return &c
+}
+
+// Match attempts to match the given request against the router's registered routes.
+//
+// If the request matches a route of this router or one of its subrouters the Route,
+// Handler, and Vars fields of the match argument are filled and this function
+// returns true.
+//
+// If the request does not match any of this router's or its subrouters' routes
+// then this function returns false. If available, a reason for the match failure
+// will be filled in the match argument's MatchErr field. If the match failure type
+// (eg: not found) has a registered handler, the handler is assigned to the Handler
+// field of the match argument.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+	for _, route := range r.routes {
+		if route.Match(req, match) {
+			// Build middleware chain if no error was found
+			if match.MatchErr == nil {
+				for i := len(r.middlewares) - 1; i >= 0; i-- {
+					match.Handler = r.middlewares[i].Middleware(match.Handler)
+				}
+			}
+			return true
+		}
+	}
+
+	if match.MatchErr == ErrMethodMismatch {
+		if r.MethodNotAllowedHandler != nil {
+			match.Handler = r.MethodNotAllowedHandler
+			return true
+		}
+
+		return false
+	}
+
+	// Closest match for a router (includes sub-routers)
+	if r.NotFoundHandler != nil {
+		match.Handler = r.NotFoundHandler
+		match.MatchErr = ErrNotFound
+		return true
+	}
+
+	match.MatchErr = ErrNotFound
+	return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	if !r.skipClean {
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = req.URL.EscapedPath()
+		}
+		// Clean path to canonical form and redirect.
+		if p := cleanPath(path); p != path {
+
+			// Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
+			// This matches with fix in go 1.2 r.c. 4 for same problem.  Go Issue:
+			// http://code.google.com/p/go/issues/detail?id=5252
+			url := *req.URL
+			url.Path = p
+			p = url.String()
+
+			w.Header().Set("Location", p)
+			w.WriteHeader(http.StatusMovedPermanently)
+			return
+		}
+	}
+	var match RouteMatch
+	var handler http.Handler
+	if r.Match(req, &match) {
+		handler = match.Handler
+		req = requestWithVars(req, match.Vars)
+		req = requestWithRoute(req, match.Route)
+	}
+
+	if handler == nil && match.MatchErr == ErrMethodMismatch {
+		handler = methodNotAllowedHandler()
+	}
+
+	if handler == nil {
+		handler = http.NotFoundHandler()
+	}
+
+	handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route {
+	return r.namedRoutes[name]
+}
+
+// GetRoute returns a route registered with the given name. This method
+// was renamed to Get() and remains here for backwards compatibility.
+func (r *Router) GetRoute(name string) *Route {
+	return r.namedRoutes[name]
+}
+
+// StrictSlash defines the trailing slash behavior for new routes. The initial
+// value is false.
+//
+// When true, if the route path is "/path/", accessing "/path" will perform a redirect
+// to the former and vice versa. In other words, your application will always
+// see the path as specified in the route.
+//
+// When false, if the route path is "/path", accessing "/path/" will not match
+// this route and vice versa.
+//
+// The redirect is an HTTP 301 (Moved Permanently). Note that when this is set for
+// routes with a non-idempotent method (e.g. POST, PUT), the subsequent redirected
+// request will be made as a GET by most clients. Use middleware or client settings
+// to modify this behaviour as needed.
+//
+// Special case: when a route sets a path prefix using the PathPrefix() method,
+// strict slash is ignored for that route because the redirect behavior can't
+// be determined from a prefix alone. However, any subrouters created from that
+// route inherit the original StrictSlash setting.
+func (r *Router) StrictSlash(value bool) *Router {
+	r.strictSlash = value
+	return r
+}
+
+// SkipClean defines the path cleaning behaviour for new routes. The initial
+// value is false. Users should be careful about which routes are not cleaned
+//
+// When true, if the route path is "/path//to", it will remain with the double
+// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/
+//
+// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will
+// become /fetch/http/xkcd.com/534
+func (r *Router) SkipClean(value bool) *Router {
+	r.skipClean = value
+	return r
+}
+
+// UseEncodedPath tells the router to match the encoded original path
+// to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/{var}/to".
+//
+// If not called, the router will match the unencoded path to the routes.
+// For example, "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to"
+func (r *Router) UseEncodedPath() *Router {
+	r.useEncodedPath = true
+	return r
+}
+
+// ----------------------------------------------------------------------------
+// Route factories
+// ----------------------------------------------------------------------------
+
+// NewRoute registers an empty route.
+func (r *Router) NewRoute() *Route {
+	// initialize a route with a copy of the parent router's configuration
+	route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
+	r.routes = append(r.routes, route)
+	return route
+}
+
+// Name registers a new route with a name.
+// See Route.Name().
+func (r *Router) Name(name string) *Route {
+	return r.NewRoute().Name(name)
+}
+
+// Handle registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.Handler().
+func (r *Router) Handle(path string, handler http.Handler) *Route {
+	return r.NewRoute().Path(path).Handler(handler)
+}
+
+// HandleFunc registers a new route with a matcher for the URL path.
+// See Route.Path() and Route.HandlerFunc().
+func (r *Router) HandleFunc(path string, f func(http.ResponseWriter,
+	*http.Request)) *Route {
+	return r.NewRoute().Path(path).HandlerFunc(f)
+}
+
+// Headers registers a new route with a matcher for request header values.
+// See Route.Headers().
+func (r *Router) Headers(pairs ...string) *Route {
+	return r.NewRoute().Headers(pairs...)
+}
+
+// Host registers a new route with a matcher for the URL host.
+// See Route.Host().
+func (r *Router) Host(tpl string) *Route {
+	return r.NewRoute().Host(tpl)
+}
+
+// MatcherFunc registers a new route with a custom matcher function.
+// See Route.MatcherFunc().
+func (r *Router) MatcherFunc(f MatcherFunc) *Route {
+	return r.NewRoute().MatcherFunc(f)
+}
+
+// Methods registers a new route with a matcher for HTTP methods.
+// See Route.Methods().
+func (r *Router) Methods(methods ...string) *Route {
+	return r.NewRoute().Methods(methods...)
+}
+
+// Path registers a new route with a matcher for the URL path.
+// See Route.Path().
+func (r *Router) Path(tpl string) *Route {
+	return r.NewRoute().Path(tpl)
+}
+
+// PathPrefix registers a new route with a matcher for the URL path prefix.
+// See Route.PathPrefix().
+func (r *Router) PathPrefix(tpl string) *Route {
+	return r.NewRoute().PathPrefix(tpl)
+}
+
+// Queries registers a new route with a matcher for URL query values.
+// See Route.Queries().
+func (r *Router) Queries(pairs ...string) *Route {
+	return r.NewRoute().Queries(pairs...)
+}
+
+// Schemes registers a new route with a matcher for URL schemes.
+// See Route.Schemes().
+func (r *Router) Schemes(schemes ...string) *Route {
+	return r.NewRoute().Schemes(schemes...)
+}
+
+// BuildVarsFunc registers a new route with a custom function for modifying
+// route variables before building a URL.
+func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route {
+	return r.NewRoute().BuildVarsFunc(f)
+}
+
+// Walk walks the router and all its sub-routers, calling walkFn for each route
+// in the tree. The routes are walked in the order they were added. Sub-routers
+// are explored depth-first.
+func (r *Router) Walk(walkFn WalkFunc) error {
+	return r.walk(walkFn, []*Route{})
+}
+
+// SkipRouter is used as a return value from WalkFuncs to indicate that the
+// router that walk is about to descend down to should be skipped.
+var SkipRouter = errors.New("skip this router")
+
+// WalkFunc is the type of the function called for each route visited by Walk.
+// At every invocation, it is given the current route, and the current router,
+// and a list of ancestor routes that lead to the current route.
+type WalkFunc func(route *Route, router *Router, ancestors []*Route) error
+
+func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error {
+	for _, t := range r.routes {
+		err := walkFn(t, r, ancestors)
+		if err == SkipRouter {
+			continue
+		}
+		if err != nil {
+			return err
+		}
+		for _, sr := range t.matchers {
+			if h, ok := sr.(*Router); ok {
+				ancestors = append(ancestors, t)
+				err := h.walk(walkFn, ancestors)
+				if err != nil {
+					return err
+				}
+				ancestors = ancestors[:len(ancestors)-1]
+			}
+		}
+		if h, ok := t.handler.(*Router); ok {
+			ancestors = append(ancestors, t)
+			err := h.walk(walkFn, ancestors)
+			if err != nil {
+				return err
+			}
+			ancestors = ancestors[:len(ancestors)-1]
+		}
+	}
+	return nil
+}
+
+// ----------------------------------------------------------------------------
+// Context
+// ----------------------------------------------------------------------------
+
+// RouteMatch stores information about a matched route.
+type RouteMatch struct {
+	Route   *Route
+	Handler http.Handler
+	Vars    map[string]string
+
+	// MatchErr is set to appropriate matching error
+	// It is set to ErrMethodMismatch if there is a mismatch in
+	// the request method and route method
+	MatchErr error
+}
+
+type contextKey int
+
+const (
+	varsKey contextKey = iota
+	routeKey
+)
+
+// Vars returns the route variables for the current request, if any.
+func Vars(r *http.Request) map[string]string {
+	if rv := r.Context().Value(varsKey); rv != nil {
+		return rv.(map[string]string)
+	}
+	return nil
+}
+
+// CurrentRoute returns the matched route for the current request, if any.
+// This only works when called inside the handler of the matched route
+// because the matched route is stored in the request context which is cleared
+// after the handler returns.
+func CurrentRoute(r *http.Request) *Route {
+	if rv := r.Context().Value(routeKey); rv != nil {
+		return rv.(*Route)
+	}
+	return nil
+}
+
+func requestWithVars(r *http.Request, vars map[string]string) *http.Request {
+	ctx := context.WithValue(r.Context(), varsKey, vars)
+	return r.WithContext(ctx)
+}
+
+func requestWithRoute(r *http.Request, route *Route) *http.Request {
+	ctx := context.WithValue(r.Context(), routeKey, route)
+	return r.WithContext(ctx)
+}
+
+// ----------------------------------------------------------------------------
+// Helpers
+// ----------------------------------------------------------------------------
+
+// cleanPath returns the canonical path for p, eliminating . and .. elements.
+// Borrowed from the net/http package.
+func cleanPath(p string) string {
+	if p == "" {
+		return "/"
+	}
+	if p[0] != '/' {
+		p = "/" + p
+	}
+	np := path.Clean(p)
+	// path.Clean removes trailing slash except for root;
+	// put the trailing slash back if necessary.
+	if p[len(p)-1] == '/' && np != "/" {
+		np += "/"
+	}
+
+	return np
+}
+
+// uniqueVars returns an error if two slices contain duplicated strings.
+func uniqueVars(s1, s2 []string) error {
+	for _, v1 := range s1 {
+		for _, v2 := range s2 {
+			if v1 == v2 {
+				return fmt.Errorf("mux: duplicated route variable %q", v2)
+			}
+		}
+	}
+	return nil
+}
+
+// checkPairs returns the count of strings passed in, and an error if
+// the count is not an even number.
+func checkPairs(pairs ...string) (int, error) {
+	length := len(pairs)
+	if length%2 != 0 {
+		return length, fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+	}
+	return length, nil
+}
+
+// mapFromPairsToString converts variadic string parameters to a
+// string to string map.
+func mapFromPairsToString(pairs ...string) (map[string]string, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]string, length/2)
+	for i := 0; i < length; i += 2 {
+		m[pairs[i]] = pairs[i+1]
+	}
+	return m, nil
+}
+
+// mapFromPairsToRegex converts variadic string parameters to a
+// string to regex map.
+func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) {
+	length, err := checkPairs(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	m := make(map[string]*regexp.Regexp, length/2)
+	for i := 0; i < length; i += 2 {
+		regex, err := regexp.Compile(pairs[i+1])
+		if err != nil {
+			return nil, err
+		}
+		m[pairs[i]] = regex
+	}
+	return m, nil
+}
+
+// matchInArray returns true if the given string value is in the array.
+func matchInArray(arr []string, value string) bool {
+	for _, v := range arr {
+		if v == value {
+			return true
+		}
+	}
+	return false
+}
+
+// matchMapWithString returns true if the given key/value pairs exist in a given map.
+func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != "" {
+			// If value was defined as an empty string we only check that the
+			// key exists. Otherwise we also check for equality.
+			valueExists := false
+			for _, value := range values {
+				if v == value {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against
+// the given regex
+func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool {
+	for k, v := range toCheck {
+		// Check if key exists.
+		if canonicalKey {
+			k = http.CanonicalHeaderKey(k)
+		}
+		if values := toMatch[k]; values == nil {
+			return false
+		} else if v != nil {
+			// If the value was defined as nil we only check that the
+			// key exists. Otherwise we also check that at least one value matches the regexp.
+			valueExists := false
+			for _, value := range values {
+				if v.MatchString(value) {
+					valueExists = true
+					break
+				}
+			}
+			if !valueExists {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+// methodNotAllowed replies to the request with an HTTP status code 405.
+func methodNotAllowed(w http.ResponseWriter, r *http.Request) {
+	w.WriteHeader(http.StatusMethodNotAllowed)
+}
+
+// methodNotAllowedHandler returns a simple request handler
+// that replies to each request with a status code 405.
+func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) }
diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go
new file mode 100644
index 0000000..0144842
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/regexp.go
@@ -0,0 +1,388 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"bytes"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+type routeRegexpOptions struct {
+	strictSlash    bool
+	useEncodedPath bool
+}
+
+type regexpType int
+
+const (
+	regexpTypePath   regexpType = 0
+	regexpTypeHost   regexpType = 1
+	regexpTypePrefix regexpType = 2
+	regexpTypeQuery  regexpType = 3
+)
+
+// newRouteRegexp parses a route template and returns a routeRegexp,
+// used to match a host, a path or a query string.
+//
+// It will extract named variables, assemble a regexp to be matched, create
+// a "reverse" template to build URLs and compile regexps to validate variable
+// values used in URL building.
+//
+// Previously we accepted only Python-like identifiers for variable
+// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that
+// name and pattern can't be empty, and names can't contain a colon.
+func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) {
+	// Check if it is well-formed.
+	idxs, errBraces := braceIndices(tpl)
+	if errBraces != nil {
+		return nil, errBraces
+	}
+	// Backup the original.
+	template := tpl
+	// Now let's parse it.
+	defaultPattern := "[^/]+"
+	if typ == regexpTypeQuery {
+		defaultPattern = ".*"
+	} else if typ == regexpTypeHost {
+		defaultPattern = "[^.]+"
+	}
+	// Strict slash only applies when matching a path.
+	if typ != regexpTypePath {
+		options.strictSlash = false
+	}
+	// Set a flag for strictSlash.
+	endSlash := false
+	if options.strictSlash && strings.HasSuffix(tpl, "/") {
+		tpl = tpl[:len(tpl)-1]
+		endSlash = true
+	}
+	varsN := make([]string, len(idxs)/2)
+	varsR := make([]*regexp.Regexp, len(idxs)/2)
+	pattern := bytes.NewBufferString("")
+	pattern.WriteByte('^')
+	reverse := bytes.NewBufferString("")
+	var end int
+	var err error
+	for i := 0; i < len(idxs); i += 2 {
+		// Set all values we are interested in.
+		raw := tpl[end:idxs[i]]
+		end = idxs[i+1]
+		parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2)
+		name := parts[0]
+		patt := defaultPattern
+		if len(parts) == 2 {
+			patt = parts[1]
+		}
+		// Name or pattern can't be empty.
+		if name == "" || patt == "" {
+			return nil, fmt.Errorf("mux: missing name or pattern in %q",
+				tpl[idxs[i]:end])
+		}
+		// Build the regexp pattern.
+		fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt)
+
+		// Build the reverse template.
+		fmt.Fprintf(reverse, "%s%%s", raw)
+
+		// Append variable name and compiled pattern.
+		varsN[i/2] = name
+		varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt))
+		if err != nil {
+			return nil, err
+		}
+	}
+	// Add the remaining.
+	raw := tpl[end:]
+	pattern.WriteString(regexp.QuoteMeta(raw))
+	if options.strictSlash {
+		pattern.WriteString("[/]?")
+	}
+	if typ == regexpTypeQuery {
+		// Add the default pattern if the query value is empty
+		if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" {
+			pattern.WriteString(defaultPattern)
+		}
+	}
+	if typ != regexpTypePrefix {
+		pattern.WriteByte('$')
+	}
+
+	var wildcardHostPort bool
+	if typ == regexpTypeHost {
+		if !strings.Contains(pattern.String(), ":") {
+			wildcardHostPort = true
+		}
+	}
+	reverse.WriteString(raw)
+	if endSlash {
+		reverse.WriteByte('/')
+	}
+	// Compile full regexp.
+	reg, errCompile := regexp.Compile(pattern.String())
+	if errCompile != nil {
+		return nil, errCompile
+	}
+
+	// Check for capturing groups which used to work in older versions
+	if reg.NumSubexp() != len(idxs)/2 {
+		panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) +
+			"Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)")
+	}
+
+	// Done!
+	return &routeRegexp{
+		template:         template,
+		regexpType:       typ,
+		options:          options,
+		regexp:           reg,
+		reverse:          reverse.String(),
+		varsN:            varsN,
+		varsR:            varsR,
+		wildcardHostPort: wildcardHostPort,
+	}, nil
+}
+
+// routeRegexp stores a regexp to match a host or path and information to
+// collect and validate route variables.
+type routeRegexp struct {
+	// The unmodified template.
+	template string
+	// The type of match
+	regexpType regexpType
+	// Options for matching
+	options routeRegexpOptions
+	// Expanded regexp.
+	regexp *regexp.Regexp
+	// Reverse template.
+	reverse string
+	// Variable names.
+	varsN []string
+	// Variable regexps (validators).
+	varsR []*regexp.Regexp
+	// Wildcard host-port (no strict port match in hostname)
+	wildcardHostPort bool
+}
+
+// Match matches the regexp against the URL host or path.
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool {
+	if r.regexpType == regexpTypeHost {
+		host := getHost(req)
+		if r.wildcardHostPort {
+			// Don't be strict on the port match
+			if i := strings.Index(host, ":"); i != -1 {
+				host = host[:i]
+			}
+		}
+		return r.regexp.MatchString(host)
+	}
+
+	if r.regexpType == regexpTypeQuery {
+		return r.matchQueryString(req)
+	}
+	path := req.URL.Path
+	if r.options.useEncodedPath {
+		path = req.URL.EscapedPath()
+	}
+	return r.regexp.MatchString(path)
+}
+
+// url builds a URL part using the given values.
+func (r *routeRegexp) url(values map[string]string) (string, error) {
+	urlValues := make([]interface{}, len(r.varsN), len(r.varsN))
+	for k, v := range r.varsN {
+		value, ok := values[v]
+		if !ok {
+			return "", fmt.Errorf("mux: missing route variable %q", v)
+		}
+		if r.regexpType == regexpTypeQuery {
+			value = url.QueryEscape(value)
+		}
+		urlValues[k] = value
+	}
+	rv := fmt.Sprintf(r.reverse, urlValues...)
+	if !r.regexp.MatchString(rv) {
+		// The URL is checked against the full regexp, instead of checking
+		// individual variables. This is faster but to provide a good error
+		// message, we check individual regexps if the URL doesn't match.
+		for k, v := range r.varsN {
+			if !r.varsR[k].MatchString(values[v]) {
+				return "", fmt.Errorf(
+					"mux: variable %q doesn't match, expected %q", values[v],
+					r.varsR[k].String())
+			}
+		}
+	}
+	return rv, nil
+}
+
+// getURLQuery returns a single query parameter from a request URL.
+// For a URL with foo=bar&baz=ding, we return only the relevant key
+// value pair for the routeRegexp.
+func (r *routeRegexp) getURLQuery(req *http.Request) string {
+	if r.regexpType != regexpTypeQuery {
+		return ""
+	}
+	templateKey := strings.SplitN(r.template, "=", 2)[0]
+	val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey)
+	if ok {
+		return templateKey + "=" + val
+	}
+	return ""
+}
+
+// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0].
+// If key was not found, empty string and false is returned.
+func findFirstQueryKey(rawQuery, key string) (value string, ok bool) {
+	query := []byte(rawQuery)
+	for len(query) > 0 {
+		foundKey := query
+		if i := bytes.IndexAny(foundKey, "&;"); i >= 0 {
+			foundKey, query = foundKey[:i], foundKey[i+1:]
+		} else {
+			query = query[:0]
+		}
+		if len(foundKey) == 0 {
+			continue
+		}
+		var value []byte
+		if i := bytes.IndexByte(foundKey, '='); i >= 0 {
+			foundKey, value = foundKey[:i], foundKey[i+1:]
+		}
+		if len(foundKey) < len(key) {
+			// Cannot possibly be key.
+			continue
+		}
+		keyString, err := url.QueryUnescape(string(foundKey))
+		if err != nil {
+			continue
+		}
+		if keyString != key {
+			continue
+		}
+		valueString, err := url.QueryUnescape(string(value))
+		if err != nil {
+			continue
+		}
+		return valueString, true
+	}
+	return "", false
+}
+
+func (r *routeRegexp) matchQueryString(req *http.Request) bool {
+	return r.regexp.MatchString(r.getURLQuery(req))
+}
+
+// braceIndices returns the first level curly brace indices from a string.
+// It returns an error in case of unbalanced braces.
+func braceIndices(s string) ([]int, error) {
+	var level, idx int
+	var idxs []int
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '{':
+			if level++; level == 1 {
+				idx = i
+			}
+		case '}':
+			if level--; level == 0 {
+				idxs = append(idxs, idx, i+1)
+			} else if level < 0 {
+				return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+			}
+		}
+	}
+	if level != 0 {
+		return nil, fmt.Errorf("mux: unbalanced braces in %q", s)
+	}
+	return idxs, nil
+}
+
+// varGroupName builds a capturing group name for the indexed variable.
+func varGroupName(idx int) string {
+	return "v" + strconv.Itoa(idx)
+}
+
+// ----------------------------------------------------------------------------
+// routeRegexpGroup
+// ----------------------------------------------------------------------------
+
+// routeRegexpGroup groups the route matchers that carry variables.
+type routeRegexpGroup struct {
+	host    *routeRegexp
+	path    *routeRegexp
+	queries []*routeRegexp
+}
+
+// setMatch extracts the variables from the URL once a route matches.
+func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) {
+	// Store host variables.
+	if v.host != nil {
+		host := getHost(req)
+		if v.host.wildcardHostPort {
+			// Don't be strict on the port match
+			if i := strings.Index(host, ":"); i != -1 {
+				host = host[:i]
+			}
+		}
+		matches := v.host.regexp.FindStringSubmatchIndex(host)
+		if len(matches) > 0 {
+			extractVars(host, matches, v.host.varsN, m.Vars)
+		}
+	}
+	path := req.URL.Path
+	if r.useEncodedPath {
+		path = req.URL.EscapedPath()
+	}
+	// Store path variables.
+	if v.path != nil {
+		matches := v.path.regexp.FindStringSubmatchIndex(path)
+		if len(matches) > 0 {
+			extractVars(path, matches, v.path.varsN, m.Vars)
+			// Check if we should redirect.
+			if v.path.options.strictSlash {
+				p1 := strings.HasSuffix(path, "/")
+				p2 := strings.HasSuffix(v.path.template, "/")
+				if p1 != p2 {
+					u, _ := url.Parse(req.URL.String())
+					if p1 {
+						u.Path = u.Path[:len(u.Path)-1]
+					} else {
+						u.Path += "/"
+					}
+					m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently)
+				}
+			}
+		}
+	}
+	// Store query string variables.
+	for _, q := range v.queries {
+		queryURL := q.getURLQuery(req)
+		matches := q.regexp.FindStringSubmatchIndex(queryURL)
+		if len(matches) > 0 {
+			extractVars(queryURL, matches, q.varsN, m.Vars)
+		}
+	}
+}
+
+// getHost tries its best to return the request host.
+// According to section 14.23 of RFC 2616 the Host header
+// can include the port number if the default value of 80 is not used.
+func getHost(r *http.Request) string {
+	if r.URL.IsAbs() {
+		return r.URL.Host
+	}
+	return r.Host
+}
+
+func extractVars(input string, matches []int, names []string, output map[string]string) {
+	for i, name := range names {
+		output[name] = input[matches[2*i+2]:matches[2*i+3]]
+	}
+}
diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go
new file mode 100644
index 0000000..750afe5
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/route.go
@@ -0,0 +1,736 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import (
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"regexp"
+	"strings"
+)
+
+// Route stores information to match a request and build URLs.
+type Route struct {
+	// Request handler for the route.
+	handler http.Handler
+	// If true, this route never matches: it is only used to build URLs.
+	buildOnly bool
+	// The name used to build URLs.
+	name string
+	// Error resulted from building a route.
+	err error
+
+	// "global" reference to all named routes
+	namedRoutes map[string]*Route
+
+	// config possibly passed in from `Router`
+	routeConf
+}
+
+// SkipClean reports whether path cleaning is enabled for this route via
+// Router.SkipClean.
+func (r *Route) SkipClean() bool {
+	return r.skipClean
+}
+
+// Match matches the route against the request.
+func (r *Route) Match(req *http.Request, match *RouteMatch) bool {
+	if r.buildOnly || r.err != nil {
+		return false
+	}
+
+	var matchErr error
+
+	// Match everything.
+	for _, m := range r.matchers {
+		if matched := m.Match(req, match); !matched {
+			if _, ok := m.(methodMatcher); ok {
+				matchErr = ErrMethodMismatch
+				continue
+			}
+
+			// Ignore ErrNotFound errors. These errors arise from match call
+			// to Subrouters.
+			//
+			// This prevents subsequent matching subrouters from failing to
+			// run middleware. If not ignored, the middleware would see a
+			// non-nil MatchErr and be skipped, even when there was a
+			// matching route.
+			if match.MatchErr == ErrNotFound {
+				match.MatchErr = nil
+			}
+
+			matchErr = nil
+			return false
+		}
+	}
+
+	if matchErr != nil {
+		match.MatchErr = matchErr
+		return false
+	}
+
+	if match.MatchErr == ErrMethodMismatch && r.handler != nil {
+		// We found a route which matches request method, clear MatchErr
+		match.MatchErr = nil
+		// Then override the mis-matched handler
+		match.Handler = r.handler
+	}
+
+	// Yay, we have a match. Let's collect some info about it.
+	if match.Route == nil {
+		match.Route = r
+	}
+	if match.Handler == nil {
+		match.Handler = r.handler
+	}
+	if match.Vars == nil {
+		match.Vars = make(map[string]string)
+	}
+
+	// Set variables.
+	r.regexp.setMatch(req, match, r)
+	return true
+}
+
+// ----------------------------------------------------------------------------
+// Route attributes
+// ----------------------------------------------------------------------------
+
+// GetError returns an error resulting from building the route, if any.
+func (r *Route) GetError() error {
+	return r.err
+}
+
+// BuildOnly sets the route to never match: it is only used to build URLs.
+func (r *Route) BuildOnly() *Route {
+	r.buildOnly = true
+	return r
+}
+
+// Handler --------------------------------------------------------------------
+
+// Handler sets a handler for the route.
+func (r *Route) Handler(handler http.Handler) *Route {
+	if r.err == nil {
+		r.handler = handler
+	}
+	return r
+}
+
+// HandlerFunc sets a handler function for the route.
+func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route {
+	return r.Handler(http.HandlerFunc(f))
+}
+
+// GetHandler returns the handler for the route, if any.
+func (r *Route) GetHandler() http.Handler {
+	return r.handler
+}
+
+// Name -----------------------------------------------------------------------
+
+// Name sets the name for the route, used to build URLs.
+// It is an error to call Name more than once on a route.
+func (r *Route) Name(name string) *Route {
+	if r.name != "" {
+		r.err = fmt.Errorf("mux: route already has name %q, can't set %q",
+			r.name, name)
+	}
+	if r.err == nil {
+		r.name = name
+		r.namedRoutes[name] = r
+	}
+	return r
+}
+
+// GetName returns the name for the route, if any.
+func (r *Route) GetName() string {
+	return r.name
+}
+
+// ----------------------------------------------------------------------------
+// Matchers
+// ----------------------------------------------------------------------------
+
+// matcher types try to match a request.
+type matcher interface {
+	Match(*http.Request, *RouteMatch) bool
+}
+
+// addMatcher adds a matcher to the route.
+func (r *Route) addMatcher(m matcher) *Route {
+	if r.err == nil {
+		r.matchers = append(r.matchers, m)
+	}
+	return r
+}
+
+// addRegexpMatcher adds a host or path matcher and builder to a route.
+func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error {
+	if r.err != nil {
+		return r.err
+	}
+	if typ == regexpTypePath || typ == regexpTypePrefix {
+		if len(tpl) > 0 && tpl[0] != '/' {
+			return fmt.Errorf("mux: path must start with a slash, got %q", tpl)
+		}
+		if r.regexp.path != nil {
+			tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl
+		}
+	}
+	rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{
+		strictSlash:    r.strictSlash,
+		useEncodedPath: r.useEncodedPath,
+	})
+	if err != nil {
+		return err
+	}
+	for _, q := range r.regexp.queries {
+		if err = uniqueVars(rr.varsN, q.varsN); err != nil {
+			return err
+		}
+	}
+	if typ == regexpTypeHost {
+		if r.regexp.path != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil {
+				return err
+			}
+		}
+		r.regexp.host = rr
+	} else {
+		if r.regexp.host != nil {
+			if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil {
+				return err
+			}
+		}
+		if typ == regexpTypeQuery {
+			r.regexp.queries = append(r.regexp.queries, rr)
+		} else {
+			r.regexp.path = rr
+		}
+	}
+	r.addMatcher(rr)
+	return nil
+}
+
+// Headers --------------------------------------------------------------------
+
+// headerMatcher matches the request against header values.
+type headerMatcher map[string]string
+
+func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithString(m, r.Header, true)
+}
+
+// Headers adds a matcher for request header values.
+// It accepts a sequence of key/value pairs to be matched. For example:
+//
+//     r := mux.NewRouter()
+//     r.Headers("Content-Type", "application/json",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if both request header values match.
+// If the value is an empty string, it will match any value if the key is set.
+func (r *Route) Headers(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]string
+		headers, r.err = mapFromPairsToString(pairs...)
+		return r.addMatcher(headerMatcher(headers))
+	}
+	return r
+}
+
+// headerRegexMatcher matches the request against the route given a regex for the header
+type headerRegexMatcher map[string]*regexp.Regexp
+
+func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchMapWithRegex(m, r.Header, true)
+}
+
+// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex
+// support. For example:
+//
+//     r := mux.NewRouter()
+//     r.HeadersRegexp("Content-Type", "application/(text|json)",
+//               "X-Requested-With", "XMLHttpRequest")
+//
+// The above route will only match if the request headers match both regular expressions.
+// If the value is an empty string, it will match any value if the key is set.
+// Use the start and end of string anchors (^ and $) to match an exact value.
+func (r *Route) HeadersRegexp(pairs ...string) *Route {
+	if r.err == nil {
+		var headers map[string]*regexp.Regexp
+		headers, r.err = mapFromPairsToRegex(pairs...)
+		return r.addMatcher(headerRegexMatcher(headers))
+	}
+	return r
+}
+
+// Host -----------------------------------------------------------------------
+
+// Host adds a matcher for the URL host.
+// It accepts a template with zero or more URL variables enclosed by {}.
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next dot.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Host("www.example.com")
+//     r.Host("{subdomain}.domain.com")
+//     r.Host("{subdomain:[a-z]+}.domain.com")
+//
+// Variable names must be unique in a given route. They can be retrieved by
+// calling mux.Vars(request).
+func (r *Route) Host(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, regexpTypeHost)
+	return r
+}
+
+// MatcherFunc ----------------------------------------------------------------
+
+// MatcherFunc is the function signature used by custom matchers.
+type MatcherFunc func(*http.Request, *RouteMatch) bool
+
+// Match returns the match for a given request.
+func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool {
+	return m(r, match)
+}
+
+// MatcherFunc adds a custom function to be used as request matcher.
+func (r *Route) MatcherFunc(f MatcherFunc) *Route {
+	return r.addMatcher(f)
+}
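+
+// Illustrative sketch (an assumption for documentation, not part of the upstream
+// sources): a custom matcher that only accepts requests carrying a hypothetical
+// "X-Internal" header. The header name and handler body are invented for the
+// example.
+//
+//     r := mux.NewRouter()
+//     r.MatcherFunc(func(req *http.Request, match *mux.RouteMatch) bool {
+//         return req.Header.Get("X-Internal") != ""
+//     }).HandlerFunc(func(w http.ResponseWriter, req *http.Request) {
+//         w.WriteHeader(http.StatusNoContent)
+//     })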
+
+// Methods --------------------------------------------------------------------
+
+// methodMatcher matches the request against HTTP methods.
+type methodMatcher []string
+
+func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	return matchInArray(m, r.Method)
+}
+
+// Methods adds a matcher for HTTP methods.
+// It accepts a sequence of one or more methods to be matched, e.g.:
+// "GET", "POST", "PUT".
+func (r *Route) Methods(methods ...string) *Route {
+	for k, v := range methods {
+		methods[k] = strings.ToUpper(v)
+	}
+	return r.addMatcher(methodMatcher(methods))
+}
+
+// Path -----------------------------------------------------------------------
+
+// Path adds a matcher for the URL path.
+// It accepts a template with zero or more URL variables enclosed by {}. The
+// template must start with a "/".
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+//
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Path("/products/").Handler(ProductsHandler)
+//     r.Path("/products/{key}").Handler(ProductsHandler)
+//     r.Path("/articles/{category}/{id:[0-9]+}").
+//       Handler(ArticleHandler)
+//
+// Variable names must be unique in a given route. They can be retrieved by
+// calling mux.Vars(request).
+func (r *Route) Path(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, regexpTypePath)
+	return r
+}
+
+// PathPrefix -----------------------------------------------------------------
+
+// PathPrefix adds a matcher for the URL path prefix. This matches if the given
+// template is a prefix of the full URL path. See Route.Path() for details on
+// the tpl argument.
+//
+// Note that it does not treat slashes specially ("/foobar/" will be matched by
+// the prefix "/foo") so you may want to use a trailing slash here.
+//
+// Also note that the setting of Router.StrictSlash() has no effect on routes
+// with a PathPrefix matcher.
+func (r *Route) PathPrefix(tpl string) *Route {
+	r.err = r.addRegexpMatcher(tpl, regexpTypePrefix)
+	return r
+}
+
+// Query ----------------------------------------------------------------------
+
+// Queries adds a matcher for URL query values.
+// It accepts a sequence of key/value pairs. Values may define variables.
+// For example:
+//
+//     r := mux.NewRouter()
+//     r.Queries("foo", "bar", "id", "{id:[0-9]+}")
+//
+// The above route will only match if the URL contains the defined query
+// values, e.g.: ?foo=bar&id=42.
+//
+// If the value is an empty string, it will match any value if the key is set.
+//
+// Variables can define an optional regexp pattern to be matched:
+//
+// - {name} matches anything until the next slash.
+//
+// - {name:pattern} matches the given regexp pattern.
+func (r *Route) Queries(pairs ...string) *Route {
+	length := len(pairs)
+	if length%2 != 0 {
+		r.err = fmt.Errorf(
+			"mux: number of parameters must be multiple of 2, got %v", pairs)
+		return nil
+	}
+	for i := 0; i < length; i += 2 {
+		if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil {
+			return r
+		}
+	}
+
+	return r
+}
+
+// Schemes --------------------------------------------------------------------
+
+// schemeMatcher matches the request against URL schemes.
+type schemeMatcher []string
+
+func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool {
+	scheme := r.URL.Scheme
+	// https://golang.org/pkg/net/http/#Request
+	// "For [most] server requests, fields other than Path and RawQuery will be
+	// empty."
+	// Since we're an http muxer, the scheme is either going to be http or https
+	// though, so we can just set it based on the tls termination state.
+	if scheme == "" {
+		if r.TLS == nil {
+			scheme = "http"
+		} else {
+			scheme = "https"
+		}
+	}
+	return matchInArray(m, scheme)
+}
+
+// Schemes adds a matcher for URL schemes.
+// It accepts a sequence of schemes to be matched, e.g.: "http", "https".
+// If the request's URL has a scheme set, it will be matched against.
+// Generally, the URL scheme will only be set if a previous handler set it,
+// such as the ProxyHeaders handler from gorilla/handlers.
+// If unset, the scheme will be determined based on the request's TLS
+// termination state.
+// The first argument to Schemes will be used when constructing a route URL.
+func (r *Route) Schemes(schemes ...string) *Route {
+	for k, v := range schemes {
+		schemes[k] = strings.ToLower(v)
+	}
+	if len(schemes) > 0 {
+		r.buildScheme = schemes[0]
+	}
+	return r.addMatcher(schemeMatcher(schemes))
+}
+
+// BuildVarsFunc --------------------------------------------------------------
+
+// BuildVarsFunc is the function signature used by custom build variable
+// functions (which can modify route variables before a route's URL is built).
+type BuildVarsFunc func(map[string]string) map[string]string
+
+// BuildVarsFunc adds a custom function to be used to modify build variables
+// before a route's URL is built.
+func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route {
+	if r.buildVarsFunc != nil {
+		// compose the old and new functions
+		old := r.buildVarsFunc
+		r.buildVarsFunc = func(m map[string]string) map[string]string {
+			return f(old(m))
+		}
+	} else {
+		r.buildVarsFunc = f
+	}
+	return r
+}
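+
+// Illustrative sketch (an assumption, not part of the upstream sources): a
+// BuildVarsFunc that lower-cases the "category" variable before the URL is
+// built. The route name and variable are invented for the example.
+//
+//     r := mux.NewRouter()
+//     r.Path("/articles/{category}").Name("bycategory").
+//         BuildVarsFunc(func(vars map[string]string) map[string]string {
+//             vars["category"] = strings.ToLower(vars["category"])
+//             return vars
+//         })
+//
+//     // u.Path will be "/articles/tech"
+//     u, err := r.Get("bycategory").URL("category", "TECH")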
+
+// Subrouter ------------------------------------------------------------------
+
+// Subrouter creates a subrouter for the route.
+//
+// It will test the inner routes only if the parent route matched. For example:
+//
+//     r := mux.NewRouter()
+//     s := r.Host("www.example.com").Subrouter()
+//     s.HandleFunc("/products/", ProductsHandler)
+//     s.HandleFunc("/products/{key}", ProductHandler)
+//     s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+	// initialize a subrouter with a copy of the parent route's configuration
+	router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
+	r.addMatcher(router)
+	return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Name("article")
+//
+// ...a URL for it can be built using:
+//
+//     url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return an url.URL with the following path:
+//
+//     "/articles/technology/42"
+//
+// This also works for host variables:
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//       Host("{subdomain}.domain.com").
+//       Name("article")
+//
+//     // url.String() will be "http://news.domain.com/articles/technology/42"
+//     url, err := r.Get("article").URL("subdomain", "news",
+//                                      "category", "technology",
+//                                      "id", "42")
+//
+// The scheme of the resulting url will be the first argument that was passed to Schemes:
+//
+//     // url.String() will be "https://example.com"
+//     r := mux.NewRouter()
+//     url, err := r.Host("example.com")
+//                  .Schemes("https", "http").URL()
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	var scheme, host, path string
+	queries := make([]string, 0, len(r.regexp.queries))
+	if r.regexp.host != nil {
+		if host, err = r.regexp.host.url(values); err != nil {
+			return nil, err
+		}
+		scheme = "http"
+		if r.buildScheme != "" {
+			scheme = r.buildScheme
+		}
+	}
+	if r.regexp.path != nil {
+		if path, err = r.regexp.path.url(values); err != nil {
+			return nil, err
+		}
+	}
+	for _, q := range r.regexp.queries {
+		var query string
+		if query, err = q.url(values); err != nil {
+			return nil, err
+		}
+		queries = append(queries, query)
+	}
+	return &url.URL{
+		Scheme:   scheme,
+		Host:     host,
+		Path:     path,
+		RawQuery: strings.Join(queries, "&"),
+	}, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp.host == nil {
+		return nil, errors.New("mux: route doesn't have a host")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	host, err := r.regexp.host.url(values)
+	if err != nil {
+		return nil, err
+	}
+	u := &url.URL{
+		Scheme: "http",
+		Host:   host,
+	}
+	if r.buildScheme != "" {
+		u.Scheme = r.buildScheme
+	}
+	return u, nil
+}
+
+// URLPath builds the path part of the URL for a route. See Route.URL().
+//
+// The route must have a path defined.
+func (r *Route) URLPath(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp.path == nil {
+		return nil, errors.New("mux: route doesn't have a path")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	path, err := r.regexp.path.url(values)
+	if err != nil {
+		return nil, err
+	}
+	return &url.URL{
+		Path: path,
+	}, nil
+}
+
+// GetPathTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp.path == nil {
+		return "", errors.New("mux: route doesn't have a path")
+	}
+	return r.regexp.path.template, nil
+}
+
+// GetPathRegexp returns the expanded regular expression used to match route path.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a path.
+func (r *Route) GetPathRegexp() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp.path == nil {
+		return "", errors.New("mux: route does not have a path")
+	}
+	return r.regexp.path.regexp.String(), nil
+}
+
+// GetQueriesRegexp returns the expanded regular expressions used to match the
+// route queries.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not have queries.
+func (r *Route) GetQueriesRegexp() ([]string, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp.queries == nil {
+		return nil, errors.New("mux: route doesn't have queries")
+	}
+	queries := make([]string, 0, len(r.regexp.queries))
+	for _, query := range r.regexp.queries {
+		queries = append(queries, query.regexp.String())
+	}
+	return queries, nil
+}
+
+// GetQueriesTemplates returns the templates used to build the
+// query matching.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define queries.
+func (r *Route) GetQueriesTemplates() ([]string, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp.queries == nil {
+		return nil, errors.New("mux: route doesn't have queries")
+	}
+	queries := make([]string, 0, len(r.regexp.queries))
+	for _, query := range r.regexp.queries {
+		queries = append(queries, query.template)
+	}
+	return queries, nil
+}
+
+// GetMethods returns the methods the route matches against.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not have methods.
+func (r *Route) GetMethods() ([]string, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	for _, m := range r.matchers {
+		if methods, ok := m.(methodMatcher); ok {
+			return []string(methods), nil
+		}
+	}
+	return nil, errors.New("mux: route doesn't have methods")
+}
+
+// GetHostTemplate returns the template used to build the
+// route match.
+// This is useful for building simple REST API documentation and for instrumentation
+// against third-party services.
+// An error will be returned if the route does not define a host.
+func (r *Route) GetHostTemplate() (string, error) {
+	if r.err != nil {
+		return "", r.err
+	}
+	if r.regexp.host == nil {
+		return "", errors.New("mux: route doesn't have a host")
+	}
+	return r.regexp.host.template, nil
+}
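+
+// Illustrative sketch (an assumption, not part of the upstream sources): the
+// Get* helpers above can be combined with Router.Walk, defined elsewhere in
+// this package, to dump a simple route listing, e.g. for generating API
+// documentation. ArticleHandler is a placeholder handler.
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/articles/{id:[0-9]+}", ArticleHandler).Methods("GET")
+//     _ = r.Walk(func(route *mux.Route, _ *mux.Router, _ []*mux.Route) error {
+//         if tpl, err := route.GetPathTemplate(); err == nil {
+//             methods, _ := route.GetMethods()
+//             fmt.Println(methods, tpl)
+//         }
+//         return nil
+//     })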
+
+// prepareVars converts the route variable pairs into a map. If the route has a
+// BuildVarsFunc, it is invoked.
+func (r *Route) prepareVars(pairs ...string) (map[string]string, error) {
+	m, err := mapFromPairsToString(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	return r.buildVars(m), nil
+}
+
+func (r *Route) buildVars(m map[string]string) map[string]string {
+	if r.buildVarsFunc != nil {
+		m = r.buildVarsFunc(m)
+	}
+	return m
+}
diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go
new file mode 100644
index 0000000..5f5c496
--- /dev/null
+++ b/vendor/github.com/gorilla/mux/test_helpers.go
@@ -0,0 +1,19 @@
+// Copyright 2012 The Gorilla Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mux
+
+import "net/http"
+
+// SetURLVars sets the URL variables for the given request, to be accessed via
+// mux.Vars for testing route behaviour. The arguments are not modified; a
+// shallow copy is returned.
+//
+// This API should only be used for testing purposes; it provides a way to
+// inject variables into the request context. Alternatively, URL variables
+// can be set by making a route that captures the required variables,
+// starting a server and sending the request to that server.
+func SetURLVars(r *http.Request, val map[string]string) *http.Request {
+	return requestWithVars(r, val)
+}
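+
+// Illustrative sketch (an assumption, not part of the upstream file): using
+// SetURLVars in a handler unit test. ArticleHandler and the "id" variable are
+// invented for the example.
+//
+//     req := httptest.NewRequest("GET", "/articles/42", nil)
+//     req = mux.SetURLVars(req, map[string]string{"id": "42"})
+//     rec := httptest.NewRecorder()
+//     ArticleHandler(rec, req) // the handler can now read mux.Vars(req)["id"]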
diff --git a/vendor/github.com/guumaster/tablewriter/.gitignore b/vendor/github.com/guumaster/tablewriter/.gitignore
new file mode 100644
index 0000000..b66cec6
--- /dev/null
+++ b/vendor/github.com/guumaster/tablewriter/.gitignore
@@ -0,0 +1,15 @@
+# Created by .ignore support plugin (hsz.mobi)
+### Go template
+# Binaries for programs and plugins
+*.exe
+*.exe~
+*.dll
+*.so
+*.dylib
+
+# Test binary, build with `go test -c`
+*.test
+
+# Output of the go coverage tool, specifically when used with LiteIDE
+*.out
+
diff --git a/vendor/github.com/guumaster/tablewriter/LICENSE.md b/vendor/github.com/guumaster/tablewriter/LICENSE.md
new file mode 100644
index 0000000..a0769b5
--- /dev/null
+++ b/vendor/github.com/guumaster/tablewriter/LICENSE.md
@@ -0,0 +1,19 @@
+Copyright (C) 2014 by Oleku Konko
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/guumaster/tablewriter/README.md b/vendor/github.com/guumaster/tablewriter/README.md
new file mode 100644
index 0000000..46fa458
--- /dev/null
+++ b/vendor/github.com/guumaster/tablewriter/README.md
@@ -0,0 +1,431 @@
+ASCII Table Writer
+=========
+
+[![ci](https://github.com/olekukonko/tablewriter/workflows/ci/badge.svg?branch=master)](https://github.com/olekukonko/tablewriter/actions?query=workflow%3Aci)
+[![Total views](https://img.shields.io/sourcegraph/rrc/github.com/olekukonko/tablewriter.svg)](https://sourcegraph.com/github.com/olekukonko/tablewriter)
+[![Godoc](https://godoc.org/github.com/olekukonko/tablewriter?status.svg)](https://godoc.org/github.com/olekukonko/tablewriter)
+
+Generate ASCII tables on the fly ... Installation is as simple as
+
+    go get github.com/olekukonko/tablewriter
+
+
+#### Features
+- Automatic padding
+- Support for multiple lines
+- Support for alignment
+- Support for custom separators
+- Automatic alignment of numbers & percentages
+- Write directly to http, file, etc. via `io.Writer`
+- Read directly from CSV files
+- Optional row line via `SetRowLine`
+- Normalised table headers
+- Optional CSV headers
+- Enable or disable table borders
+- Custom footer support
+- Optional merging of identical cells
+- Custom caption support
+- Optional reflowing of paragraphs in multi-line cells
+
+#### Example   1 - Basic
+```go
+data := [][]string{
+    []string{"A", "The Good", "500"},
+    []string{"B", "The Very very Bad Man", "288"},
+    []string{"C", "The Ugly", "120"},
+    []string{"D", "The Gopher", "800"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Name", "Sign", "Rating"})
+
+for _, v := range data {
+    table.Append(v)
+}
+table.Render() // Send output
+```
+
+##### Output  1
+```
++------+-----------------------+--------+
+| NAME |         SIGN          | RATING |
++------+-----------------------+--------+
+|  A   |       The Good        |    500 |
+|  B   | The Very very Bad Man |    288 |
+|  C   |       The Ugly        |    120 |
+|  D   |      The Gopher       |    800 |
++------+-----------------------+--------+
+```
+
+#### Example 2 - Without Border / Footer / Bulk Append
+```go
+data := [][]string{
+    []string{"1/1/2014", "Domain name", "2233", "$10.98"},
+    []string{"1/1/2014", "January Hosting", "2233", "$54.95"},
+    []string{"1/4/2014", "February Hosting", "2233", "$51.00"},
+    []string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
+table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer
+table.SetBorder(false)                                // Set Border to false
+table.AppendBulk(data)                                // Add Bulk Data
+table.Render()
+```
+
+##### Output 2
+```
+
+    DATE   |       DESCRIPTION        |  CV2  | AMOUNT
+-----------+--------------------------+-------+----------
+  1/1/2014 | Domain name              |  2233 | $10.98
+  1/1/2014 | January Hosting          |  2233 | $54.95
+  1/4/2014 | February Hosting         |  2233 | $51.00
+  1/4/2014 | February Extra Bandwidth |  2233 | $30.00
+-----------+--------------------------+-------+----------
+                                        TOTAL | $146 93
+                                      --------+----------
+
+```
+
+
+#### Example 3 - CSV
+```go
+table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test_info.csv", true)
+table.SetAlignment(tablewriter.ALIGN_LEFT)   // Set Alignment
+table.Render()
+```
+
+##### Output 3
+```
++----------+--------------+------+-----+---------+----------------+
+|  FIELD   |     TYPE     | NULL | KEY | DEFAULT |     EXTRA      |
++----------+--------------+------+-----+---------+----------------+
+| user_id  | smallint(5)  | NO   | PRI | NULL    | auto_increment |
+| username | varchar(10)  | NO   |     | NULL    |                |
+| password | varchar(100) | NO   |     | NULL    |                |
++----------+--------------+------+-----+---------+----------------+
+```
+
+#### Example 4  - Custom Separator
+```go
+table, _ := tablewriter.NewCSV(os.Stdout, "testdata/test.csv", true)
+table.SetRowLine(true)         // Enable row line
+
+// Change table lines
+table.SetCenterSeparator("*")
+table.SetColumnSeparator("╪")
+table.SetRowSeparator("-")
+
+table.SetAlignment(tablewriter.ALIGN_LEFT)
+table.Render()
+```
+
+##### Output 4
+```
+*------------*-----------*---------*
+╪ FIRST NAME ╪ LAST NAME ╪   SSN   ╪
+*------------*-----------*---------*
+╪ John       ╪ Barry     ╪ 123456  ╪
+*------------*-----------*---------*
+╪ Kathy      ╪ Smith     ╪ 687987  ╪
+*------------*-----------*---------*
+╪ Bob        ╪ McCornick ╪ 3979870 ╪
+*------------*-----------*---------*
+```
+
+#### Example 5 - Markdown Format
+```go
+data := [][]string{
+	[]string{"1/1/2014", "Domain name", "2233", "$10.98"},
+	[]string{"1/1/2014", "January Hosting", "2233", "$54.95"},
+	[]string{"1/4/2014", "February Hosting", "2233", "$51.00"},
+	[]string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
+table.SetBorders(tablewriter.Border{Left: true, Top: false, Right: true, Bottom: false})
+table.SetCenterSeparator("|")
+table.AppendBulk(data) // Add Bulk Data
+table.Render()
+```
+
+##### Output 5
+```
+|   DATE   |       DESCRIPTION        | CV2  | AMOUNT |
+|----------|--------------------------|------|--------|
+| 1/1/2014 | Domain name              | 2233 | $10.98 |
+| 1/1/2014 | January Hosting          | 2233 | $54.95 |
+| 1/4/2014 | February Hosting         | 2233 | $51.00 |
+| 1/4/2014 | February Extra Bandwidth | 2233 | $30.00 |
+```
+
+#### Example 6  - Identical cells merging
+```go
+data := [][]string{
+  []string{"1/1/2014", "Domain name", "1234", "$10.98"},
+  []string{"1/1/2014", "January Hosting", "2345", "$54.95"},
+  []string{"1/4/2014", "February Hosting", "3456", "$51.00"},
+  []string{"1/4/2014", "February Extra Bandwidth", "4567", "$30.00"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
+table.SetFooter([]string{"", "", "Total", "$146.93"})
+table.SetAutoMergeCells(true)
+table.SetRowLine(true)
+table.AppendBulk(data)
+table.Render()
+```
+
+##### Output 6
+```
++----------+--------------------------+-------+---------+
+|   DATE   |       DESCRIPTION        |  CV2  | AMOUNT  |
++----------+--------------------------+-------+---------+
+| 1/1/2014 | Domain name              |  1234 | $10.98  |
++          +--------------------------+-------+---------+
+|          | January Hosting          |  2345 | $54.95  |
++----------+--------------------------+-------+---------+
+| 1/4/2014 | February Hosting         |  3456 | $51.00  |
++          +--------------------------+-------+---------+
+|          | February Extra Bandwidth |  4567 | $30.00  |
++----------+--------------------------+-------+---------+
+|                                       TOTAL | $146 93 |
++----------+--------------------------+-------+---------+
+```
+
+#### Example 7  - Identical cells merging (specify the column index to merge)
+```go
+data := [][]string{
+  []string{"1/1/2014", "Domain name", "1234", "$10.98"},
+  []string{"1/1/2014", "January Hosting", "1234", "$10.98"},
+  []string{"1/4/2014", "February Hosting", "3456", "$51.00"},
+  []string{"1/4/2014", "February Extra Bandwidth", "4567", "$30.00"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
+table.SetFooter([]string{"", "", "Total", "$146.93"})
+table.SetAutoMergeCellsByColumnIndex([]int{2, 3})
+table.SetRowLine(true)
+table.AppendBulk(data)
+table.Render()
+```
+
+##### Output 7
+```
++----------+--------------------------+-------+---------+
+|   DATE   |       DESCRIPTION        |  CV2  | AMOUNT  |
++----------+--------------------------+-------+---------+
+| 1/1/2014 | Domain name              |  1234 | $10.98  |
++----------+--------------------------+       +         +
+| 1/1/2014 | January Hosting          |       |         |
++----------+--------------------------+-------+---------+
+| 1/4/2014 | February Hosting         |  3456 | $51.00  |
++----------+--------------------------+-------+---------+
+| 1/4/2014 | February Extra Bandwidth |  4567 | $30.00  |
++----------+--------------------------+-------+---------+
+|                                       TOTAL | $146.93 |
++----------+--------------------------+-------+---------+
+```
+
+
+#### Table with color
+```go
+data := [][]string{
+	[]string{"1/1/2014", "Domain name", "2233", "$10.98"},
+	[]string{"1/1/2014", "January Hosting", "2233", "$54.95"},
+	[]string{"1/4/2014", "February Hosting", "2233", "$51.00"},
+	[]string{"1/4/2014", "February Extra Bandwidth", "2233", "$30.00"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Date", "Description", "CV2", "Amount"})
+table.SetFooter([]string{"", "", "Total", "$146.93"}) // Add Footer
+table.SetBorder(false)                                // Set Border to false
+
+table.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgGreenColor},
+	tablewriter.Colors{tablewriter.FgHiRedColor, tablewriter.Bold, tablewriter.BgBlackColor},
+	tablewriter.Colors{tablewriter.BgRedColor, tablewriter.FgWhiteColor},
+	tablewriter.Colors{tablewriter.BgCyanColor, tablewriter.FgWhiteColor})
+
+table.SetColumnColor(tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor},
+	tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiRedColor},
+	tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor},
+	tablewriter.Colors{tablewriter.Bold, tablewriter.FgBlackColor})
+
+table.SetFooterColor(tablewriter.Colors{}, tablewriter.Colors{},
+	tablewriter.Colors{tablewriter.Bold},
+	tablewriter.Colors{tablewriter.FgHiRedColor})
+
+table.AppendBulk(data)
+table.Render()
+```
+
+#### Table with color Output
+![Table with Color](https://cloud.githubusercontent.com/assets/6460392/21101956/bbc7b356-c0a1-11e6-9f36-dba694746efc.png)
+
+#### Example 8 - Table Cells with Color
+
+Individual Cell Colors from `func Rich` take precedence over Column Colors
+
+```go
+data := [][]string{
+	[]string{"Test1Merge", "HelloCol2 - 1", "HelloCol3 - 1", "HelloCol4 - 1"},
+	[]string{"Test1Merge", "HelloCol2 - 2", "HelloCol3 - 2", "HelloCol4 - 2"},
+	[]string{"Test1Merge", "HelloCol2 - 3", "HelloCol3 - 3", "HelloCol4 - 3"},
+	[]string{"Test2Merge", "HelloCol2 - 4", "HelloCol3 - 4", "HelloCol4 - 4"},
+	[]string{"Test2Merge", "HelloCol2 - 5", "HelloCol3 - 5", "HelloCol4 - 5"},
+	[]string{"Test2Merge", "HelloCol2 - 6", "HelloCol3 - 6", "HelloCol4 - 6"},
+	[]string{"Test2Merge", "HelloCol2 - 7", "HelloCol3 - 7", "HelloCol4 - 7"},
+	[]string{"Test3Merge", "HelloCol2 - 8", "HelloCol3 - 8", "HelloCol4 - 8"},
+	[]string{"Test3Merge", "HelloCol2 - 9", "HelloCol3 - 9", "HelloCol4 - 9"},
+	[]string{"Test3Merge", "HelloCol2 - 10", "HelloCol3 -10", "HelloCol4 - 10"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Col1", "Col2", "Col3", "Col4"})
+table.SetFooter([]string{"", "", "Footer3", "Footer4"})
+table.SetBorder(false)
+
+table.SetHeaderColor(tablewriter.Colors{tablewriter.Bold, tablewriter.BgGreenColor},
+	tablewriter.Colors{tablewriter.FgHiRedColor, tablewriter.Bold, tablewriter.BgBlackColor},
+	tablewriter.Colors{tablewriter.BgRedColor, tablewriter.FgWhiteColor},
+	tablewriter.Colors{tablewriter.BgCyanColor, tablewriter.FgWhiteColor})
+
+table.SetColumnColor(tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor},
+	tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiRedColor},
+	tablewriter.Colors{tablewriter.Bold, tablewriter.FgHiBlackColor},
+	tablewriter.Colors{tablewriter.Bold, tablewriter.FgBlackColor})
+
+table.SetFooterColor(tablewriter.Colors{}, tablewriter.Colors{},
+	tablewriter.Colors{tablewriter.Bold},
+	tablewriter.Colors{tablewriter.FgHiRedColor})
+
+colorData1 := []string{"TestCOLOR1Merge", "HelloCol2 - COLOR1", "HelloCol3 - COLOR1", "HelloCol4 - COLOR1"}
+colorData2 := []string{"TestCOLOR2Merge", "HelloCol2 - COLOR2", "HelloCol3 - COLOR2", "HelloCol4 - COLOR2"}
+
+for i, row := range data {
+	if i == 4 {
+		table.Rich(colorData1, []tablewriter.Colors{tablewriter.Colors{}, tablewriter.Colors{tablewriter.Normal, tablewriter.FgCyanColor}, tablewriter.Colors{tablewriter.Bold, tablewriter.FgWhiteColor}, tablewriter.Colors{}})
+		table.Rich(colorData2, []tablewriter.Colors{tablewriter.Colors{tablewriter.Normal, tablewriter.FgMagentaColor}, tablewriter.Colors{}, tablewriter.Colors{tablewriter.Bold, tablewriter.BgRedColor}, tablewriter.Colors{tablewriter.FgHiGreenColor, tablewriter.Italic, tablewriter.BgHiCyanColor}})
+	}
+	table.Append(row)
+}
+
+table.SetAutoMergeCells(true)
+table.Render()
+
+```
+
+##### Table cells with color Output
+![Table cells with Color](https://user-images.githubusercontent.com/9064687/63969376-bcd88d80-ca6f-11e9-9466-c3d954700b25.png)
+
+#### Example 9 - Set table caption
+```go
+data := [][]string{
+    []string{"A", "The Good", "500"},
+    []string{"B", "The Very very Bad Man", "288"},
+    []string{"C", "The Ugly", "120"},
+    []string{"D", "The Gopher", "800"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Name", "Sign", "Rating"})
+table.SetCaption(true, "Movie ratings.")
+
+for _, v := range data {
+    table.Append(v)
+}
+table.Render() // Send output
+```
+
+Note: Caption text will be wrapped to the total width of the rendered table.
+
+##### Output 9
+```
++------+-----------------------+--------+
+| NAME |         SIGN          | RATING |
++------+-----------------------+--------+
+|  A   |       The Good        |    500 |
+|  B   | The Very very Bad Man |    288 |
+|  C   |       The Ugly        |    120 |
+|  D   |      The Gopher       |    800 |
++------+-----------------------+--------+
+Movie ratings.
+```
+
+#### Example 10 - Set NoWhiteSpace and TablePadding option
+```go
+data := [][]string{
+    {"node1.example.com", "Ready", "compute", "1.11"},
+    {"node2.example.com", "Ready", "compute", "1.11"},
+    {"node3.example.com", "Ready", "compute", "1.11"},
+    {"node4.example.com", "NotReady", "compute", "1.11"},
+}
+
+table := tablewriter.NewWriter(os.Stdout)
+table.SetHeader([]string{"Name", "Status", "Role", "Version"})
+table.SetAutoWrapText(false)
+table.SetAutoFormatHeaders(true)
+table.SetHeaderAlignment(ALIGN_LEFT)
+table.SetAlignment(ALIGN_LEFT)
+table.SetCenterSeparator("")
+table.SetColumnSeparator("")
+table.SetRowSeparator("")
+table.SetHeaderLine(false)
+table.SetBorder(false)
+table.SetTablePadding("\t") // pad with tabs
+table.SetNoWhiteSpace(true)
+table.AppendBulk(data) // Add Bulk Data
+table.Render()
+```
+
+##### Output 10
+```
+NAME             	STATUS  	ROLE   	VERSION 
+node1.example.com	Ready   	compute	1.11   	
+node2.example.com	Ready   	compute	1.11   	
+node3.example.com	Ready   	compute	1.11   	
+node4.example.com	NotReady	compute	1.11   	
+```
+
+#### Render table into a string
+
+Instead of rendering the table to `os.Stdout`, you can also render it into a string. Go 1.10 introduced the `strings.Builder` type, which implements the `io.Writer` interface and can therefore be used for this task. Example:
+
+```go
+package main
+
+import (
+    "strings"
+    "fmt"
+
+    "github.com/olekukonko/tablewriter"
+)
+
+func main() {
+    tableString := &strings.Builder{}
+    table := tablewriter.NewWriter(tableString)
+
+    /*
+     * Code to fill the table
+     */
+
+    table.Render()
+
+    fmt.Println(tableString.String())
+}
+```
+
+#### TODO
+- ~~Import Directly from CSV~~  - `done`
+- ~~Support for `SetFooter`~~  - `done`
+- ~~Support for `SetBorder`~~  - `done`
+- ~~Support table with uneven rows~~ - `done`
+- ~~Support custom alignment~~
+- General Improvement & Optimisation
+- `NewHTML` Parse table from HTML
diff --git a/vendor/github.com/guumaster/tablewriter/csv.go b/vendor/github.com/guumaster/tablewriter/csv.go
new file mode 100644
index 0000000..9887830
--- /dev/null
+++ b/vendor/github.com/guumaster/tablewriter/csv.go
@@ -0,0 +1,52 @@
+// Copyright 2014 Oleku Konko All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+// This module is a Table Writer API for the Go Programming Language.
+// The protocols were written in pure Go and work on Windows and Unix systems.
+
+package tablewriter
+
+import (
+	"encoding/csv"
+	"io"
+	"os"
+)
+
+// NewCSV starts a new table by importing from a CSV file.
+// It takes an io.Writer and a CSV file name.
+func NewCSV(writer io.Writer, fileName string, hasHeader bool) (*Table, error) {
+	file, err := os.Open(fileName)
+	if err != nil {
+		return &Table{}, err
+	}
+	defer file.Close()
+	csvReader := csv.NewReader(file)
+	t, err := NewCSVReader(writer, csvReader, hasHeader)
+	return t, err
+}
+
+// NewCSVReader starts a new table writer with a csv.Reader.
+// This enables customisation such as reader.Comma = ';'.
+// See http://golang.org/src/pkg/encoding/csv/reader.go?s=3213:3671#L94
+func NewCSVReader(writer io.Writer, csvReader *csv.Reader, hasHeader bool) (*Table, error) {
+	t := NewWriter(writer)
+	if hasHeader {
+		// Read the first row
+		headers, err := csvReader.Read()
+		if err != nil {
+			return &Table{}, err
+		}
+		t.SetHeader(headers)
+	}
+	for {
+		record, err := csvReader.Read()
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			return &Table{}, err
+		}
+		t.Append(record)
+	}
+	return t, nil
+}
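+
+// Illustrative sketch (an assumption, not part of the upstream file): building
+// a table from a semicolon-separated file by customising the csv.Reader before
+// handing it to NewCSVReader. The file name is invented for the example.
+//
+//     f, err := os.Open("testdata/data.csv")
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     defer f.Close()
+//
+//     reader := csv.NewReader(f)
+//     reader.Comma = ';'
+//
+//     table, err := tablewriter.NewCSVReader(os.Stdout, reader, true)
+//     if err != nil {
+//         log.Fatal(err)
+//     }
+//     table.Render()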
diff --git a/vendor/github.com/guumaster/tablewriter/go.mod b/vendor/github.com/guumaster/tablewriter/go.mod
new file mode 100644
index 0000000..78c8aa7
--- /dev/null
+++ b/vendor/github.com/guumaster/tablewriter/go.mod
@@ -0,0 +1,5 @@
+module github.com/guumaster/tablewriter
+
+go 1.12
+
+require github.com/mattn/go-runewidth v0.0.10
diff --git a/vendor/github.com/guumaster/tablewriter/go.sum b/vendor/github.com/guumaster/tablewriter/go.sum
new file mode 100644
index 0000000..b8b450d
--- /dev/null
+++ b/vendor/github.com/guumaster/tablewriter/go.sum
@@ -0,0 +1,4 @@
+github.com/mattn/go-runewidth v0.0.10 h1:CoZ3S2P7pvtP45xOtBw+/mDL2z0RKI576gSkzRRpdGg=
+github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
+github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY=
+github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
diff --git a/vendor/github.com/guumaster/tablewriter/table.go b/vendor/github.com/guumaster/tablewriter/table.go
new file mode 100644
index 0000000..08ae052
--- /dev/null
+++ b/vendor/github.com/guumaster/tablewriter/table.go
@@ -0,0 +1,1086 @@
+// Copyright 2014 Oleku Konko All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+// This module is a Table Writer API for the Go Programming Language.
+// The protocols were written in pure Go and work on Windows and Unix systems.
+
+// Create & generate text-based tables.
+package tablewriter
+
+import (
+	"bytes"
+	"errors"
+	"fmt"
+	"io"
+	"reflect"
+	"regexp"
+	"strings"
+)
+
+const (
+	MAX_ROW_WIDTH = 30
+)
+
+const (
+	CENTER  = "+"
+	ROW     = "-"
+	COLUMN  = "|"
+	SPACE   = " "
+	NEWLINE = "\n"
+)
+
+const (
+	ALIGN_DEFAULT = iota
+	ALIGN_CENTER
+	ALIGN_RIGHT
+	ALIGN_LEFT
+)
+
+var (
+	decimal = regexp.MustCompile(`^-?(?:\d{1,3}(?:,\d{3})*|\d+)(?:\.\d+)?$`)
+	percent = regexp.MustCompile(`^-?\d+\.?\d*$%$`)
+)
+
+type Border struct {
+	Left   bool
+	Right  bool
+	Top    bool
+	Bottom bool
+}
+
+type Table struct {
+	separators              []int
+	out                     io.Writer
+	rows                    [][]string
+	lines                   [][][]string
+	cs                      map[int]int
+	rs                      map[int]int
+	headers                 [][]string
+	footers                 [][]string
+	caption                 bool
+	captionText             string
+	autoFmt                 bool
+	autoWrap                bool
+	reflowText              bool
+	mW                      int
+	pCenter                 string
+	pRow                    string
+	pColumn                 string
+	tColumn                 int
+	tRow                    int
+	hAlign                  int
+	fAlign                  int
+	align                   int
+	newLine                 string
+	rowLine                 bool
+	autoMergeCells          bool
+	columnsToAutoMergeCells map[int]bool
+	noWhiteSpace            bool
+	tablePadding            string
+	hdrLine                 bool
+	borders                 Border
+	colSize                 int
+	headerParams            []string
+	columnsParams           []string
+	footerParams            []string
+	columnsAlign            []int
+}
+
+// NewWriter starts a new table.
+// It takes an io.Writer directly.
+func NewWriter(writer io.Writer) *Table {
+	t := &Table{
+		separators:    []int{},
+		out:           writer,
+		rows:          [][]string{},
+		lines:         [][][]string{},
+		cs:            make(map[int]int),
+		rs:            make(map[int]int),
+		headers:       [][]string{},
+		footers:       [][]string{},
+		caption:       false,
+		captionText:   "Table caption.",
+		autoFmt:       true,
+		autoWrap:      true,
+		reflowText:    true,
+		mW:            MAX_ROW_WIDTH,
+		pCenter:       CENTER,
+		pRow:          ROW,
+		pColumn:       COLUMN,
+		tColumn:       -1,
+		tRow:          -1,
+		hAlign:        ALIGN_DEFAULT,
+		fAlign:        ALIGN_DEFAULT,
+		align:         ALIGN_DEFAULT,
+		newLine:       NEWLINE,
+		rowLine:       false,
+		hdrLine:       true,
+		borders:       Border{Left: true, Right: true, Bottom: true, Top: true},
+		colSize:       -1,
+		headerParams:  []string{},
+		columnsParams: []string{},
+		footerParams:  []string{},
+		columnsAlign:  []int{}}
+	return t
+}
+
+func (t *Table) AddSeparator() {
+	t.separators = append(t.separators, len(t.lines))
+}
+
+// Render table output
+func (t *Table) Render() {
+	if t.borders.Top {
+		t.printLine(true)
+	}
+	t.printHeading()
+	if t.autoMergeCells {
+		t.printRowsMergeCells()
+	} else {
+		t.printRows()
+	}
+	if !t.rowLine && t.borders.Bottom {
+		t.printLine(true)
+	}
+	t.printFooter()
+
+	if t.caption {
+		t.printCaption()
+	}
+}
+
+const (
+	headerRowIdx = -1
+	footerRowIdx = -2
+)
+
+// Set table header
+func (t *Table) SetHeader(keys []string) {
+	t.colSize = len(keys)
+	for i, v := range keys {
+		lines := t.parseDimension(v, i, headerRowIdx)
+		t.headers = append(t.headers, lines)
+	}
+}
+
+// Set table Footer
+func (t *Table) SetFooter(keys []string) {
+	//t.colSize = len(keys)
+	for i, v := range keys {
+		lines := t.parseDimension(v, i, footerRowIdx)
+		t.footers = append(t.footers, lines)
+	}
+}
+
+// Set table Caption
+func (t *Table) SetCaption(caption bool, captionText ...string) {
+	t.caption = caption
+	if len(captionText) == 1 {
+		t.captionText = captionText[0]
+	}
+}
+
+// Turn header autoformatting on/off. Default is on (true).
+func (t *Table) SetAutoFormatHeaders(auto bool) {
+	t.autoFmt = auto
+}
+
+// Turn automatic multiline text adjustment on/off. Default is on (true).
+func (t *Table) SetAutoWrapText(auto bool) {
+	t.autoWrap = auto
+}
+
+// Turn automatic reflowing of multiline text while wrapping on/off. Default is on (true).
+func (t *Table) SetReflowDuringAutoWrap(auto bool) {
+	t.reflowText = auto
+}
+
+// Set the Default column width
+func (t *Table) SetColWidth(width int) {
+	t.mW = width
+}
+
+// Set the minimal width for a column
+func (t *Table) SetColMinWidth(column int, width int) {
+	t.cs[column] = width
+}
+
+// Set the Column Separator
+func (t *Table) SetColumnSeparator(sep string) {
+	t.pColumn = sep
+}
+
+// Set the Row Separator
+func (t *Table) SetRowSeparator(sep string) {
+	t.pRow = sep
+}
+
+// Set the center Separator
+func (t *Table) SetCenterSeparator(sep string) {
+	t.pCenter = sep
+}
+
+// Set Header Alignment
+func (t *Table) SetHeaderAlignment(hAlign int) {
+	t.hAlign = hAlign
+}
+
+// Set Footer Alignment
+func (t *Table) SetFooterAlignment(fAlign int) {
+	t.fAlign = fAlign
+}
+
+// Set Table Alignment
+func (t *Table) SetAlignment(align int) {
+	t.align = align
+}
+
+// Set No White Space
+func (t *Table) SetNoWhiteSpace(allow bool) {
+	t.noWhiteSpace = allow
+}
+
+// Set Table Padding
+func (t *Table) SetTablePadding(padding string) {
+	t.tablePadding = padding
+}
+
+func (t *Table) SetColumnAlignment(keys []int) {
+	for _, v := range keys {
+		switch v {
+		case ALIGN_CENTER:
+			break
+		case ALIGN_LEFT:
+			break
+		case ALIGN_RIGHT:
+			break
+		default:
+			v = ALIGN_DEFAULT
+		}
+		t.columnsAlign = append(t.columnsAlign, v)
+	}
+}
+
+// Set New Line
+func (t *Table) SetNewLine(nl string) {
+	t.newLine = nl
+}
+
+// Set Header Line
+// This would enable / disable a line after the header
+func (t *Table) SetHeaderLine(line bool) {
+	t.hdrLine = line
+}
+
+// Set Row Line
+// This would enable / disable a line on each row of the table
+func (t *Table) SetRowLine(line bool) {
+	t.rowLine = line
+}
+
+// Set Auto Merge Cells
+// This would enable / disable the merge of cells with identical values
+func (t *Table) SetAutoMergeCells(auto bool) {
+	t.autoMergeCells = auto
+}
+
+// Set Auto Merge Cells By Column Index
+// This would enable / disable the merge of cells with identical values for specific columns
+// If cols is empty, it is the same as `SetAutoMergeCells(true)`.
+func (t *Table) SetAutoMergeCellsByColumnIndex(cols []int) {
+	t.autoMergeCells = true
+
+	if len(cols) > 0 {
+		m := make(map[int]bool)
+		for _, col := range cols {
+			m[col] = true
+		}
+		t.columnsToAutoMergeCells = m
+	}
+}
+
+// Set Table Border
+// This would enable / disable line around the table
+func (t *Table) SetBorder(border bool) {
+	t.SetBorders(Border{border, border, border, border})
+}
+
+func (t *Table) SetBorders(border Border) {
+	t.borders = border
+}
+
+// SetStructs sets the header and rows from a slice of structs.
+// If something that is not a slice is passed, an error will be returned.
+// The tag specified by "tablewriter" on a struct field becomes the header.
+// If the tag is not specified or empty, the field name will be used.
+// The fields of the first element of the slice are used as the header.
+// If an element implements fmt.Stringer, its result will be used.
+// If the slice contains nil elements, they will be skipped without rendering.
+func (t *Table) SetStructs(v interface{}) error {
+	if v == nil {
+		return errors.New("nil value")
+	}
+	vt := reflect.TypeOf(v)
+	vv := reflect.ValueOf(v)
+	switch vt.Kind() {
+	case reflect.Slice, reflect.Array:
+		if vv.Len() < 1 {
+			return errors.New("empty value")
+		}
+
+		// check first element to set header
+		first := vv.Index(0)
+		e := first.Type()
+		switch e.Kind() {
+		case reflect.Struct:
+			// OK
+		case reflect.Ptr:
+			if first.IsNil() {
+				return errors.New("the first element is nil")
+			}
+			e = first.Elem().Type()
+			if e.Kind() != reflect.Struct {
+				return fmt.Errorf("invalid kind %s", e.Kind())
+			}
+		default:
+			return fmt.Errorf("invalid kind %s", e.Kind())
+		}
+		n := e.NumField()
+		headers := make([]string, n)
+		for i := 0; i < n; i++ {
+			f := e.Field(i)
+			header := f.Tag.Get("tablewriter")
+			if header == "" {
+				header = f.Name
+			}
+			headers[i] = header
+		}
+		t.SetHeader(headers)
+
+		for i := 0; i < vv.Len(); i++ {
+			item := reflect.Indirect(vv.Index(i))
+			itemType := reflect.TypeOf(item)
+			switch itemType.Kind() {
+			case reflect.Struct:
+				// OK
+			default:
+				return fmt.Errorf("invalid item type %v", itemType.Kind())
+			}
+			if !item.IsValid() {
+				// skip rendering
+				continue
+			}
+			nf := item.NumField()
+			if n != nf {
+				return errors.New("invalid num of field")
+			}
+			rows := make([]string, nf)
+			for j := 0; j < nf; j++ {
+				f := reflect.Indirect(item.Field(j))
+				if f.Kind() == reflect.Ptr {
+					f = f.Elem()
+				}
+				if f.IsValid() {
+					if s, ok := f.Interface().(fmt.Stringer); ok {
+						rows[j] = s.String()
+						continue
+					}
+					rows[j] = fmt.Sprint(f)
+				} else {
+					rows[j] = "nil"
+				}
+			}
+			t.Append(rows)
+		}
+	default:
+		return fmt.Errorf("invalid type %T", v)
+	}
+	return nil
+}
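+
+// Illustrative sketch (an assumption, not part of the upstream file): rendering
+// a slice of structs with SetStructs. The struct type, tags and values are
+// invented for the example.
+//
+//     type Node struct {
+//         Name    string `tablewriter:"NAME"`
+//         Status  string `tablewriter:"STATUS"`
+//         Version string // untagged: the field name is used as the header
+//     }
+//
+//     nodes := []Node{{"node1", "Ready", "1.11"}, {"node2", "NotReady", "1.11"}}
+//     table := tablewriter.NewWriter(os.Stdout)
+//     if err := table.SetStructs(nodes); err != nil {
+//         log.Fatal(err)
+//     }
+//     table.Render()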
+
+// Append row to table
+func (t *Table) Append(row []string) {
+	rowSize := len(t.headers)
+	if rowSize > t.colSize {
+		t.colSize = rowSize
+	}
+
+	n := len(t.lines)
+	line := [][]string{}
+	for i, v := range row {
+
+		// Detect string  width
+		// Detect String height
+		// Break strings into words
+		out := t.parseDimension(v, i, n)
+
+		// Append broken words
+		line = append(line, out)
+	}
+	t.lines = append(t.lines, line)
+}
+
+// Append row to table with color attributes
+func (t *Table) Rich(row []string, colors []Colors) {
+	rowSize := len(t.headers)
+	if rowSize > t.colSize {
+		t.colSize = rowSize
+	}
+
+	n := len(t.lines)
+	line := [][]string{}
+	for i, v := range row {
+
+		// Detect string  width
+		// Detect String height
+		// Break strings into words
+		out := t.parseDimension(v, i, n)
+
+		if len(colors) > i {
+			color := colors[i]
+			out[0] = format(out[0], color)
+		}
+
+		// Append broken words
+		line = append(line, out)
+	}
+	t.lines = append(t.lines, line)
+}
+
+// Allow Support for Bulk Append
+// Eliminates repeated for loops
+func (t *Table) AppendBulk(rows [][]string) {
+	for _, row := range rows {
+		t.Append(row)
+	}
+}
+
+// NumLines to get the number of lines
+func (t *Table) NumLines() int {
+	return len(t.lines)
+}
+
+// Clear rows
+func (t *Table) ClearRows() {
+	t.lines = [][][]string{}
+}
+
+// Clear footer
+func (t *Table) ClearFooter() {
+	t.footers = [][]string{}
+}
+
+// Center based on position and border.
+func (t *Table) center(i int) string {
+	if i == -1 && !t.borders.Left {
+		return t.pRow
+	}
+
+	if i == len(t.cs)-1 && !t.borders.Right {
+		return t.pRow
+	}
+
+	return t.pCenter
+}
+
+// Print line based on row width
+func (t *Table) printLine(nl bool) {
+	fmt.Fprint(t.out, t.center(-1))
+	for i := 0; i < len(t.cs); i++ {
+		v := t.cs[i]
+		fmt.Fprintf(t.out, "%s%s%s%s",
+			t.pRow,
+			strings.Repeat(string(t.pRow), v),
+			t.pRow,
+			t.center(i))
+	}
+	if nl {
+		fmt.Fprint(t.out, t.newLine)
+	}
+}
+
+// Print line based on row width with or without cell separator
+func (t *Table) printLineOptionalCellSeparators(nl bool, displayCellSeparator []bool) {
+	fmt.Fprint(t.out, t.pCenter)
+	for i := 0; i < len(t.cs); i++ {
+		v := t.cs[i]
+		if i > len(displayCellSeparator) || displayCellSeparator[i] {
+			// Display the cell separator
+			fmt.Fprintf(t.out, "%s%s%s%s",
+				t.pRow,
+				strings.Repeat(string(t.pRow), v),
+				t.pRow,
+				t.pCenter)
+		} else {
+			// Don't display the cell separator for this cell
+			fmt.Fprintf(t.out, "%s%s",
+				strings.Repeat(" ", v+2),
+				t.pCenter)
+		}
+	}
+	if nl {
+		fmt.Fprint(t.out, t.newLine)
+	}
+}
+
+// Return the PadRight function if align is left, PadLeft if align is right,
+// and Pad by default
+func pad(align int) func(string, string, int) string {
+	padFunc := Pad
+	switch align {
+	case ALIGN_LEFT:
+		padFunc = PadRight
+	case ALIGN_RIGHT:
+		padFunc = PadLeft
+	}
+	return padFunc
+}
+
+// Print heading information
+func (t *Table) printHeading() {
+	// Check if headers is available
+	if len(t.headers) < 1 {
+		return
+	}
+
+	// Identify last column
+	end := len(t.cs) - 1
+
+	// Get pad function
+	padFunc := pad(t.hAlign)
+
+	// Checking for ANSI escape sequences for header
+	is_esc_seq := false
+	if len(t.headerParams) > 0 {
+		is_esc_seq = true
+	}
+
+	// Maximum height.
+	max := t.rs[headerRowIdx]
+
+	// Print Heading
+	for x := 0; x < max; x++ {
+		// Check if border is set
+		// Replace with space if not set
+		if !t.noWhiteSpace {
+			fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE))
+		}
+
+		for y := 0; y <= end; y++ {
+			v := t.cs[y]
+			h := ""
+
+			if y < len(t.headers) && x < len(t.headers[y]) {
+				h = t.headers[y][x]
+			}
+			if t.autoFmt {
+				h = Title(h)
+			}
+			pad := ConditionString((y == end && !t.borders.Left), SPACE, t.pColumn)
+			if t.noWhiteSpace {
+				pad = ConditionString((y == end && !t.borders.Left), SPACE, t.tablePadding)
+			}
+			if is_esc_seq {
+				if !t.noWhiteSpace {
+					fmt.Fprintf(t.out, " %s %s",
+						format(padFunc(h, SPACE, v),
+							t.headerParams[y]), pad)
+				} else {
+					fmt.Fprintf(t.out, "%s %s",
+						format(padFunc(h, SPACE, v),
+							t.headerParams[y]), pad)
+				}
+			} else {
+				if !t.noWhiteSpace {
+					fmt.Fprintf(t.out, " %s %s",
+						padFunc(h, SPACE, v),
+						pad)
+				} else {
+					// the spaces between breaks the kube formatting
+					fmt.Fprintf(t.out, "%s%s",
+						padFunc(h, SPACE, v),
+						pad)
+				}
+			}
+		}
+		// Next line
+		fmt.Fprint(t.out, t.newLine)
+	}
+	if t.hdrLine {
+		t.printLine(true)
+	}
+}
+
+// Print footer information
+func (t *Table) printFooter() {
+	// Check if footer is available
+	if len(t.footers) < 1 {
+		return
+	}
+
+	// Only print line if border is not set
+	if !t.borders.Bottom {
+		t.printLine(true)
+	}
+
+	// Identify last column
+	end := len(t.cs) - 1
+
+	// Get pad function
+	padFunc := pad(t.fAlign)
+
+	// Checking for ANSI escape sequences for header
+	is_esc_seq := false
+	if len(t.footerParams) > 0 {
+		is_esc_seq = true
+	}
+
+	// Maximum height.
+	max := t.rs[footerRowIdx]
+
+	// Print Footer
+	erasePad := make([]bool, len(t.footers))
+	for x := 0; x < max; x++ {
+		// Check if border is set
+		// Replace with space if not set
+		fmt.Fprint(t.out, ConditionString(t.borders.Bottom, t.pColumn, SPACE))
+
+		for y := 0; y <= end; y++ {
+			v := t.cs[y]
+			f := ""
+			if y < len(t.footers) && x < len(t.footers[y]) {
+				f = t.footers[y][x]
+			}
+			if t.autoFmt {
+				f = Title(f)
+			}
+			pad := ConditionString((y == end && !t.borders.Top), SPACE, t.pColumn)
+
+			if erasePad[y] || (x == 0 && len(f) == 0) {
+				pad = SPACE
+				erasePad[y] = true
+			}
+
+			if is_esc_seq {
+				fmt.Fprintf(t.out, " %s %s",
+					format(padFunc(f, SPACE, v),
+						t.footerParams[y]), pad)
+			} else {
+				fmt.Fprintf(t.out, " %s %s",
+					padFunc(f, SPACE, v),
+					pad)
+			}
+
+			//fmt.Fprintf(t.out, " %s %s",
+			//	padFunc(f, SPACE, v),
+			//	pad)
+		}
+		// Next line
+		fmt.Fprint(t.out, t.newLine)
+		//t.printLine(true)
+	}
+
+	hasPrinted := false
+
+	for i := 0; i <= end; i++ {
+		v := t.cs[i]
+		pad := t.pRow
+		center := t.pCenter
+		length := len(t.footers[i][0])
+
+		if length > 0 {
+			hasPrinted = true
+		}
+
+		// Set center to be space if length is 0
+		if length == 0 && !t.borders.Right {
+			center = SPACE
+		}
+
+		// Print first junction
+		if i == 0 {
+			if length > 0 && !t.borders.Left {
+				center = t.pRow
+			}
+			fmt.Fprint(t.out, center)
+		}
+
+		// Pad with space if length is 0
+		if length == 0 {
+			pad = SPACE
+		}
+		// Ignore left space as it has been printed before
+		if hasPrinted || t.borders.Left {
+			pad = t.pRow
+			center = t.pCenter
+		}
+
+		// Change Center end position
+		if center != SPACE {
+			if i == end && !t.borders.Right {
+				center = t.pRow
+			}
+		}
+
+		// Change Center start position
+		if center == SPACE {
+			if i < end && len(t.footers[i+1][0]) != 0 {
+				if !t.borders.Left {
+					center = t.pRow
+				} else {
+					center = t.pCenter
+				}
+			}
+		}
+
+		// Print the footer
+		fmt.Fprintf(t.out, "%s%s%s%s",
+			pad,
+			strings.Repeat(string(pad), v),
+			pad,
+			center)
+
+	}
+
+	fmt.Fprint(t.out, t.newLine)
+}
+
+// Print caption text
+func (t Table) printCaption() {
+	width := t.getTableWidth()
+	paragraph, _ := WrapString(t.captionText, width)
+	for linecount := 0; linecount < len(paragraph); linecount++ {
+		fmt.Fprintln(t.out, paragraph[linecount])
+	}
+}
+
+// Calculate the total number of characters in a row
+func (t Table) getTableWidth() int {
+	var chars int
+	for _, v := range t.cs {
+		chars += v
+	}
+
+	// Add chars, spaces, separators to calculate the total width of the table.
+	// ncols := t.colSize
+	// spaces := ncols * 2
+	// seps := ncols + 1
+
+	return (chars + (3 * t.colSize) + 2)
+}
+
+func (t Table) printRows() {
+	sepIdx := 0
+	for i, lines := range t.lines {
+		if sepIdx < len(t.separators) && i == t.separators[sepIdx] {
+			t.printLine(true)
+			if sepIdx < len(t.separators) {
+				sepIdx++
+			}
+		}
+		t.printRow(lines, i)
+	}
+}
+
+func (t *Table) fillAlignment(num int) {
+	if len(t.columnsAlign) < num {
+		t.columnsAlign = make([]int, num)
+		for i := range t.columnsAlign {
+			t.columnsAlign[i] = t.align
+		}
+	}
+}
+
+// Print Row Information
+// Adjust column alignment based on type
+
+func (t *Table) printRow(columns [][]string, rowIdx int) {
+	// Get Maximum Height
+	max := t.rs[rowIdx]
+	total := len(columns)
+
+	// TODO Fix uneven col size
+	// if total < t.colSize {
+	//	for n := t.colSize - total; n < t.colSize ; n++ {
+	//		columns = append(columns, []string{SPACE})
+	//		t.cs[n] = t.mW
+	//	}
+	//}
+
+	// Pad Each Height
+	pads := []int{}
+
+	// Checking for ANSI escape sequences for columns
+	is_esc_seq := false
+	if len(t.columnsParams) > 0 {
+		is_esc_seq = true
+	}
+	t.fillAlignment(total)
+
+	for i, line := range columns {
+		length := len(line)
+		pad := max - length
+		pads = append(pads, pad)
+		for n := 0; n < pad; n++ {
+			columns[i] = append(columns[i], "  ")
+		}
+	}
+	//fmt.Println(max, "\n")
+	for x := 0; x < max; x++ {
+		for y := 0; y < total; y++ {
+
+			// Check if border is set
+			if !t.noWhiteSpace {
+				fmt.Fprint(t.out, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn))
+				fmt.Fprintf(t.out, SPACE)
+			}
+
+			str := columns[y][x]
+
+			// Embedding escape sequence with column value
+			if is_esc_seq {
+				str = format(str, t.columnsParams[y])
+			}
+
+			// This would print alignment
+			// Default alignment  would use multiple configuration
+			switch t.columnsAlign[y] {
+			case ALIGN_CENTER: //
+				fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y]))
+			case ALIGN_RIGHT:
+				fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y]))
+			case ALIGN_LEFT:
+				fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y]))
+			default:
+				if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) {
+					fmt.Fprintf(t.out, "%s", PadLeft(str, SPACE, t.cs[y]))
+				} else {
+					fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y]))
+
+					// TODO Custom alignment per column
+					//if max == 1 || pads[y] > 0 {
+					//	fmt.Fprintf(t.out, "%s", Pad(str, SPACE, t.cs[y]))
+					//} else {
+					//	fmt.Fprintf(t.out, "%s", PadRight(str, SPACE, t.cs[y]))
+					//}
+
+				}
+			}
+			if !t.noWhiteSpace {
+				fmt.Fprintf(t.out, SPACE)
+			} else {
+				fmt.Fprintf(t.out, t.tablePadding)
+			}
+		}
+		// Check if border is set
+		// Replace with space if not set
+		if !t.noWhiteSpace {
+			fmt.Fprint(t.out, ConditionString(t.borders.Left, t.pColumn, SPACE))
+		}
+		fmt.Fprint(t.out, t.newLine)
+	}
+
+	if t.rowLine {
+		t.printLine(true)
+	}
+}
+
+// Print the rows of the table and merge the cells that are identical
+func (t *Table) printRowsMergeCells() {
+	var previousLine []string
+	var displayCellBorder []bool
+	var tmpWriter bytes.Buffer
+
+	sepIdx := 0
+	for i, lines := range t.lines {
+		// We store the display of the current line in a tmp writer, as we need to know which border needs to be printed above
+		previousLine, displayCellBorder = t.printRowMergeCells(&tmpWriter, lines, i, previousLine)
+		if i > 0 { // We don't need to print borders above first line
+			if sepIdx < len(t.separators) && i == t.separators[sepIdx] {
+				t.printLine(true)
+				if sepIdx < len(t.separators) {
+					sepIdx++
+				}
+			}
+			if t.rowLine {
+				t.printLineOptionalCellSeparators(true, displayCellBorder)
+			}
+		}
+		tmpWriter.WriteTo(t.out)
+	}
+	// Print the end of the table
+	if t.rowLine {
+		t.printLine(true)
+	}
+}
+
+// Print Row Information to a writer and merge identical cells.
+// Adjust column alignment based on type
+
+func (t *Table) printRowMergeCells(writer io.Writer, columns [][]string, rowIdx int, previousLine []string) ([]string, []bool) {
+	// Get Maximum Height
+	max := t.rs[rowIdx]
+	total := len(columns)
+
+	// Pad Each Height
+	pads := []int{}
+
+	// Checking for ANSI escape sequences for columns
+	is_esc_seq := false
+	if len(t.columnsParams) > 0 {
+		is_esc_seq = true
+	}
+	for i, line := range columns {
+		length := len(line)
+		pad := max - length
+		pads = append(pads, pad)
+		for n := 0; n < pad; n++ {
+			columns[i] = append(columns[i], "  ")
+		}
+	}
+
+	var displayCellBorder []bool
+	t.fillAlignment(total)
+	for x := 0; x < max; x++ {
+		for y := 0; y < total; y++ {
+
+			// Check if border is set
+			fmt.Fprint(writer, ConditionString((!t.borders.Left && y == 0), SPACE, t.pColumn))
+
+			fmt.Fprintf(writer, SPACE)
+
+			str := columns[y][x]
+
+			// Embedding escape sequence with column value
+			if is_esc_seq {
+				str = format(str, t.columnsParams[y])
+			}
+
+			if t.autoMergeCells {
+				// Skip separator lines
+				for _, i := range t.separators {
+					if i == rowIdx {
+						continue
+					}
+				}
+
+				var mergeCell bool
+				if t.columnsToAutoMergeCells != nil {
+					// Check to see if the column index is in columnsToAutoMergeCells.
+					if t.columnsToAutoMergeCells[y] {
+						mergeCell = true
+					}
+				} else {
+					// columnsToAutoMergeCells was not set.
+					mergeCell = true
+				}
+				// Store the full line to merge multi-line cells
+				fullLine := strings.TrimRight(strings.Join(columns[y], " "), " ")
+				if len(previousLine) > y && fullLine == previousLine[y] && fullLine != "" && mergeCell {
+					// If this cell is identical to the one above but not empty, we don't display the border and keep the cell empty.
+					displayCellBorder = append(displayCellBorder, false)
+					str = ""
+				} else {
+					// First line or different content, keep the content and print the cell border
+					displayCellBorder = append(displayCellBorder, true)
+				}
+			}
+
+			// Print the value with the configured alignment. By default, numeric
+			// and percentage values are right-aligned and everything else is left-aligned.
+			switch t.columnsAlign[y] {
+			case ALIGN_CENTER: //
+				fmt.Fprintf(writer, "%s", Pad(str, SPACE, t.cs[y]))
+			case ALIGN_RIGHT:
+				fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y]))
+			case ALIGN_LEFT:
+				fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y]))
+			default:
+				if decimal.MatchString(strings.TrimSpace(str)) || percent.MatchString(strings.TrimSpace(str)) {
+					fmt.Fprintf(writer, "%s", PadLeft(str, SPACE, t.cs[y]))
+				} else {
+					fmt.Fprintf(writer, "%s", PadRight(str, SPACE, t.cs[y]))
+				}
+			}
+			fmt.Fprintf(writer, SPACE)
+		}
+		// Check if border is set
+		// Replace with space if not set
+		fmt.Fprint(writer, ConditionString(t.borders.Left, t.pColumn, SPACE))
+		fmt.Fprint(writer, t.newLine)
+	}
+
+	// The new previous line is the current one
+	previousLine = make([]string, total)
+	for y := 0; y < total; y++ {
+		previousLine[y] = strings.TrimRight(strings.Join(columns[y], " "), " ") // Store the full line for multi-line cells
+	}
+	// Return the newly added line and whether or not a border should be displayed above.
+	return previousLine, displayCellBorder
+}
+
+func (t *Table) parseDimension(str string, colKey, rowKey int) []string {
+	var (
+		raw      []string
+		maxWidth int
+	)
+
+	raw = getLines(str)
+	maxWidth = 0
+	for _, line := range raw {
+		if w := DisplayWidth(line); w > maxWidth {
+			maxWidth = w
+		}
+	}
+
+	// If wrapping, ensure that all paragraphs in the cell fit in the
+	// specified width.
+	if t.autoWrap {
+		// If there's a maximum allowed width for wrapping, use that.
+		if maxWidth > t.mW {
+			maxWidth = t.mW
+		}
+
+		// In the process of doing so, we need to recompute maxWidth. This
+		// is because perhaps a word in the cell is longer than the
+		// allowed maximum width in t.mW.
+		newMaxWidth := maxWidth
+		newRaw := make([]string, 0, len(raw))
+
+		if t.reflowText {
+			// Make a single paragraph of everything.
+			raw = []string{strings.Join(raw, " ")}
+		}
+		for i, para := range raw {
+			paraLines, _ := WrapString(para, maxWidth)
+			for _, line := range paraLines {
+				if w := DisplayWidth(line); w > newMaxWidth {
+					newMaxWidth = w
+				}
+			}
+			if i > 0 {
+				newRaw = append(newRaw, " ")
+			}
+			newRaw = append(newRaw, paraLines...)
+		}
+		raw = newRaw
+		maxWidth = newMaxWidth
+	}
+
+	// Store the new known maximum width.
+	v, ok := t.cs[colKey]
+	if !ok || v < maxWidth || v == 0 {
+		t.cs[colKey] = maxWidth
+	}
+
+	// Remember the number of lines for the row printer.
+	h := len(raw)
+	v, ok = t.rs[rowKey]
+
+	if !ok || v < h || v == 0 {
+		t.rs[rowKey] = h
+	}
+	//fmt.Printf("Raw %+v %d\n", raw, len(raw))
+	return raw
+}
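+
+// Illustrative usage sketch (not part of the upstream file): the row and
+// merge logic above is normally driven through the public API defined in
+// table.go of this package (NewWriter, SetAutoMergeCells, SetRowLine, Append,
+// Render — assumed here to match upstream tablewriter).
+//
+//	table := NewWriter(os.Stdout)
+//	table.SetHeader([]string{"PORT", "VLAN"})
+//	table.SetAutoMergeCells(true) // identical consecutive cells are collapsed
+//	table.SetRowLine(true)
+//	table.Append([]string{"uni-1", "100"})
+//	table.Append([]string{"uni-1", "200"})
+//	table.Render()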
diff --git a/vendor/github.com/guumaster/tablewriter/table_with_color.go b/vendor/github.com/guumaster/tablewriter/table_with_color.go
new file mode 100644
index 0000000..ae7a364
--- /dev/null
+++ b/vendor/github.com/guumaster/tablewriter/table_with_color.go
@@ -0,0 +1,136 @@
+package tablewriter
+
+import (
+	"fmt"
+	"strconv"
+	"strings"
+)
+
+const ESC = "\033"
+const SEP = ";"
+
+const (
+	BgBlackColor int = iota + 40
+	BgRedColor
+	BgGreenColor
+	BgYellowColor
+	BgBlueColor
+	BgMagentaColor
+	BgCyanColor
+	BgWhiteColor
+)
+
+const (
+	FgBlackColor int = iota + 30
+	FgRedColor
+	FgGreenColor
+	FgYellowColor
+	FgBlueColor
+	FgMagentaColor
+	FgCyanColor
+	FgWhiteColor
+)
+
+const (
+	BgHiBlackColor int = iota + 100
+	BgHiRedColor
+	BgHiGreenColor
+	BgHiYellowColor
+	BgHiBlueColor
+	BgHiMagentaColor
+	BgHiCyanColor
+	BgHiWhiteColor
+)
+
+const (
+	FgHiBlackColor int = iota + 90
+	FgHiRedColor
+	FgHiGreenColor
+	FgHiYellowColor
+	FgHiBlueColor
+	FgHiMagentaColor
+	FgHiCyanColor
+	FgHiWhiteColor
+)
+
+const (
+	Normal          = 0
+	Bold            = 1
+	UnderlineSingle = 4
+	Italic
+)
+
+type Colors []int
+
+func startFormat(seq string) string {
+	return fmt.Sprintf("%s[%sm", ESC, seq)
+}
+
+func stopFormat() string {
+	return fmt.Sprintf("%s[%dm", ESC, Normal)
+}
+
+// Making the SGR (Select Graphic Rendition) sequence.
+func makeSequence(codes []int) string {
+	codesInString := []string{}
+	for _, code := range codes {
+		codesInString = append(codesInString, strconv.Itoa(code))
+	}
+	return strings.Join(codesInString, SEP)
+}
+
+// Add ANSI escape sequences before and after the string
+func format(s string, codes interface{}) string {
+	var seq string
+
+	switch v := codes.(type) {
+
+	case string:
+		seq = v
+	case []int:
+		seq = makeSequence(v)
+	case Colors:
+		seq = makeSequence(v)
+	default:
+		return s
+	}
+
+	if len(seq) == 0 {
+		return s
+	}
+	return startFormat(seq) + s + stopFormat()
+}
+
+// Adding header colors (ANSI codes)
+func (t *Table) SetHeaderColor(colors ...Colors) {
+	if t.colSize != len(colors) {
+		panic("Number of header colors must be equal to number of headers.")
+	}
+	for i := 0; i < len(colors); i++ {
+		t.headerParams = append(t.headerParams, makeSequence(colors[i]))
+	}
+}
+
+// Adding column colors (ANSI codes)
+func (t *Table) SetColumnColor(colors ...Colors) {
+	if t.colSize != len(colors) {
+		panic("Number of column colors must be equal to number of headers.")
+	}
+	for i := 0; i < len(colors); i++ {
+		t.columnsParams = append(t.columnsParams, makeSequence(colors[i]))
+	}
+}
+
+// Adding footer colors (ANSI codes)
+func (t *Table) SetFooterColor(colors ...Colors) {
+	if len(t.footers) != len(colors) {
+		panic("Number of footer colors must be equal to number of footers.")
+	}
+	for i := 0; i < len(colors); i++ {
+		t.footerParams = append(t.footerParams, makeSequence(colors[i]))
+	}
+}
+
+func Color(colors ...int) []int {
+	return colors
+}
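+
+// Illustrative usage sketch (not part of the upstream file), assuming the
+// NewWriter/SetHeader/Append/Render API provided by table.go in this package:
+//
+//	table := NewWriter(os.Stdout)
+//	table.SetHeader([]string{"DEVICE", "STATE"})
+//	table.SetHeaderColor(Colors{Bold, FgGreenColor}, Colors{Bold, FgYellowColor})
+//	table.SetColumnColor(Colors{FgWhiteColor}, Colors{FgCyanColor})
+//	table.Append([]string{"olt-1", "ACTIVE"})
+//	table.Render()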
diff --git a/vendor/github.com/guumaster/tablewriter/util.go b/vendor/github.com/guumaster/tablewriter/util.go
new file mode 100644
index 0000000..380e7ab
--- /dev/null
+++ b/vendor/github.com/guumaster/tablewriter/util.go
@@ -0,0 +1,93 @@
+// Copyright 2014 Oleku Konko All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+// This module is a Table Writer API for the Go Programming Language.
+// The protocols were written in pure Go and work on Windows and Unix systems
+
+package tablewriter
+
+import (
+	"math"
+	"regexp"
+	"strings"
+
+	"github.com/mattn/go-runewidth"
+)
+
+var ansi = regexp.MustCompile("\033\\[(?:[0-9]{1,3}(?:;[0-9]{1,3})*)?[m|K]")
+
+func DisplayWidth(str string) int {
+	return runewidth.StringWidth(ansi.ReplaceAllLiteralString(str, ""))
+}
+
+// Simple Condition for string
+// Returns value based on condition
+func ConditionString(cond bool, valid, inValid string) string {
+	if cond {
+		return valid
+	}
+	return inValid
+}
+
+func isNumOrSpace(r rune) bool {
+	return ('0' <= r && r <= '9') || r == ' '
+}
+
+// Format Table Header
+// Replace _ , . and spaces
+func Title(name string) string {
+	origLen := len(name)
+	rs := []rune(name)
+	for i, r := range rs {
+		switch r {
+		case '_':
+			rs[i] = ' '
+		case '.':
+			// ignore floating number 0.0
+			if (i != 0 && !isNumOrSpace(rs[i-1])) || (i != len(rs)-1 && !isNumOrSpace(rs[i+1])) {
+				rs[i] = ' '
+			}
+		}
+	}
+	name = string(rs)
+	name = strings.TrimSpace(name)
+	if len(name) == 0 && origLen > 0 {
+		// Keep at least one character. This is important to preserve
+		// empty lines in multi-line headers/footers.
+		name = " "
+	}
+	return strings.ToUpper(name)
+}
+
+// Pad String
+// Attempts to place string in the center
+func Pad(s, pad string, width int) string {
+	gap := width - DisplayWidth(s)
+	if gap > 0 {
+		gapLeft := int(math.Ceil(float64(gap / 2)))
+		gapRight := gap - gapLeft
+		return strings.Repeat(string(pad), gapLeft) + s + strings.Repeat(string(pad), gapRight)
+	}
+	return s
+}
+
+// Pad String Right position
+// This would place string at the left side of the screen
+func PadRight(s, pad string, width int) string {
+	gap := width - DisplayWidth(s)
+	if gap > 0 {
+		return s + strings.Repeat(string(pad), gap)
+	}
+	return s
+}
+
+// Pad String Left position
+// This would place string at the right side of the screen
+func PadLeft(s, pad string, width int) string {
+	gap := width - DisplayWidth(s)
+	if gap > 0 {
+		return strings.Repeat(string(pad), gap) + s
+	}
+	return s
+}
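+
+// Illustrative sketch (not part of the upstream file): with a target width of
+// 6 and a single-space pad, the three helpers behave as follows.
+//
+//	Pad("ok", " ", 6)      // "  ok  "  (centered)
+//	PadRight("ok", " ", 6) // "ok    "  (left aligned)
+//	PadLeft("ok", " ", 6)  // "    ok"  (right aligned)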
diff --git a/vendor/github.com/guumaster/tablewriter/wrap.go b/vendor/github.com/guumaster/tablewriter/wrap.go
new file mode 100644
index 0000000..a092ee1
--- /dev/null
+++ b/vendor/github.com/guumaster/tablewriter/wrap.go
@@ -0,0 +1,99 @@
+// Copyright 2014 Oleku Konko All rights reserved.
+// Use of this source code is governed by a MIT
+// license that can be found in the LICENSE file.
+
+// This module is a Table Writer API for the Go Programming Language.
+// The protocols were written in pure Go and work on Windows and Unix systems
+
+package tablewriter
+
+import (
+	"math"
+	"strings"
+
+	"github.com/mattn/go-runewidth"
+)
+
+var (
+	nl = "\n"
+	sp = " "
+)
+
+const defaultPenalty = 1e5
+
+// WrapString wraps s into a paragraph of lines of length lim, with minimal
+// raggedness.
+func WrapString(s string, lim int) ([]string, int) {
+	words := strings.Split(strings.Replace(s, nl, sp, -1), sp)
+	var lines []string
+	max := 0
+	for _, v := range words {
+		max = runewidth.StringWidth(v)
+		if max > lim {
+			lim = max
+		}
+	}
+	for _, line := range WrapWords(words, 1, lim, defaultPenalty) {
+		lines = append(lines, strings.Join(line, sp))
+	}
+	return lines, lim
+}
+
+// WrapWords is the low-level line-breaking algorithm, useful if you need more
+// control over the details of the text wrapping process. For most uses,
+// WrapString will be sufficient and more convenient.
+//
+// WrapWords splits a list of words into lines with minimal "raggedness",
+// treating each rune as one unit, accounting for spc units between adjacent
+// words on each line, and attempting to limit lines to lim units. Raggedness
+// is the total error over all lines, where error is the square of the
+// difference of the length of the line and lim. Too-long lines (which only
+// happen when a single word is longer than lim units) have pen penalty units
+// added to the error.
+func WrapWords(words []string, spc, lim, pen int) [][]string {
+	n := len(words)
+
+	length := make([][]int, n)
+	for i := 0; i < n; i++ {
+		length[i] = make([]int, n)
+		length[i][i] = runewidth.StringWidth(words[i])
+		for j := i + 1; j < n; j++ {
+			length[i][j] = length[i][j-1] + spc + runewidth.StringWidth(words[j])
+		}
+	}
+	nbrk := make([]int, n)
+	cost := make([]int, n)
+	for i := range cost {
+		cost[i] = math.MaxInt32
+	}
+	for i := n - 1; i >= 0; i-- {
+		if length[i][n-1] <= lim {
+			cost[i] = 0
+			nbrk[i] = n
+		} else {
+			for j := i + 1; j < n; j++ {
+				d := lim - length[i][j-1]
+				c := d*d + cost[j]
+				if length[i][j-1] > lim {
+					c += pen // too-long lines get a worse penalty
+				}
+				if c < cost[i] {
+					cost[i] = c
+					nbrk[i] = j
+				}
+			}
+		}
+	}
+	var lines [][]string
+	i := 0
+	for i < n {
+		lines = append(lines, words[i:nbrk[i]])
+		i = nbrk[i]
+	}
+	return lines
+}
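+
+// Illustrative sketch (not part of the upstream file): wrapping at a limit of
+// 5 splits on word boundaries, and a word wider than the limit raises the
+// returned limit rather than being broken.
+//
+//	lines, lim := WrapString("hello world foo", 5)
+//	// lines == []string{"hello", "world", "foo"}, lim == 5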
+
+// getLines decomposes a multiline string into a slice of strings.
+func getLines(s string) []string {
+	return strings.Split(s, nl)
+}
diff --git a/vendor/github.com/jessevdk/go-flags/.travis.yml b/vendor/github.com/jessevdk/go-flags/.travis.yml
new file mode 100644
index 0000000..2fc5e5f
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/.travis.yml
@@ -0,0 +1,39 @@
+language: go
+
+os:
+  - linux
+  - osx
+
+go:
+  - 1.16.x
+
+install:
+  # go-flags
+  - go build -v ./...
+
+  # linting
+  - go get -v golang.org/x/lint/golint
+
+  # code coverage
+  - go get golang.org/x/tools/cmd/cover
+  - go get github.com/onsi/ginkgo/ginkgo
+  - go get github.com/modocache/gover
+  - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then go get github.com/mattn/goveralls; fi
+
+script:
+  # go-flags
+  - $(exit $(gofmt -l . | wc -l))
+  - go test -v ./...
+
+  # linting
+  - go tool vet -all=true -v=true . || true
+  - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/golint ./...
+
+  # code coverage
+  - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/ginkgo -r -cover
+  - $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/gover
+  - if [ "$TRAVIS_SECURE_ENV_VARS" = "true" ]; then $(go env GOPATH | awk 'BEGIN{FS=":"} {print $1}')/bin/goveralls -coverprofile=gover.coverprofile -service=travis-ci -repotoken $COVERALLS_TOKEN; fi
+
+env:
+  # coveralls.io
+  secure: "RCYbiB4P0RjQRIoUx/vG/AjP3mmYCbzOmr86DCww1Z88yNcy3hYr3Cq8rpPtYU5v0g7wTpu4adaKIcqRE9xknYGbqj3YWZiCoBP1/n4Z+9sHW3Dsd9D/GRGeHUus0laJUGARjWoCTvoEtOgTdGQDoX7mH+pUUY0FBltNYUdOiiU="
diff --git a/vendor/github.com/jessevdk/go-flags/LICENSE b/vendor/github.com/jessevdk/go-flags/LICENSE
new file mode 100644
index 0000000..bcca0d5
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/LICENSE
@@ -0,0 +1,26 @@
+Copyright (c) 2012 Jesse van den Kieboom. All rights reserved.
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+   * Redistributions of source code must retain the above copyright
+     notice, this list of conditions and the following disclaimer.
+   * Redistributions in binary form must reproduce the above
+     copyright notice, this list of conditions and the following disclaimer
+     in the documentation and/or other materials provided with the
+     distribution.
+   * Neither the name of Google Inc. nor the names of its
+     contributors may be used to endorse or promote products derived from
+     this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/jessevdk/go-flags/README.md b/vendor/github.com/jessevdk/go-flags/README.md
new file mode 100644
index 0000000..f22650b
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/README.md
@@ -0,0 +1,139 @@
+go-flags: a go library for parsing command line arguments
+=========================================================
+
+[![GoDoc](https://godoc.org/github.com/jessevdk/go-flags?status.png)](https://godoc.org/github.com/jessevdk/go-flags) [![Build Status](https://travis-ci.org/jessevdk/go-flags.svg?branch=master)](https://travis-ci.org/jessevdk/go-flags) [![Coverage Status](https://img.shields.io/coveralls/jessevdk/go-flags.svg)](https://coveralls.io/r/jessevdk/go-flags?branch=master)
+
+This library provides similar functionality to the builtin flag library of
+Go, but offers many more options and nicer formatting. From the
+documentation:
+
+Package flags provides an extensive command line option parser.
+The flags package is similar in functionality to the go builtin flag package
+but provides more options and uses reflection to provide a convenient and
+succinct way of specifying command line options.
+
+Supported features:
+* Options with short names (-v)
+* Options with long names (--verbose)
+* Options with and without arguments (bool vs. other types)
+* Options with optional arguments and default values
+* Multiple option groups each containing a set of options
+* Generate and print well-formatted help message
+* Passing remaining command line arguments after -- (optional)
+* Ignoring unknown command line options (optional)
+* Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification
+* Supports multiple short options -aux
+* Supports all primitive go types (string, int{8..64}, uint{8..64}, float)
+* Supports same option multiple times (can store in slice or last option counts)
+* Supports maps
+* Supports function callbacks
+* Supports namespaces for (nested) option groups
+
+The flags package uses structs, reflection and struct field tags
+to allow users to specify command line options. This results in very simple
+and concise specification of your application options. For example:
+
+```go
+type Options struct {
+	Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+}
+```
+
+This specifies one option with a short name -v and a long name --verbose.
+When either -v or --verbose is found on the command line, a 'true' value
+will be appended to the Verbose field. e.g. when specifying -vvv, the
+resulting value of Verbose will be {[true, true, true]}.
+
+Example:
+--------
+```go
+var opts struct {
+	// Slice of bool will append 'true' each time the option
+	// is encountered (can be set multiple times, like -vvv)
+	Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+
+	// Example of automatic marshalling to desired type (uint)
+	Offset uint `long:"offset" description:"Offset"`
+
+	// Example of a callback, called each time the option is found.
+	Call func(string) `short:"c" description:"Call phone number"`
+
+	// Example of a required flag
+	Name string `short:"n" long:"name" description:"A name" required:"true"`
+
+	// Example of a flag restricted to a pre-defined set of strings
+	Animal string `long:"animal" choice:"cat" choice:"dog"`
+
+	// Example of a value name
+	File string `short:"f" long:"file" description:"A file" value-name:"FILE"`
+
+	// Example of a pointer
+	Ptr *int `short:"p" description:"A pointer to an integer"`
+
+	// Example of a slice of strings
+	StringSlice []string `short:"s" description:"A slice of strings"`
+
+	// Example of a slice of pointers
+	PtrSlice []*string `long:"ptrslice" description:"A slice of pointers to string"`
+
+	// Example of a map
+	IntMap map[string]int `long:"intmap" description:"A map from string to int"`
+}
+
+// Callback which will invoke callto:<argument> to call a number.
+// Note that this works just on OS X (and probably only with
+// Skype) but it shows the idea.
+opts.Call = func(num string) {
+	cmd := exec.Command("open", "callto:"+num)
+	cmd.Start()
+	cmd.Process.Release()
+}
+
+// Make some fake arguments to parse.
+args := []string{
+	"-vv",
+	"--offset=5",
+	"-n", "Me",
+	"--animal", "dog", // anything other than "cat" or "dog" will raise an error
+	"-p", "3",
+	"-s", "hello",
+	"-s", "world",
+	"--ptrslice", "hello",
+	"--ptrslice", "world",
+	"--intmap", "a:1",
+	"--intmap", "b:5",
+	"arg1",
+	"arg2",
+	"arg3",
+}
+
+// Parse flags from `args'. Note that here we use flags.ParseArgs for
+// the sake of making a working example. Normally, you would simply use
+// flags.Parse(&opts) which uses os.Args
+args, err := flags.ParseArgs(&opts, args)
+
+if err != nil {
+	panic(err)
+}
+
+fmt.Printf("Verbosity: %v\n", opts.Verbose)
+fmt.Printf("Offset: %d\n", opts.Offset)
+fmt.Printf("Name: %s\n", opts.Name)
+fmt.Printf("Animal: %s\n", opts.Animal)
+fmt.Printf("Ptr: %d\n", *opts.Ptr)
+fmt.Printf("StringSlice: %v\n", opts.StringSlice)
+fmt.Printf("PtrSlice: [%v %v]\n", *opts.PtrSlice[0], *opts.PtrSlice[1])
+fmt.Printf("IntMap: [a:%v b:%v]\n", opts.IntMap["a"], opts.IntMap["b"])
+fmt.Printf("Remaining args: %s\n", strings.Join(args, " "))
+
+// Output: Verbosity: [true true]
+// Offset: 5
+// Name: Me
+// Ptr: 3
+// StringSlice: [hello world]
+// PtrSlice: [hello world]
+// IntMap: [a:1 b:5]
+// Remaining args: arg1 arg2 arg3
+```
+
+More information can be found in the godocs: <http://godoc.org/github.com/jessevdk/go-flags>
diff --git a/vendor/github.com/jessevdk/go-flags/arg.go b/vendor/github.com/jessevdk/go-flags/arg.go
new file mode 100644
index 0000000..8ec6204
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/arg.go
@@ -0,0 +1,27 @@
+package flags
+
+import (
+	"reflect"
+)
+
+// Arg represents a positional argument on the command line.
+type Arg struct {
+	// The name of the positional argument (used in the help)
+	Name string
+
+	// A description of the positional argument (used in the help)
+	Description string
+
+	// The minimal number of required positional arguments
+	Required int
+
+	// The maximum number of required positional arguments
+	RequiredMaximum int
+
+	value reflect.Value
+	tag   multiTag
+}
+
+func (a *Arg) isRemaining() bool {
+	return a.value.Type().Kind() == reflect.Slice
+}
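+
+// Illustrative sketch (not part of the upstream file): positional arguments
+// are declared with the "positional-args" struct tag, which the scanner in
+// command.go turns into Arg values.
+//
+//	var opts struct {
+//		Args struct {
+//			Input string   `positional-arg-name:"INPUT" required:"yes"`
+//			Rest  []string `positional-arg-name:"REST"`
+//		} `positional-args:"yes"`
+//	}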
diff --git a/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh b/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh
new file mode 100644
index 0000000..5edc430
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/check_crosscompile.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+set -e
+
+echo '# linux arm7'
+GOARM=7 GOARCH=arm GOOS=linux go build
+echo '# linux arm5'
+GOARM=5 GOARCH=arm GOOS=linux go build
+echo '# windows 386'
+GOARCH=386 GOOS=windows go build
+echo '# windows amd64'
+GOARCH=amd64 GOOS=windows go build
+echo '# darwin'
+GOARCH=amd64 GOOS=darwin go build
+echo '# freebsd'
+GOARCH=amd64 GOOS=freebsd go build
+echo '# aix ppc64'
+GOARCH=ppc64 GOOS=aix go build
+echo '# solaris amd64'
+GOARCH=amd64 GOOS=solaris go build
diff --git a/vendor/github.com/jessevdk/go-flags/closest.go b/vendor/github.com/jessevdk/go-flags/closest.go
new file mode 100644
index 0000000..3b51875
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/closest.go
@@ -0,0 +1,59 @@
+package flags
+
+func levenshtein(s string, t string) int {
+	if len(s) == 0 {
+		return len(t)
+	}
+
+	if len(t) == 0 {
+		return len(s)
+	}
+
+	dists := make([][]int, len(s)+1)
+	for i := range dists {
+		dists[i] = make([]int, len(t)+1)
+		dists[i][0] = i
+	}
+
+	for j := range t {
+		dists[0][j] = j
+	}
+
+	for i, sc := range s {
+		for j, tc := range t {
+			if sc == tc {
+				dists[i+1][j+1] = dists[i][j]
+			} else {
+				dists[i+1][j+1] = dists[i][j] + 1
+				if dists[i+1][j] < dists[i+1][j+1] {
+					dists[i+1][j+1] = dists[i+1][j] + 1
+				}
+				if dists[i][j+1] < dists[i+1][j+1] {
+					dists[i+1][j+1] = dists[i][j+1] + 1
+				}
+			}
+		}
+	}
+
+	return dists[len(s)][len(t)]
+}
+
+func closestChoice(cmd string, choices []string) (string, int) {
+	if len(choices) == 0 {
+		return "", 0
+	}
+
+	mincmd := -1
+	mindist := -1
+
+	for i, c := range choices {
+		l := levenshtein(cmd, c)
+
+		if mincmd < 0 || l < mindist {
+			mindist = l
+			mincmd = i
+		}
+	}
+
+	return choices[mincmd], mindist
+}
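+
+// Illustrative note (assumption, based on upstream go-flags behaviour): the
+// parser uses closestChoice when an unknown (sub)command is entered so it can
+// suggest the nearest known command name, e.g.
+//
+//	Unknown command "stauts", did you mean "status"?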
diff --git a/vendor/github.com/jessevdk/go-flags/command.go b/vendor/github.com/jessevdk/go-flags/command.go
new file mode 100644
index 0000000..879465d
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/command.go
@@ -0,0 +1,465 @@
+package flags
+
+import (
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// Command represents an application command. Commands can be added to the
+// parser (which itself is a command) and are selected/executed when their name
+// is specified on the command line. The Command type embeds a Group and
+// therefore also carries a set of command specific options.
+type Command struct {
+	// Embedded, see Group for more information
+	*Group
+
+	// The name by which the command can be invoked
+	Name string
+
+	// The active sub command (set by parsing) or nil
+	Active *Command
+
+	// Whether subcommands are optional
+	SubcommandsOptional bool
+
+	// Aliases for the command
+	Aliases []string
+
+	// Whether positional arguments are required
+	ArgsRequired bool
+
+	commands            []*Command
+	hasBuiltinHelpGroup bool
+	args                []*Arg
+}
+
+// Commander is an interface which can be implemented by any command added in
+// the options. When implemented, the Execute method will be called for the last
+// specified (sub)command providing the remaining command line arguments.
+type Commander interface {
+	// Execute will be called for the last active (sub)command. The
+	// args argument contains the remaining command line arguments. The
+	// error that Execute returns will be eventually passed out of the
+	// Parse method of the Parser.
+	Execute(args []string) error
+}
+
+// Usage is an interface which can be implemented to show a custom usage string
+// in the help message shown for a command.
+type Usage interface {
+	// Usage is called for commands to allow customized printing of command
+	// usage in the generated help message.
+	Usage() string
+}
+
+type lookup struct {
+	shortNames map[string]*Option
+	longNames  map[string]*Option
+
+	commands map[string]*Command
+}
+
+// AddCommand adds a new command to the parser with the given name and data. The
+// data needs to be a pointer to a struct from which the fields indicate which
+// options are in the command. The provided data can implement the Commander and
+// Usage interfaces.
+func (c *Command) AddCommand(command string, shortDescription string, longDescription string, data interface{}) (*Command, error) {
+	cmd := newCommand(command, shortDescription, longDescription, data)
+
+	cmd.parent = c
+
+	if err := cmd.scan(); err != nil {
+		return nil, err
+	}
+
+	c.commands = append(c.commands, cmd)
+	return cmd, nil
+}
+
+// AddGroup adds a new group to the command with the given name and data. The
+// data needs to be a pointer to a struct from which the fields indicate which
+// options are in the group.
+func (c *Command) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) {
+	group := newGroup(shortDescription, longDescription, data)
+
+	group.parent = c
+
+	if err := group.scanType(c.scanSubcommandHandler(group)); err != nil {
+		return nil, err
+	}
+
+	c.groups = append(c.groups, group)
+	return group, nil
+}
+
+// Commands returns a list of subcommands of this command.
+func (c *Command) Commands() []*Command {
+	return c.commands
+}
+
+// Find locates the subcommand with the given name and returns it. If no such
+// command can be found Find will return nil.
+func (c *Command) Find(name string) *Command {
+	for _, cc := range c.commands {
+		if cc.match(name) {
+			return cc
+		}
+	}
+
+	return nil
+}
+
+// FindOptionByLongName finds an option that is part of the command, or any of
+// its parent commands, by matching its long name (including the option
+// namespace).
+func (c *Command) FindOptionByLongName(longName string) (option *Option) {
+	for option == nil && c != nil {
+		option = c.Group.FindOptionByLongName(longName)
+
+		c, _ = c.parent.(*Command)
+	}
+
+	return option
+}
+
+// FindOptionByShortName finds an option that is part of the command, or any of
+// its parent commands, by matching its short name.
+func (c *Command) FindOptionByShortName(shortName rune) (option *Option) {
+	for option == nil && c != nil {
+		option = c.Group.FindOptionByShortName(shortName)
+
+		c, _ = c.parent.(*Command)
+	}
+
+	return option
+}
+
+// Args returns a list of positional arguments associated with this command.
+func (c *Command) Args() []*Arg {
+	ret := make([]*Arg, len(c.args))
+	copy(ret, c.args)
+
+	return ret
+}
+
+func newCommand(name string, shortDescription string, longDescription string, data interface{}) *Command {
+	return &Command{
+		Group: newGroup(shortDescription, longDescription, data),
+		Name:  name,
+	}
+}
+
+func (c *Command) scanSubcommandHandler(parentg *Group) scanHandler {
+	f := func(realval reflect.Value, sfield *reflect.StructField) (bool, error) {
+		mtag := newMultiTag(string(sfield.Tag))
+
+		if err := mtag.Parse(); err != nil {
+			return true, err
+		}
+
+		positional := mtag.Get("positional-args")
+
+		if len(positional) != 0 {
+			stype := realval.Type()
+
+			for i := 0; i < stype.NumField(); i++ {
+				field := stype.Field(i)
+
+				m := newMultiTag((string(field.Tag)))
+
+				if err := m.Parse(); err != nil {
+					return true, err
+				}
+
+				name := m.Get("positional-arg-name")
+
+				if len(name) == 0 {
+					name = field.Name
+				}
+
+				required := -1
+				requiredMaximum := -1
+
+				sreq := m.Get("required")
+
+				if sreq != "" {
+					required = 1
+
+					rng := strings.SplitN(sreq, "-", 2)
+
+					if len(rng) > 1 {
+						if preq, err := strconv.ParseInt(rng[0], 10, 32); err == nil {
+							required = int(preq)
+						}
+
+						if preq, err := strconv.ParseInt(rng[1], 10, 32); err == nil {
+							requiredMaximum = int(preq)
+						}
+					} else {
+						if preq, err := strconv.ParseInt(sreq, 10, 32); err == nil {
+							required = int(preq)
+						}
+					}
+				}
+
+				arg := &Arg{
+					Name:            name,
+					Description:     m.Get("description"),
+					Required:        required,
+					RequiredMaximum: requiredMaximum,
+
+					value: realval.Field(i),
+					tag:   m,
+				}
+
+				c.args = append(c.args, arg)
+
+				if len(mtag.Get("required")) != 0 {
+					c.ArgsRequired = true
+				}
+			}
+
+			return true, nil
+		}
+
+		subcommand := mtag.Get("command")
+
+		if len(subcommand) != 0 {
+			var ptrval reflect.Value
+
+			if realval.Kind() == reflect.Ptr {
+				ptrval = realval
+
+				if ptrval.IsNil() {
+					ptrval.Set(reflect.New(ptrval.Type().Elem()))
+				}
+			} else {
+				ptrval = realval.Addr()
+			}
+
+			shortDescription := mtag.Get("description")
+			longDescription := mtag.Get("long-description")
+			subcommandsOptional := mtag.Get("subcommands-optional")
+			aliases := mtag.GetMany("alias")
+
+			subc, err := c.AddCommand(subcommand, shortDescription, longDescription, ptrval.Interface())
+
+			if err != nil {
+				return true, err
+			}
+
+			subc.Hidden = mtag.Get("hidden") != ""
+
+			if len(subcommandsOptional) > 0 {
+				subc.SubcommandsOptional = true
+			}
+
+			if len(aliases) > 0 {
+				subc.Aliases = aliases
+			}
+
+			return true, nil
+		}
+
+		return parentg.scanSubGroupHandler(realval, sfield)
+	}
+
+	return f
+}
+
+func (c *Command) scan() error {
+	return c.scanType(c.scanSubcommandHandler(c.Group))
+}
+
+func (c *Command) eachOption(f func(*Command, *Group, *Option)) {
+	c.eachCommand(func(c *Command) {
+		c.eachGroup(func(g *Group) {
+			for _, option := range g.options {
+				f(c, g, option)
+			}
+		})
+	}, true)
+}
+
+func (c *Command) eachCommand(f func(*Command), recurse bool) {
+	f(c)
+
+	for _, cc := range c.commands {
+		if recurse {
+			cc.eachCommand(f, true)
+		} else {
+			f(cc)
+		}
+	}
+}
+
+func (c *Command) eachActiveGroup(f func(cc *Command, g *Group)) {
+	c.eachGroup(func(g *Group) {
+		f(c, g)
+	})
+
+	if c.Active != nil {
+		c.Active.eachActiveGroup(f)
+	}
+}
+
+func (c *Command) addHelpGroups(showHelp func() error) {
+	if !c.hasBuiltinHelpGroup {
+		c.addHelpGroup(showHelp)
+		c.hasBuiltinHelpGroup = true
+	}
+
+	for _, cc := range c.commands {
+		cc.addHelpGroups(showHelp)
+	}
+}
+
+func (c *Command) makeLookup() lookup {
+	ret := lookup{
+		shortNames: make(map[string]*Option),
+		longNames:  make(map[string]*Option),
+		commands:   make(map[string]*Command),
+	}
+
+	parent := c.parent
+
+	var parents []*Command
+
+	for parent != nil {
+		if cmd, ok := parent.(*Command); ok {
+			parents = append(parents, cmd)
+			parent = cmd.parent
+		} else {
+			parent = nil
+		}
+	}
+
+	for i := len(parents) - 1; i >= 0; i-- {
+		parents[i].fillLookup(&ret, true)
+	}
+
+	c.fillLookup(&ret, false)
+	return ret
+}
+
+func (c *Command) fillLookup(ret *lookup, onlyOptions bool) {
+	c.eachGroup(func(g *Group) {
+		for _, option := range g.options {
+			if option.ShortName != 0 {
+				ret.shortNames[string(option.ShortName)] = option
+			}
+
+			if len(option.LongName) > 0 {
+				ret.longNames[option.LongNameWithNamespace()] = option
+			}
+		}
+	})
+
+	if onlyOptions {
+		return
+	}
+
+	for _, subcommand := range c.commands {
+		ret.commands[subcommand.Name] = subcommand
+
+		for _, a := range subcommand.Aliases {
+			ret.commands[a] = subcommand
+		}
+	}
+}
+
+func (c *Command) groupByName(name string) *Group {
+	if grp := c.Group.groupByName(name); grp != nil {
+		return grp
+	}
+
+	for _, subc := range c.commands {
+		prefix := subc.Name + "."
+
+		if strings.HasPrefix(name, prefix) {
+			if grp := subc.groupByName(name[len(prefix):]); grp != nil {
+				return grp
+			}
+		} else if name == subc.Name {
+			return subc.Group
+		}
+	}
+
+	return nil
+}
+
+type commandList []*Command
+
+func (c commandList) Less(i, j int) bool {
+	return c[i].Name < c[j].Name
+}
+
+func (c commandList) Len() int {
+	return len(c)
+}
+
+func (c commandList) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
+
+func (c *Command) sortedVisibleCommands() []*Command {
+	ret := commandList(c.visibleCommands())
+	sort.Sort(ret)
+
+	return []*Command(ret)
+}
+
+func (c *Command) visibleCommands() []*Command {
+	ret := make([]*Command, 0, len(c.commands))
+
+	for _, cmd := range c.commands {
+		if !cmd.Hidden {
+			ret = append(ret, cmd)
+		}
+	}
+
+	return ret
+}
+
+func (c *Command) match(name string) bool {
+	if c.Name == name {
+		return true
+	}
+
+	for _, v := range c.Aliases {
+		if v == name {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (c *Command) hasHelpOptions() bool {
+	ret := false
+
+	c.eachGroup(func(g *Group) {
+		if g.isBuiltinHelp {
+			return
+		}
+
+		for _, opt := range g.options {
+			if opt.showInHelp() {
+				ret = true
+			}
+		}
+	})
+
+	return ret
+}
+
+func (c *Command) fillParseState(s *parseState) {
+	s.positional = make([]*Arg, len(c.args))
+	copy(s.positional, c.args)
+
+	s.lookup = c.makeLookup()
+	s.command = c
+}
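+
+// Illustrative sketch (not part of the upstream file): subcommands are
+// usually declared through the "command" struct tag handled by
+// scanSubcommandHandler above, rather than by calling AddCommand directly.
+//
+//	type showCmd struct {
+//		Verbose bool `short:"v" long:"verbose" description:"Verbose output"`
+//	}
+//
+//	var opts struct {
+//		Show showCmd `command:"show" alias:"sh" description:"Show device state"`
+//	}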
diff --git a/vendor/github.com/jessevdk/go-flags/completion.go b/vendor/github.com/jessevdk/go-flags/completion.go
new file mode 100644
index 0000000..8ed61f1
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/completion.go
@@ -0,0 +1,315 @@
+package flags
+
+import (
+	"fmt"
+	"os"
+	"path/filepath"
+	"reflect"
+	"sort"
+	"strings"
+	"unicode/utf8"
+)
+
+// Completion is a type containing information of a completion.
+type Completion struct {
+	// The completed item
+	Item string
+
+	// A description of the completed item (optional)
+	Description string
+}
+
+type completions []Completion
+
+func (c completions) Len() int {
+	return len(c)
+}
+
+func (c completions) Less(i, j int) bool {
+	return c[i].Item < c[j].Item
+}
+
+func (c completions) Swap(i, j int) {
+	c[i], c[j] = c[j], c[i]
+}
+
+// Completer is an interface which can be implemented by types
+// to provide custom command line argument completion.
+type Completer interface {
+	// Complete receives a prefix representing a (partial) value
+	// for its type and should provide a list of possible valid
+	// completions.
+	Complete(match string) []Completion
+}
+
+type completion struct {
+	parser *Parser
+}
+
+// Filename is a string alias which provides filename completion.
+type Filename string
+
+func completionsWithoutDescriptions(items []string) []Completion {
+	ret := make([]Completion, len(items))
+
+	for i, v := range items {
+		ret[i].Item = v
+	}
+
+	return ret
+}
+
+// Complete returns a list of existing files with the given
+// prefix.
+func (f *Filename) Complete(match string) []Completion {
+	ret, _ := filepath.Glob(match + "*")
+	if len(ret) == 1 {
+		if info, err := os.Stat(ret[0]); err == nil && info.IsDir() {
+			ret[0] = ret[0] + "/"
+		}
+	}
+	return completionsWithoutDescriptions(ret)
+}
+
+func (c *completion) skipPositional(s *parseState, n int) {
+	if n >= len(s.positional) {
+		s.positional = nil
+	} else {
+		s.positional = s.positional[n:]
+	}
+}
+
+func (c *completion) completeOptionNames(s *parseState, prefix string, match string, short bool) []Completion {
+	if short && len(match) != 0 {
+		return []Completion{
+			{
+				Item: prefix + match,
+			},
+		}
+	}
+
+	var results []Completion
+	repeats := map[string]bool{}
+
+	for name, opt := range s.lookup.longNames {
+		if strings.HasPrefix(name, match) && !opt.Hidden {
+			results = append(results, Completion{
+				Item:        defaultLongOptDelimiter + name,
+				Description: opt.Description,
+			})
+
+			if short {
+				repeats[string(opt.ShortName)] = true
+			}
+		}
+	}
+
+	if short {
+		for name, opt := range s.lookup.shortNames {
+			if _, exist := repeats[name]; !exist && strings.HasPrefix(name, match) && !opt.Hidden {
+				results = append(results, Completion{
+					Item:        string(defaultShortOptDelimiter) + name,
+					Description: opt.Description,
+				})
+			}
+		}
+	}
+
+	return results
+}
+
+func (c *completion) completeNamesForLongPrefix(s *parseState, prefix string, match string) []Completion {
+	return c.completeOptionNames(s, prefix, match, false)
+}
+
+func (c *completion) completeNamesForShortPrefix(s *parseState, prefix string, match string) []Completion {
+	return c.completeOptionNames(s, prefix, match, true)
+}
+
+func (c *completion) completeCommands(s *parseState, match string) []Completion {
+	n := make([]Completion, 0, len(s.command.commands))
+
+	for _, cmd := range s.command.commands {
+		if cmd.data != c && !cmd.Hidden && strings.HasPrefix(cmd.Name, match) {
+			n = append(n, Completion{
+				Item:        cmd.Name,
+				Description: cmd.ShortDescription,
+			})
+		}
+	}
+
+	return n
+}
+
+func (c *completion) completeValue(value reflect.Value, prefix string, match string) []Completion {
+	if value.Kind() == reflect.Slice {
+		value = reflect.New(value.Type().Elem())
+	}
+	i := value.Interface()
+
+	var ret []Completion
+
+	if cmp, ok := i.(Completer); ok {
+		ret = cmp.Complete(match)
+	} else if value.CanAddr() {
+		if cmp, ok = value.Addr().Interface().(Completer); ok {
+			ret = cmp.Complete(match)
+		}
+	}
+
+	for i, v := range ret {
+		ret[i].Item = prefix + v.Item
+	}
+
+	return ret
+}
+
+func (c *completion) complete(args []string) []Completion {
+	if len(args) == 0 {
+		args = []string{""}
+	}
+
+	s := &parseState{
+		args: args,
+	}
+
+	c.parser.fillParseState(s)
+
+	var opt *Option
+
+	for len(s.args) > 1 {
+		arg := s.pop()
+
+		if (c.parser.Options&PassDoubleDash) != None && arg == "--" {
+			opt = nil
+			c.skipPositional(s, len(s.args)-1)
+
+			break
+		}
+
+		if argumentIsOption(arg) {
+			prefix, optname, islong := stripOptionPrefix(arg)
+			optname, _, argument := splitOption(prefix, optname, islong)
+
+			if argument == nil {
+				var o *Option
+				canarg := true
+
+				if islong {
+					o = s.lookup.longNames[optname]
+				} else {
+					for i, r := range optname {
+						sname := string(r)
+						o = s.lookup.shortNames[sname]
+
+						if o == nil {
+							break
+						}
+
+						if i == 0 && o.canArgument() && len(optname) != len(sname) {
+							canarg = false
+							break
+						}
+					}
+				}
+
+				if o == nil && (c.parser.Options&PassAfterNonOption) != None {
+					opt = nil
+					c.skipPositional(s, len(s.args)-1)
+
+					break
+				} else if o != nil && o.canArgument() && !o.OptionalArgument && canarg {
+					if len(s.args) > 1 {
+						s.pop()
+					} else {
+						opt = o
+					}
+				}
+			}
+		} else {
+			if len(s.positional) > 0 {
+				if !s.positional[0].isRemaining() {
+					// Don't advance beyond a remaining positional arg (because
+					// it consumes all subsequent args).
+					s.positional = s.positional[1:]
+				}
+			} else if cmd, ok := s.lookup.commands[arg]; ok {
+				cmd.fillParseState(s)
+			}
+
+			opt = nil
+		}
+	}
+
+	lastarg := s.args[len(s.args)-1]
+	var ret []Completion
+
+	if opt != nil {
+		// Completion for the argument of 'opt'
+		ret = c.completeValue(opt.value, "", lastarg)
+	} else if argumentStartsOption(lastarg) {
+		// Complete the option
+		prefix, optname, islong := stripOptionPrefix(lastarg)
+		optname, split, argument := splitOption(prefix, optname, islong)
+
+		if argument == nil && !islong {
+			rname, n := utf8.DecodeRuneInString(optname)
+			sname := string(rname)
+
+			if opt := s.lookup.shortNames[sname]; opt != nil && opt.canArgument() {
+				ret = c.completeValue(opt.value, prefix+sname, optname[n:])
+			} else {
+				ret = c.completeNamesForShortPrefix(s, prefix, optname)
+			}
+		} else if argument != nil {
+			if islong {
+				opt = s.lookup.longNames[optname]
+			} else {
+				opt = s.lookup.shortNames[optname]
+			}
+
+			if opt != nil {
+				ret = c.completeValue(opt.value, prefix+optname+split, *argument)
+			}
+		} else if islong {
+			ret = c.completeNamesForLongPrefix(s, prefix, optname)
+		} else {
+			ret = c.completeNamesForShortPrefix(s, prefix, optname)
+		}
+	} else if len(s.positional) > 0 {
+		// Complete for positional argument
+		ret = c.completeValue(s.positional[0].value, "", lastarg)
+	} else if len(s.command.commands) > 0 {
+		// Complete for command
+		ret = c.completeCommands(s, lastarg)
+	}
+
+	sort.Sort(completions(ret))
+	return ret
+}
+
+func (c *completion) print(items []Completion, showDescriptions bool) {
+	if showDescriptions && len(items) > 1 {
+		maxl := 0
+
+		for _, v := range items {
+			if len(v.Item) > maxl {
+				maxl = len(v.Item)
+			}
+		}
+
+		for _, v := range items {
+			fmt.Printf("%s", v.Item)
+
+			if len(v.Description) > 0 {
+				fmt.Printf("%s  # %s", strings.Repeat(" ", maxl-len(v.Item)), v.Description)
+			}
+
+			fmt.Printf("\n")
+		}
+	} else {
+		for _, v := range items {
+			fmt.Println(v.Item)
+		}
+	}
+}
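+
+// Illustrative sketch (not part of the upstream file): any option value type
+// can provide completions by implementing the Completer interface above.
+//
+//	type Level string
+//
+//	func (l *Level) Complete(match string) []Completion {
+//		var ret []Completion
+//		for _, v := range []string{"debug", "info", "warn", "error"} {
+//			if strings.HasPrefix(v, match) {
+//				ret = append(ret, Completion{Item: v})
+//			}
+//		}
+//		return ret
+//	}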
diff --git a/vendor/github.com/jessevdk/go-flags/convert.go b/vendor/github.com/jessevdk/go-flags/convert.go
new file mode 100644
index 0000000..cda29b2
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/convert.go
@@ -0,0 +1,357 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Marshaler is the interface implemented by types that can marshal themselves
+// to a string representation of the flag.
+type Marshaler interface {
+	// MarshalFlag marshals a flag value to its string representation.
+	MarshalFlag() (string, error)
+}
+
+// Unmarshaler is the interface implemented by types that can unmarshal a flag
+// argument to themselves. The provided value is directly passed from the
+// command line.
+type Unmarshaler interface {
+	// UnmarshalFlag unmarshals a string value representation to the flag
+	// value (which therefore needs to be a pointer receiver).
+	UnmarshalFlag(value string) error
+}
+
+// ValueValidator is the interface implemented by types that can validate a
+// flag argument themselves. The provided value is directly passed from the
+// command line.
+type ValueValidator interface {
+	// IsValidValue returns an error if the provided string value is not valid
+	// for the flag.
+	IsValidValue(value string) error
+}
+
+func getBase(options multiTag, base int) (int, error) {
+	sbase := options.Get("base")
+
+	var err error
+	var ivbase int64
+
+	if sbase != "" {
+		ivbase, err = strconv.ParseInt(sbase, 10, 32)
+		base = int(ivbase)
+	}
+
+	return base, err
+}
+
+func convertMarshal(val reflect.Value) (bool, string, error) {
+	// Check first for the Marshaler interface
+	if val.Type().NumMethod() > 0 && val.CanInterface() {
+		if marshaler, ok := val.Interface().(Marshaler); ok {
+			ret, err := marshaler.MarshalFlag()
+			return true, ret, err
+		}
+	}
+
+	return false, "", nil
+}
+
+func convertToString(val reflect.Value, options multiTag) (string, error) {
+	if ok, ret, err := convertMarshal(val); ok {
+		return ret, err
+	}
+
+	tp := val.Type()
+
+	// Support for time.Duration
+	if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() {
+		stringer := val.Interface().(fmt.Stringer)
+		return stringer.String(), nil
+	}
+
+	switch tp.Kind() {
+	case reflect.String:
+		return val.String(), nil
+	case reflect.Bool:
+		if val.Bool() {
+			return "true", nil
+		}
+
+		return "false", nil
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		base, err := getBase(options, 10)
+
+		if err != nil {
+			return "", err
+		}
+
+		return strconv.FormatInt(val.Int(), base), nil
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		base, err := getBase(options, 10)
+
+		if err != nil {
+			return "", err
+		}
+
+		return strconv.FormatUint(val.Uint(), base), nil
+	case reflect.Float32, reflect.Float64:
+		return strconv.FormatFloat(val.Float(), 'g', -1, tp.Bits()), nil
+	case reflect.Slice:
+		if val.Len() == 0 {
+			return "", nil
+		}
+
+		ret := "["
+
+		for i := 0; i < val.Len(); i++ {
+			if i != 0 {
+				ret += ", "
+			}
+
+			item, err := convertToString(val.Index(i), options)
+
+			if err != nil {
+				return "", err
+			}
+
+			ret += item
+		}
+
+		return ret + "]", nil
+	case reflect.Map:
+		ret := "{"
+
+		for i, key := range val.MapKeys() {
+			if i != 0 {
+				ret += ", "
+			}
+
+			keyitem, err := convertToString(key, options)
+
+			if err != nil {
+				return "", err
+			}
+
+			item, err := convertToString(val.MapIndex(key), options)
+
+			if err != nil {
+				return "", err
+			}
+
+			ret += keyitem + ":" + item
+		}
+
+		return ret + "}", nil
+	case reflect.Ptr:
+		return convertToString(reflect.Indirect(val), options)
+	case reflect.Interface:
+		if !val.IsNil() {
+			return convertToString(val.Elem(), options)
+		}
+	}
+
+	return "", nil
+}
+
+func convertUnmarshal(val string, retval reflect.Value) (bool, error) {
+	if retval.Type().NumMethod() > 0 && retval.CanInterface() {
+		if unmarshaler, ok := retval.Interface().(Unmarshaler); ok {
+			if retval.IsNil() {
+				retval.Set(reflect.New(retval.Type().Elem()))
+
+				// Re-assign from the new value
+				unmarshaler = retval.Interface().(Unmarshaler)
+			}
+
+			return true, unmarshaler.UnmarshalFlag(val)
+		}
+	}
+
+	if retval.Type().Kind() != reflect.Ptr && retval.CanAddr() {
+		return convertUnmarshal(val, retval.Addr())
+	}
+
+	if retval.Type().Kind() == reflect.Interface && !retval.IsNil() {
+		return convertUnmarshal(val, retval.Elem())
+	}
+
+	return false, nil
+}
+
+func convert(val string, retval reflect.Value, options multiTag) error {
+	if ok, err := convertUnmarshal(val, retval); ok {
+		return err
+	}
+
+	tp := retval.Type()
+
+	// Support for time.Duration
+	if tp == reflect.TypeOf((*time.Duration)(nil)).Elem() {
+		parsed, err := time.ParseDuration(val)
+
+		if err != nil {
+			return err
+		}
+
+		retval.SetInt(int64(parsed))
+		return nil
+	}
+
+	switch tp.Kind() {
+	case reflect.String:
+		retval.SetString(val)
+	case reflect.Bool:
+		if val == "" {
+			retval.SetBool(true)
+		} else {
+			b, err := strconv.ParseBool(val)
+
+			if err != nil {
+				return err
+			}
+
+			retval.SetBool(b)
+		}
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		base, err := getBase(options, 10)
+
+		if err != nil {
+			return err
+		}
+
+		parsed, err := strconv.ParseInt(val, base, tp.Bits())
+
+		if err != nil {
+			return err
+		}
+
+		retval.SetInt(parsed)
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		base, err := getBase(options, 10)
+
+		if err != nil {
+			return err
+		}
+
+		parsed, err := strconv.ParseUint(val, base, tp.Bits())
+
+		if err != nil {
+			return err
+		}
+
+		retval.SetUint(parsed)
+	case reflect.Float32, reflect.Float64:
+		parsed, err := strconv.ParseFloat(val, tp.Bits())
+
+		if err != nil {
+			return err
+		}
+
+		retval.SetFloat(parsed)
+	case reflect.Slice:
+		elemtp := tp.Elem()
+
+		elemvalptr := reflect.New(elemtp)
+		elemval := reflect.Indirect(elemvalptr)
+
+		if err := convert(val, elemval, options); err != nil {
+			return err
+		}
+
+		retval.Set(reflect.Append(retval, elemval))
+	case reflect.Map:
+		parts := strings.SplitN(val, ":", 2)
+
+		key := parts[0]
+		var value string
+
+		if len(parts) == 2 {
+			value = parts[1]
+		}
+
+		keytp := tp.Key()
+		keyval := reflect.New(keytp)
+
+		if err := convert(key, keyval, options); err != nil {
+			return err
+		}
+
+		valuetp := tp.Elem()
+		valueval := reflect.New(valuetp)
+
+		if err := convert(value, valueval, options); err != nil {
+			return err
+		}
+
+		if retval.IsNil() {
+			retval.Set(reflect.MakeMap(tp))
+		}
+
+		retval.SetMapIndex(reflect.Indirect(keyval), reflect.Indirect(valueval))
+	case reflect.Ptr:
+		if retval.IsNil() {
+			retval.Set(reflect.New(retval.Type().Elem()))
+		}
+
+		return convert(val, reflect.Indirect(retval), options)
+	case reflect.Interface:
+		if !retval.IsNil() {
+			return convert(val, retval.Elem(), options)
+		}
+	}
+
+	return nil
+}
+
+func isPrint(s string) bool {
+	for _, c := range s {
+		if !strconv.IsPrint(c) {
+			return false
+		}
+	}
+
+	return true
+}
+
+func quoteIfNeeded(s string) string {
+	if !isPrint(s) {
+		return strconv.Quote(s)
+	}
+
+	return s
+}
+
+func quoteIfNeededV(s []string) []string {
+	ret := make([]string, len(s))
+
+	for i, v := range s {
+		ret[i] = quoteIfNeeded(v)
+	}
+
+	return ret
+}
+
+func quoteV(s []string) []string {
+	ret := make([]string, len(s))
+
+	for i, v := range s {
+		ret[i] = strconv.Quote(v)
+	}
+
+	return ret
+}
+
+func unquoteIfPossible(s string) (string, error) {
+	if len(s) == 0 || s[0] != '"' {
+		return s, nil
+	}
+
+	return strconv.Unquote(s)
+}
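+
+// Illustrative sketch (not part of the upstream file): a custom flag type can
+// bypass the reflection-based convert above by implementing Unmarshaler (and
+// optionally Marshaler).
+//
+//	type Size uint64
+//
+//	func (s *Size) UnmarshalFlag(value string) error {
+//		v, err := strconv.ParseUint(strings.TrimSuffix(value, "k"), 10, 64)
+//		if err != nil {
+//			return err
+//		}
+//		if strings.HasSuffix(value, "k") {
+//			v *= 1024
+//		}
+//		*s = Size(v)
+//		return nil
+//	}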
diff --git a/vendor/github.com/jessevdk/go-flags/error.go b/vendor/github.com/jessevdk/go-flags/error.go
new file mode 100644
index 0000000..73e07cf
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/error.go
@@ -0,0 +1,138 @@
+package flags
+
+import (
+	"fmt"
+)
+
+// ErrorType represents the type of error.
+type ErrorType uint
+
+const (
+	// ErrUnknown indicates a generic error.
+	ErrUnknown ErrorType = iota
+
+	// ErrExpectedArgument indicates that an argument was expected.
+	ErrExpectedArgument
+
+	// ErrUnknownFlag indicates an unknown flag.
+	ErrUnknownFlag
+
+	// ErrUnknownGroup indicates an unknown group.
+	ErrUnknownGroup
+
+	// ErrMarshal indicates a marshalling error while converting values.
+	ErrMarshal
+
+	// ErrHelp indicates that the built-in help was shown (the error
+	// contains the help message).
+	ErrHelp
+
+	// ErrNoArgumentForBool indicates that an argument was given for a
+	// boolean flag (which does not take any arguments).
+	ErrNoArgumentForBool
+
+	// ErrRequired indicates that a required flag was not provided.
+	ErrRequired
+
+	// ErrShortNameTooLong indicates that a short flag name was specified
+	// that is longer than one character.
+	ErrShortNameTooLong
+
+	// ErrDuplicatedFlag indicates that a short or long flag has been
+	// defined more than once
+	ErrDuplicatedFlag
+
+	// ErrTag indicates an error while parsing flag tags.
+	ErrTag
+
+	// ErrCommandRequired indicates that a command was required but not
+	// specified
+	ErrCommandRequired
+
+	// ErrUnknownCommand indicates that an unknown command was specified.
+	ErrUnknownCommand
+
+	// ErrInvalidChoice indicates an invalid option value which only allows
+	// a certain number of choices.
+	ErrInvalidChoice
+
+	// ErrInvalidTag indicates an invalid tag or invalid use of an existing tag
+	ErrInvalidTag
+)
+
+func (e ErrorType) String() string {
+	switch e {
+	case ErrUnknown:
+		return "unknown"
+	case ErrExpectedArgument:
+		return "expected argument"
+	case ErrUnknownFlag:
+		return "unknown flag"
+	case ErrUnknownGroup:
+		return "unknown group"
+	case ErrMarshal:
+		return "marshal"
+	case ErrHelp:
+		return "help"
+	case ErrNoArgumentForBool:
+		return "no argument for bool"
+	case ErrRequired:
+		return "required"
+	case ErrShortNameTooLong:
+		return "short name too long"
+	case ErrDuplicatedFlag:
+		return "duplicated flag"
+	case ErrTag:
+		return "tag"
+	case ErrCommandRequired:
+		return "command required"
+	case ErrUnknownCommand:
+		return "unknown command"
+	case ErrInvalidChoice:
+		return "invalid choice"
+	case ErrInvalidTag:
+		return "invalid tag"
+	}
+
+	return "unrecognized error type"
+}
+
+func (e ErrorType) Error() string {
+	return e.String()
+}
+
+// Error represents a parser error. The error returned from Parse is of this
+// type. The error contains both a Type and Message.
+type Error struct {
+	// The type of error
+	Type ErrorType
+
+	// The error message
+	Message string
+}
+
+// Error returns the error's message
+func (e *Error) Error() string {
+	return e.Message
+}
+
+func newError(tp ErrorType, message string) *Error {
+	return &Error{
+		Type:    tp,
+		Message: message,
+	}
+}
+
+func newErrorf(tp ErrorType, format string, args ...interface{}) *Error {
+	return newError(tp, fmt.Sprintf(format, args...))
+}
+
+func wrapError(err error) *Error {
+	ret, ok := err.(*Error)
+
+	if !ok {
+		return newError(ErrUnknown, err.Error())
+	}
+
+	return ret
+}
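+
+// Illustrative sketch (not part of the upstream file): callers typically
+// check the Type of the returned *Error to tell help output apart from real
+// parse failures.
+//
+//	if _, err := parser.Parse(); err != nil {
+//		if flagsErr, ok := err.(*Error); ok && flagsErr.Type == ErrHelp {
+//			os.Exit(0)
+//		}
+//		os.Exit(1)
+//	}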
diff --git a/vendor/github.com/jessevdk/go-flags/flags.go b/vendor/github.com/jessevdk/go-flags/flags.go
new file mode 100644
index 0000000..ac2157d
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/flags.go
@@ -0,0 +1,263 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package flags provides an extensive command line option parser.
+The flags package is similar in functionality to the go built-in flag package
+but provides more options and uses reflection to provide a convenient and
+succinct way of specifying command line options.
+
+
+Supported features
+
+The following features are supported in go-flags:
+
+    Options with short names (-v)
+    Options with long names (--verbose)
+    Options with and without arguments (bool vs. other types)
+    Options with optional arguments and default values
+    Option default values from ENVIRONMENT_VARIABLES, including slice and map values
+    Multiple option groups each containing a set of options
+    Generate and print well-formatted help message
+    Passing remaining command line arguments after -- (optional)
+    Ignoring unknown command line options (optional)
+    Supports -I/usr/include -I=/usr/include -I /usr/include option argument specification
+    Supports multiple short options -aux
+    Supports all primitive go types (string, int{8..64}, uint{8..64}, float)
+    Supports same option multiple times (can store in slice or last option counts)
+    Supports maps
+    Supports function callbacks
+    Supports namespaces for (nested) option groups
+
+Additional features specific to Windows:
+    Options with short names (/v)
+    Options with long names (/verbose)
+    Windows-style options with arguments use a colon as the delimiter
+    Modify generated help message with Windows-style / options
+    Windows style options can be disabled at build time using the "forceposix"
+    build tag
+
+
+Basic usage
+
+The flags package uses structs, reflection and struct field tags
+to allow users to specify command line options. This results in very simple
+and concise specification of your application options. For example:
+
+    type Options struct {
+        Verbose []bool `short:"v" long:"verbose" description:"Show verbose debug information"`
+    }
+
+This specifies one option with a short name -v and a long name --verbose.
+When either -v or --verbose is found on the command line, a 'true' value
+will be appended to the Verbose field. e.g. when specifying -vvv, the
+resulting value of Verbose will be {[true, true, true]}.
+
+Slice options work exactly the same as primitive type options, except that
+whenever the option is encountered, a value is appended to the slice.
+
+Map options from string to primitive type are also supported. On the command
+line, you specify the value for such an option as key:value. For example
+
+    type Options struct {
+        AuthorInfo map[string]string `short:"a"`
+    }
+
+Then, the AuthorInfo map can be filled with something like
+-a name:Jesse -a "surname:van den Kieboom".
+
+Finally, for full control over the conversion between command line argument
+values and options, user defined types can choose to implement the Marshaler
+and Unmarshaler interfaces.
+
+
+Available field tags
+
+The following is a list of tags for struct fields supported by go-flags:
+
+    short:            the short name of the option (single character)
+    long:             the long name of the option
+    required:         if non-empty, makes the option required to appear on the command
+                      line. If a required option is not present, the parser will
+                      return ErrRequired (optional)
+    description:      the description of the option (optional)
+    long-description: the long description of the option. Currently only
+                      displayed in generated man pages (optional)
+    no-flag:          if non-empty, this field is ignored as an option (optional)
+
+    optional:       if non-empty, makes the argument of the option optional. When an
+                    argument is optional it can only be specified using
+                    --option=argument (optional)
+    optional-value: the value of an optional option when the option occurs
+                    without an argument. This tag can be specified multiple
+                    times in the case of maps or slices (optional)
+    default:        the default value of an option. This tag can be specified
+                    multiple times in the case of slices or maps (optional)
+    default-mask:   when specified, this value will be displayed in the help
+                    instead of the actual default value. This is useful
+                    mostly for hiding otherwise sensitive information from
+                    showing up in the help. If default-mask takes the special
+                    value "-", then no default value will be shown at all
+                    (optional)
+    env:            the default value of the option is overridden from the
+                    specified environment variable, if one has been defined.
+                    (optional)
+    env-delim:      the 'env' default value from environment is split into
+                    multiple values with the given delimiter string, use with
+                    slices and maps (optional)
+    value-name:     the name of the argument value (to be shown in the help)
+                    (optional)
+    choice:         limits the values for an option to a set of values.
+                    Repeat this tag once for each allowable value.
+                    e.g. `long:"animal" choice:"cat" choice:"dog"`
+    hidden:         if non-empty, the option is not visible in the help or man page.
+
+    base: a base (radix) used to convert strings to integer values, the
+          default base is 10 (i.e. decimal) (optional)
+
+    ini-name:       the explicit ini option name (optional)
+    no-ini:         if non-empty this field is ignored as an ini option
+                    (optional)
+
+    group:                when specified on a struct field, makes the struct
+                          field a separate group with the given name (optional)
+    namespace:            when specified on a group struct field, the namespace
+                          gets prepended to every option's long name and
+                          subgroup's namespace of this group, separated by
+                          the parser's namespace delimiter (optional)
+    env-namespace:        when specified on a group struct field, the env-namespace
+                          gets prepended to every option's env key and
+                          subgroup's env-namespace of this group, separated by
+                          the parser's env-namespace delimiter (optional)
+    command:              when specified on a struct field, makes the struct
+                          field a (sub)command with the given name (optional)
+    subcommands-optional: when specified on a command struct field, makes
+                          any subcommands of that command optional (optional)
+    alias:                when specified on a command struct field, adds the
+                          specified name as an alias for the command. Can be
+                          specified multiple times to add more than one
+                          alias (optional)
+    positional-args:      when specified on a field with a struct type,
+                          uses the fields of that struct to parse remaining
+                          positional command line arguments into (in order
+                          of the fields). If a field has a slice type,
+                          then all remaining arguments will be added to it.
+                          Positional arguments are optional by default,
+                          unless the "required" tag is specified together
+                          with the "positional-args" tag. The "required" tag
+                          can also be set on the individual rest argument
+                          fields, to require only the first N positional
+                          arguments. If the "required" tag is set on the
+                          rest arguments slice, then its value determines
+                          the minimum amount of rest arguments that needs to
+                          be provided (e.g. `required:"2"`) (optional)
+    positional-arg-name:  used on a field in a positional argument struct; name
+                          of the positional argument placeholder to be shown in
+                          the help (optional)
+
+Either the `short:` or the `long:` tag must be specified to make the field eligible as an
+option.
+
+
+Option groups
+
+Option groups are a simple way to semantically separate your options. All
+options in a particular group are shown together in the help under the name
+of the group. Namespaces can be used to specify option long names more
+precisely and emphasize the options' affiliation with their group.
+
+There are currently three ways to specify option groups.
+
+    1. Use NewNamedParser specifying the various option groups.
+    2. Use AddGroup to add a group to an existing parser.
+    3. Add a struct field to the top-level options annotated with the
+       group:"group-name" tag.
+
+
+
+Commands
+
+The flags package also has basic support for commands. Commands are often
+used in monolithic applications that support various commands or actions.
+Take git, for example: add, commit, checkout, etc. are all called
+commands. Using commands you can easily separate multiple functions of your
+application.
+
+There are currently two ways to specify a command.
+
+    1. Use AddCommand on an existing parser.
+    2. Add a struct field to your options struct annotated with the
+       command:"command-name" tag.
+
+The most common, idiomatic way to implement commands is to define a global
+parser instance and implement each command in a separate file. These
+command files should define a Go init function which calls AddCommand on
+the global parser.
+
+When parsing ends and there is an active command and that command implements
+the Commander interface, then its Execute method will be run with the
+remaining command line arguments.
+
+Command structs can have options which become valid to parse after the
+command has been specified on the command line, in addition to the options
+of all the parent commands. I.e. considering a -v flag on the parser and an
+add command, the following are equivalent:
+
+    ./app -v add
+    ./app add -v
+
+However, if the -v flag is defined on the add command, then the first of
+the two examples above would fail since the -v flag is not defined before
+the add command.
+
+
+Completion
+
+go-flags has builtin support to provide bash completion of flags, commands
+and argument values. To use completion, the binary which uses go-flags
+can be invoked in a special environment to list completion of the current
+command line argument. It should be noted that this `executes` your application,
+and it is up to the user to make sure there are no negative side effects (for
+example from init functions).
+
+Setting the environment variable `GO_FLAGS_COMPLETION=1` enables completion
+by replacing the argument parsing routine with the completion routine which
+outputs completions for the passed arguments. The basic invocation to
+complete a set of arguments is therefore:
+
+    GO_FLAGS_COMPLETION=1 ./completion-example arg1 arg2 arg3
+
+where `completion-example` is the binary, `arg1` and `arg2` are
+the current arguments, and `arg3` (the last argument) is the argument
+to be completed. If GO_FLAGS_COMPLETION is set to "verbose", then
+descriptions of possible completion items will also be shown when there
+is more than one completion item.
+
+To use this with bash completion, a simple file can be written which
+calls the binary which supports go-flags completion:
+
+    _completion_example() {
+        # All arguments except the first one
+        args=("${COMP_WORDS[@]:1:$COMP_CWORD}")
+
+        # Only split on newlines
+        local IFS=$'\n'
+
+        # Call completion (note that the first element of COMP_WORDS is
+        # the executable itself)
+        COMPREPLY=($(GO_FLAGS_COMPLETION=1 ${COMP_WORDS[0]} "${args[@]}"))
+        return 0
+    }
+
+    complete -F _completion_example completion-example
+
+Completion requires the parser option PassDoubleDash and is therefore enforced if the environment variable GO_FLAGS_COMPLETION is set.
+
+Customized completion for argument values is supported by implementing
+the flags.Completer interface for the argument value type. An example
+of a type which does so is the flags.Filename type, an alias of string
+allowing simple filename completion. A slice or array argument value
+whose element type implements flags.Completer will also be completed.
+*/
+package flags
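The package documentation above walks through tag-based option declaration, slice and map options, and positional arguments. A short hedged sketch pulling those pieces together (program name, field names and example invocation are illustrative only, not taken from this change):

```
package main

import (
	"fmt"
	"os"

	flags "github.com/jessevdk/go-flags"
)

// Options mirrors the doc comment above: a counting []bool flag, a map
// option filled as key:value pairs, and positional arguments.
type Options struct {
	Verbose    []bool            `short:"v" long:"verbose" description:"Show verbose debug information"`
	AuthorInfo map[string]string `short:"a" description:"Author info given as key:value"`

	Args struct {
		Files []string `positional-arg-name:"FILE"`
	} `positional-args:"yes"`
}

func main() {
	var opts Options

	// Example invocation:
	//   ./app -vv -a name:Jesse -a "surname:van den Kieboom" a.txt b.txt
	if _, err := flags.Parse(&opts); err != nil {
		os.Exit(1)
	}

	fmt.Println("verbosity:", len(opts.Verbose))
	fmt.Println("author:", opts.AuthorInfo)
	fmt.Println("files:", opts.Args.Files)
}
```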
diff --git a/vendor/github.com/jessevdk/go-flags/go.mod b/vendor/github.com/jessevdk/go-flags/go.mod
new file mode 100644
index 0000000..a626c5d
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/go.mod
@@ -0,0 +1,5 @@
+module github.com/jessevdk/go-flags
+
+go 1.15
+
+require golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4
diff --git a/vendor/github.com/jessevdk/go-flags/go.sum b/vendor/github.com/jessevdk/go-flags/go.sum
new file mode 100644
index 0000000..7503251
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/go.sum
@@ -0,0 +1,2 @@
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4 h1:EZ2mChiOa8udjfp6rRmswTbtZN/QzUQp4ptM4rnjHvc=
+golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
diff --git a/vendor/github.com/jessevdk/go-flags/group.go b/vendor/github.com/jessevdk/go-flags/group.go
new file mode 100644
index 0000000..181caab
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/group.go
@@ -0,0 +1,429 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+	"errors"
+	"reflect"
+	"strings"
+	"unicode/utf8"
+)
+
+// ErrNotPointerToStruct indicates that a provided data container is not
+// a pointer to a struct. Only pointers to structs are valid data containers
+// for options.
+var ErrNotPointerToStruct = errors.New("provided data is not a pointer to struct")
+
+// Group represents an option group. Option groups can be used to logically
+// group options together under a description. Groups are only used to provide
+// more structure to options both for the user (as displayed in the help message)
+// and for you, since groups can be nested.
+type Group struct {
+	// A short description of the group. The
+	// short description is primarily used in the built-in generated help
+	// message
+	ShortDescription string
+
+	// A long description of the group. The long
+	// description is primarily used to present information on commands
+	// (Command embeds Group) in the built-in generated help and man pages.
+	LongDescription string
+
+	// The namespace of the group
+	Namespace string
+
+	// The environment namespace of the group
+	EnvNamespace string
+
+	// If true, the group is not displayed in the help or man page
+	Hidden bool
+
+	// The parent of the group or nil if it has no parent
+	parent interface{}
+
+	// All the options in the group
+	options []*Option
+
+	// All the subgroups
+	groups []*Group
+
+	// Whether the group represents the built-in help group
+	isBuiltinHelp bool
+
+	data interface{}
+}
+
+type scanHandler func(reflect.Value, *reflect.StructField) (bool, error)
+
+// AddGroup adds a new group to the command with the given name and data. The
+// data needs to be a pointer to a struct from which the fields indicate which
+// options are in the group.
+func (g *Group) AddGroup(shortDescription string, longDescription string, data interface{}) (*Group, error) {
+	group := newGroup(shortDescription, longDescription, data)
+
+	group.parent = g
+
+	if err := group.scan(); err != nil {
+		return nil, err
+	}
+
+	g.groups = append(g.groups, group)
+	return group, nil
+}
+
+// AddOption adds a new option to this group.
+func (g *Group) AddOption(option *Option, data interface{}) {
+	option.value = reflect.ValueOf(data)
+	option.group = g
+	g.options = append(g.options, option)
+}
+
+// Groups returns the list of groups embedded in this group.
+func (g *Group) Groups() []*Group {
+	return g.groups
+}
+
+// Options returns the list of options in this group.
+func (g *Group) Options() []*Option {
+	return g.options
+}
+
+// Find locates the subgroup with the given short description and returns it.
+// If no such group can be found, Find will return nil. Note that the description
+// is matched case insensitively.
+func (g *Group) Find(shortDescription string) *Group {
+	lshortDescription := strings.ToLower(shortDescription)
+
+	var ret *Group
+
+	g.eachGroup(func(gg *Group) {
+		if gg != g && strings.ToLower(gg.ShortDescription) == lshortDescription {
+			ret = gg
+		}
+	})
+
+	return ret
+}
+
+func (g *Group) findOption(matcher func(*Option) bool) (option *Option) {
+	g.eachGroup(func(g *Group) {
+		for _, opt := range g.options {
+			if option == nil && matcher(opt) {
+				option = opt
+			}
+		}
+	})
+
+	return option
+}
+
+// FindOptionByLongName finds an option that is part of the group, or any of its
+// subgroups, by matching its long name (including the option namespace).
+func (g *Group) FindOptionByLongName(longName string) *Option {
+	return g.findOption(func(option *Option) bool {
+		return option.LongNameWithNamespace() == longName
+	})
+}
+
+// FindOptionByShortName finds an option that is part of the group, or any of
+// its subgroups, by matching its short name.
+func (g *Group) FindOptionByShortName(shortName rune) *Option {
+	return g.findOption(func(option *Option) bool {
+		return option.ShortName == shortName
+	})
+}
+
+func newGroup(shortDescription string, longDescription string, data interface{}) *Group {
+	return &Group{
+		ShortDescription: shortDescription,
+		LongDescription:  longDescription,
+
+		data: data,
+	}
+}
+
+func (g *Group) optionByName(name string, namematch func(*Option, string) bool) *Option {
+	prio := 0
+	var retopt *Option
+
+	g.eachGroup(func(g *Group) {
+		for _, opt := range g.options {
+			if namematch != nil && namematch(opt, name) && prio < 4 {
+				retopt = opt
+				prio = 4
+			}
+
+			if name == opt.field.Name && prio < 3 {
+				retopt = opt
+				prio = 3
+			}
+
+			if name == opt.LongNameWithNamespace() && prio < 2 {
+				retopt = opt
+				prio = 2
+			}
+
+			if opt.ShortName != 0 && name == string(opt.ShortName) && prio < 1 {
+				retopt = opt
+				prio = 1
+			}
+		}
+	})
+
+	return retopt
+}
+
+func (g *Group) showInHelp() bool {
+	if g.Hidden {
+		return false
+	}
+	for _, opt := range g.options {
+		if opt.showInHelp() {
+			return true
+		}
+	}
+	return false
+}
+
+func (g *Group) eachGroup(f func(*Group)) {
+	f(g)
+
+	for _, gg := range g.groups {
+		gg.eachGroup(f)
+	}
+}
+
+func isStringFalsy(s string) bool {
+	return s == "" || s == "false" || s == "no" || s == "0"
+}
+
+func (g *Group) scanStruct(realval reflect.Value, sfield *reflect.StructField, handler scanHandler) error {
+	stype := realval.Type()
+
+	if sfield != nil {
+		if ok, err := handler(realval, sfield); err != nil {
+			return err
+		} else if ok {
+			return nil
+		}
+	}
+
+	for i := 0; i < stype.NumField(); i++ {
+		field := stype.Field(i)
+
+		// PkgPath is set only for non-exported fields, which we ignore
+		if field.PkgPath != "" && !field.Anonymous {
+			continue
+		}
+
+		mtag := newMultiTag(string(field.Tag))
+
+		if err := mtag.Parse(); err != nil {
+			return err
+		}
+
+		// Skip fields with the no-flag tag
+		if mtag.Get("no-flag") != "" {
+			continue
+		}
+
+		// Dive deep into structs or pointers to structs
+		kind := field.Type.Kind()
+		fld := realval.Field(i)
+
+		if kind == reflect.Struct {
+			if err := g.scanStruct(fld, &field, handler); err != nil {
+				return err
+			}
+		} else if kind == reflect.Ptr && field.Type.Elem().Kind() == reflect.Struct {
+			flagCountBefore := len(g.options) + len(g.groups)
+
+			if fld.IsNil() {
+				fld = reflect.New(fld.Type().Elem())
+			}
+
+			if err := g.scanStruct(reflect.Indirect(fld), &field, handler); err != nil {
+				return err
+			}
+
+			if len(g.options)+len(g.groups) != flagCountBefore {
+				realval.Field(i).Set(fld)
+			}
+		}
+
+		longname := mtag.Get("long")
+		shortname := mtag.Get("short")
+
+		// Need at least a short name, a long name or an ini-name
+		if longname == "" && shortname == "" && mtag.Get("ini-name") == "" {
+			continue
+		}
+
+		short := rune(0)
+		rc := utf8.RuneCountInString(shortname)
+
+		if rc > 1 {
+			return newErrorf(ErrShortNameTooLong,
+				"short names can only be 1 character long, not `%s'",
+				shortname)
+
+		} else if rc == 1 {
+			short, _ = utf8.DecodeRuneInString(shortname)
+		}
+
+		description := mtag.Get("description")
+		def := mtag.GetMany("default")
+
+		optionalValue := mtag.GetMany("optional-value")
+		valueName := mtag.Get("value-name")
+		defaultMask := mtag.Get("default-mask")
+
+		optional := !isStringFalsy(mtag.Get("optional"))
+		required := !isStringFalsy(mtag.Get("required"))
+		choices := mtag.GetMany("choice")
+		hidden := !isStringFalsy(mtag.Get("hidden"))
+
+		option := &Option{
+			Description:      description,
+			ShortName:        short,
+			LongName:         longname,
+			Default:          def,
+			EnvDefaultKey:    mtag.Get("env"),
+			EnvDefaultDelim:  mtag.Get("env-delim"),
+			OptionalArgument: optional,
+			OptionalValue:    optionalValue,
+			Required:         required,
+			ValueName:        valueName,
+			DefaultMask:      defaultMask,
+			Choices:          choices,
+			Hidden:           hidden,
+
+			group: g,
+
+			field: field,
+			value: realval.Field(i),
+			tag:   mtag,
+		}
+
+		if option.isBool() && option.Default != nil {
+			return newErrorf(ErrInvalidTag,
+				"boolean flag `%s' may not have default values, they always default to `false' and can only be turned on",
+				option.shortAndLongName())
+		}
+
+		g.options = append(g.options, option)
+	}
+
+	return nil
+}
+
+func (g *Group) checkForDuplicateFlags() *Error {
+	shortNames := make(map[rune]*Option)
+	longNames := make(map[string]*Option)
+
+	var duplicateError *Error
+
+	g.eachGroup(func(g *Group) {
+		for _, option := range g.options {
+			if option.LongName != "" {
+				longName := option.LongNameWithNamespace()
+
+				if otherOption, ok := longNames[longName]; ok {
+					duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same long name as option `%s'", option, otherOption)
+					return
+				}
+				longNames[longName] = option
+			}
+			if option.ShortName != 0 {
+				if otherOption, ok := shortNames[option.ShortName]; ok {
+					duplicateError = newErrorf(ErrDuplicatedFlag, "option `%s' uses the same short name as option `%s'", option, otherOption)
+					return
+				}
+				shortNames[option.ShortName] = option
+			}
+		}
+	})
+
+	return duplicateError
+}
+
+func (g *Group) scanSubGroupHandler(realval reflect.Value, sfield *reflect.StructField) (bool, error) {
+	mtag := newMultiTag(string(sfield.Tag))
+
+	if err := mtag.Parse(); err != nil {
+		return true, err
+	}
+
+	subgroup := mtag.Get("group")
+
+	if len(subgroup) != 0 {
+		var ptrval reflect.Value
+
+		if realval.Kind() == reflect.Ptr {
+			ptrval = realval
+
+			if ptrval.IsNil() {
+				ptrval.Set(reflect.New(ptrval.Type()))
+			}
+		} else {
+			ptrval = realval.Addr()
+		}
+
+		description := mtag.Get("description")
+
+		group, err := g.AddGroup(subgroup, description, ptrval.Interface())
+
+		if err != nil {
+			return true, err
+		}
+
+		group.Namespace = mtag.Get("namespace")
+		group.EnvNamespace = mtag.Get("env-namespace")
+		group.Hidden = mtag.Get("hidden") != ""
+
+		return true, nil
+	}
+
+	return false, nil
+}
+
+func (g *Group) scanType(handler scanHandler) error {
+	// Get all the public fields in the data struct
+	ptrval := reflect.ValueOf(g.data)
+
+	if ptrval.Type().Kind() != reflect.Ptr {
+		panic(ErrNotPointerToStruct)
+	}
+
+	stype := ptrval.Type().Elem()
+
+	if stype.Kind() != reflect.Struct {
+		panic(ErrNotPointerToStruct)
+	}
+
+	realval := reflect.Indirect(ptrval)
+
+	if err := g.scanStruct(realval, nil, handler); err != nil {
+		return err
+	}
+
+	if err := g.checkForDuplicateFlags(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (g *Group) scan() error {
+	return g.scanType(g.scanSubGroupHandler)
+}
+
+func (g *Group) groupByName(name string) *Group {
+	if len(name) == 0 {
+		return g
+	}
+
+	return g.Find(name)
+}
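group.go above implements option groups, namespaces and option lookup. A minimal sketch of the user-facing side, showing both the `group:"..."` struct tag and AddGroup, then a lookup via FindOptionByLongName (the struct and option names are illustrative assumptions, not part of the vendored code):

```
package main

import (
	"fmt"
	"os"

	flags "github.com/jessevdk/go-flags"
)

type ServerOptions struct {
	Host string `long:"host" default:"127.0.0.1" description:"Bind address"`
	Port int    `long:"port" default:"8080" description:"Bind port"`
}

type Options struct {
	Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`

	// A struct field tagged with group becomes a nested, namespaced group,
	// so these options show up as --server.host and --server.port.
	Server ServerOptions `group:"Server Options" namespace:"server"`
}

func main() {
	var opts Options
	parser := flags.NewParser(&opts, flags.Default)

	// Groups can also be attached programmatically via AddGroup.
	var tls struct {
		Cert string `long:"cert" description:"TLS certificate file"`
	}
	if _, err := parser.AddGroup("TLS Options", "Options controlling TLS", &tls); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	if _, err := parser.Parse(); err != nil {
		os.Exit(1)
	}

	// Lookup by the fully namespaced long name, as implemented above.
	if opt := parser.FindOptionByLongName("server.port"); opt != nil {
		fmt.Println("found option:", opt.Description)
	}
	fmt.Printf("listening on %s:%d\n", opts.Server.Host, opts.Server.Port)
}
```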
diff --git a/vendor/github.com/jessevdk/go-flags/help.go b/vendor/github.com/jessevdk/go-flags/help.go
new file mode 100644
index 0000000..068fce1
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/help.go
@@ -0,0 +1,514 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+	"bufio"
+	"bytes"
+	"fmt"
+	"io"
+	"runtime"
+	"strings"
+	"unicode/utf8"
+)
+
+type alignmentInfo struct {
+	maxLongLen      int
+	hasShort        bool
+	hasValueName    bool
+	terminalColumns int
+	indent          bool
+}
+
+const (
+	paddingBeforeOption                 = 2
+	distanceBetweenOptionAndDescription = 2
+)
+
+func (a *alignmentInfo) descriptionStart() int {
+	ret := a.maxLongLen + distanceBetweenOptionAndDescription
+
+	if a.hasShort {
+		ret += 2
+	}
+
+	if a.maxLongLen > 0 {
+		ret += 4
+	}
+
+	if a.hasValueName {
+		ret += 3
+	}
+
+	return ret
+}
+
+func (a *alignmentInfo) updateLen(name string, indent bool) {
+	l := utf8.RuneCountInString(name)
+
+	if indent {
+		l = l + 4
+	}
+
+	if l > a.maxLongLen {
+		a.maxLongLen = l
+	}
+}
+
+func (p *Parser) getAlignmentInfo() alignmentInfo {
+	ret := alignmentInfo{
+		maxLongLen:      0,
+		hasShort:        false,
+		hasValueName:    false,
+		terminalColumns: getTerminalColumns(),
+	}
+
+	if ret.terminalColumns <= 0 {
+		ret.terminalColumns = 80
+	}
+
+	var prevcmd *Command
+
+	p.eachActiveGroup(func(c *Command, grp *Group) {
+		if !grp.showInHelp() {
+			return
+		}
+		if c != prevcmd {
+			for _, arg := range c.args {
+				ret.updateLen(arg.Name, c != p.Command)
+			}
+		}
+
+		for _, info := range grp.options {
+			if !info.showInHelp() {
+				continue
+			}
+
+			if info.ShortName != 0 {
+				ret.hasShort = true
+			}
+
+			if len(info.ValueName) > 0 {
+				ret.hasValueName = true
+			}
+
+			l := info.LongNameWithNamespace() + info.ValueName
+
+			if len(info.Choices) != 0 {
+				l += "[" + strings.Join(info.Choices, "|") + "]"
+			}
+
+			ret.updateLen(l, c != p.Command)
+		}
+	})
+
+	return ret
+}
+
+func wrapText(s string, l int, prefix string) string {
+	var ret string
+
+	if l < 10 {
+		l = 10
+	}
+
+	// Basic text wrapping of s at spaces to fit in l
+	lines := strings.Split(s, "\n")
+
+	for _, line := range lines {
+		var retline string
+
+		line = strings.TrimSpace(line)
+
+		for len(line) > l {
+			// Try to split on space
+			suffix := ""
+
+			pos := strings.LastIndex(line[:l], " ")
+
+			if pos < 0 {
+				pos = l - 1
+				suffix = "-\n"
+			}
+
+			if len(retline) != 0 {
+				retline += "\n" + prefix
+			}
+
+			retline += strings.TrimSpace(line[:pos]) + suffix
+			line = strings.TrimSpace(line[pos:])
+		}
+
+		if len(line) > 0 {
+			if len(retline) != 0 {
+				retline += "\n" + prefix
+			}
+
+			retline += line
+		}
+
+		if len(ret) > 0 {
+			ret += "\n"
+
+			if len(retline) > 0 {
+				ret += prefix
+			}
+		}
+
+		ret += retline
+	}
+
+	return ret
+}
+
+func (p *Parser) writeHelpOption(writer *bufio.Writer, option *Option, info alignmentInfo) {
+	line := &bytes.Buffer{}
+
+	prefix := paddingBeforeOption
+
+	if info.indent {
+		prefix += 4
+	}
+
+	if option.Hidden {
+		return
+	}
+
+	line.WriteString(strings.Repeat(" ", prefix))
+
+	if option.ShortName != 0 {
+		line.WriteRune(defaultShortOptDelimiter)
+		line.WriteRune(option.ShortName)
+	} else if info.hasShort {
+		line.WriteString("  ")
+	}
+
+	descstart := info.descriptionStart() + paddingBeforeOption
+
+	if len(option.LongName) > 0 {
+		if option.ShortName != 0 {
+			line.WriteString(", ")
+		} else if info.hasShort {
+			line.WriteString("  ")
+		}
+
+		line.WriteString(defaultLongOptDelimiter)
+		line.WriteString(option.LongNameWithNamespace())
+	}
+
+	if option.canArgument() {
+		line.WriteRune(defaultNameArgDelimiter)
+
+		if len(option.ValueName) > 0 {
+			line.WriteString(option.ValueName)
+		}
+
+		if len(option.Choices) > 0 {
+			line.WriteString("[" + strings.Join(option.Choices, "|") + "]")
+		}
+	}
+
+	written := line.Len()
+	line.WriteTo(writer)
+
+	if option.Description != "" {
+		dw := descstart - written
+		writer.WriteString(strings.Repeat(" ", dw))
+
+		var def string
+
+		if len(option.DefaultMask) != 0 {
+			if option.DefaultMask != "-" {
+				def = option.DefaultMask
+			}
+		} else {
+			def = option.defaultLiteral
+		}
+
+		var envDef string
+		if option.EnvKeyWithNamespace() != "" {
+			var envPrintable string
+			if runtime.GOOS == "windows" {
+				envPrintable = "%" + option.EnvKeyWithNamespace() + "%"
+			} else {
+				envPrintable = "$" + option.EnvKeyWithNamespace()
+			}
+			envDef = fmt.Sprintf(" [%s]", envPrintable)
+		}
+
+		var desc string
+
+		if def != "" {
+			desc = fmt.Sprintf("%s (default: %v)%s", option.Description, def, envDef)
+		} else {
+			desc = option.Description + envDef
+		}
+
+		writer.WriteString(wrapText(desc,
+			info.terminalColumns-descstart,
+			strings.Repeat(" ", descstart)))
+	}
+
+	writer.WriteString("\n")
+}
+
+func maxCommandLength(s []*Command) int {
+	if len(s) == 0 {
+		return 0
+	}
+
+	ret := len(s[0].Name)
+
+	for _, v := range s[1:] {
+		l := len(v.Name)
+
+		if l > ret {
+			ret = l
+		}
+	}
+
+	return ret
+}
+
+// WriteHelp writes a help message containing all the possible options and
+// their descriptions to the provided writer. Note that the HelpFlag parser
+// option provides a convenient way to add a -h/--help option group to the
+// command line parser which will automatically show the help messages using
+// this method.
+func (p *Parser) WriteHelp(writer io.Writer) {
+	if writer == nil {
+		return
+	}
+
+	wr := bufio.NewWriter(writer)
+	aligninfo := p.getAlignmentInfo()
+
+	cmd := p.Command
+
+	for cmd.Active != nil {
+		cmd = cmd.Active
+	}
+
+	if p.Name != "" {
+		wr.WriteString("Usage:\n")
+		wr.WriteString(" ")
+
+		allcmd := p.Command
+
+		for allcmd != nil {
+			var usage string
+
+			if allcmd == p.Command {
+				if len(p.Usage) != 0 {
+					usage = p.Usage
+				} else if p.Options&HelpFlag != 0 {
+					usage = "[OPTIONS]"
+				}
+			} else if us, ok := allcmd.data.(Usage); ok {
+				usage = us.Usage()
+			} else if allcmd.hasHelpOptions() {
+				usage = fmt.Sprintf("[%s-OPTIONS]", allcmd.Name)
+			}
+
+			if len(usage) != 0 {
+				fmt.Fprintf(wr, " %s %s", allcmd.Name, usage)
+			} else {
+				fmt.Fprintf(wr, " %s", allcmd.Name)
+			}
+
+			if len(allcmd.args) > 0 {
+				fmt.Fprintf(wr, " ")
+			}
+
+			for i, arg := range allcmd.args {
+				if i != 0 {
+					fmt.Fprintf(wr, " ")
+				}
+
+				name := arg.Name
+
+				if arg.isRemaining() {
+					name = name + "..."
+				}
+
+				if !allcmd.ArgsRequired {
+					fmt.Fprintf(wr, "[%s]", name)
+				} else {
+					fmt.Fprintf(wr, "%s", name)
+				}
+			}
+
+			if allcmd.Active == nil && len(allcmd.commands) > 0 {
+				var co, cc string
+
+				if allcmd.SubcommandsOptional {
+					co, cc = "[", "]"
+				} else {
+					co, cc = "<", ">"
+				}
+
+				visibleCommands := allcmd.visibleCommands()
+
+				if len(visibleCommands) > 3 {
+					fmt.Fprintf(wr, " %scommand%s", co, cc)
+				} else {
+					subcommands := allcmd.sortedVisibleCommands()
+					names := make([]string, len(subcommands))
+
+					for i, subc := range subcommands {
+						names[i] = subc.Name
+					}
+
+					fmt.Fprintf(wr, " %s%s%s", co, strings.Join(names, " | "), cc)
+				}
+			}
+
+			allcmd = allcmd.Active
+		}
+
+		fmt.Fprintln(wr)
+
+		if len(cmd.LongDescription) != 0 {
+			fmt.Fprintln(wr)
+
+			t := wrapText(cmd.LongDescription,
+				aligninfo.terminalColumns,
+				"")
+
+			fmt.Fprintln(wr, t)
+		}
+	}
+
+	c := p.Command
+
+	for c != nil {
+		printcmd := c != p.Command
+
+		c.eachGroup(func(grp *Group) {
+			first := true
+
+			// Skip built-in help group for all commands except the top-level
+			// parser
+			if grp.Hidden || (grp.isBuiltinHelp && c != p.Command) {
+				return
+			}
+
+			for _, info := range grp.options {
+				if !info.showInHelp() {
+					continue
+				}
+
+				if printcmd {
+					fmt.Fprintf(wr, "\n[%s command options]\n", c.Name)
+					aligninfo.indent = true
+					printcmd = false
+				}
+
+				if first && cmd.Group != grp {
+					fmt.Fprintln(wr)
+
+					if aligninfo.indent {
+						wr.WriteString("    ")
+					}
+
+					fmt.Fprintf(wr, "%s:\n", grp.ShortDescription)
+					first = false
+				}
+
+				p.writeHelpOption(wr, info, aligninfo)
+			}
+		})
+
+		var args []*Arg
+		for _, arg := range c.args {
+			if arg.Description != "" {
+				args = append(args, arg)
+			}
+		}
+
+		if len(args) > 0 {
+			if c == p.Command {
+				fmt.Fprintf(wr, "\nArguments:\n")
+			} else {
+				fmt.Fprintf(wr, "\n[%s command arguments]\n", c.Name)
+			}
+
+			descStart := aligninfo.descriptionStart() + paddingBeforeOption
+
+			for _, arg := range args {
+				argPrefix := strings.Repeat(" ", paddingBeforeOption)
+				argPrefix += arg.Name
+
+				if len(arg.Description) > 0 {
+					argPrefix += ":"
+					wr.WriteString(argPrefix)
+
+					// Space between "arg:" and the description start
+					descPadding := strings.Repeat(" ", descStart-len(argPrefix))
+					// How much space the description gets before wrapping
+					descWidth := aligninfo.terminalColumns - 1 - descStart
+					// Whitespace to which we can indent new description lines
+					descPrefix := strings.Repeat(" ", descStart)
+
+					wr.WriteString(descPadding)
+					wr.WriteString(wrapText(arg.Description, descWidth, descPrefix))
+				} else {
+					wr.WriteString(argPrefix)
+				}
+
+				fmt.Fprintln(wr)
+			}
+		}
+
+		c = c.Active
+	}
+
+	scommands := cmd.sortedVisibleCommands()
+
+	if len(scommands) > 0 {
+		maxnamelen := maxCommandLength(scommands)
+
+		fmt.Fprintln(wr)
+		fmt.Fprintln(wr, "Available commands:")
+
+		for _, c := range scommands {
+			fmt.Fprintf(wr, "  %s", c.Name)
+
+			if len(c.ShortDescription) > 0 {
+				pad := strings.Repeat(" ", maxnamelen-len(c.Name))
+				fmt.Fprintf(wr, "%s  %s", pad, c.ShortDescription)
+
+				if len(c.Aliases) > 0 {
+					fmt.Fprintf(wr, " (aliases: %s)", strings.Join(c.Aliases, ", "))
+				}
+
+			}
+
+			fmt.Fprintln(wr)
+		}
+	}
+
+	wr.Flush()
+}
+
+// WroteHelp is a helper to test the error from ParseArgs() to
+// determine if the help message was written. It is safe to
+// call without first checking that error is nil.
+func WroteHelp(err error) bool {
+	if err == nil { // No error
+		return false
+	}
+
+	flagError, ok := err.(*Error)
+	if !ok { // Not a go-flag error
+		return false
+	}
+
+	if flagError.Type != ErrHelp { // Did not print the help message
+		return false
+	}
+
+	return true
+}
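help.go renders the generated help text. A small sketch of driving it with WriteHelp and WroteHelp; the options struct and the choice of writer are illustrative assumptions:

```
package main

import (
	"fmt"
	"os"

	flags "github.com/jessevdk/go-flags"
)

type Options struct {
	Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`
}

func main() {
	var opts Options

	// HelpFlag adds the built-in -h/--help group; PassDoubleDash stops
	// option parsing at "--".
	parser := flags.NewParser(&opts, flags.HelpFlag|flags.PassDoubleDash)

	if _, err := parser.Parse(); err != nil {
		if flags.WroteHelp(err) {
			// -h/--help was given; since PrintErrors is not enabled here,
			// render the help message ourselves via WriteHelp.
			parser.WriteHelp(os.Stdout)
			os.Exit(0)
		}
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```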
diff --git a/vendor/github.com/jessevdk/go-flags/ini.go b/vendor/github.com/jessevdk/go-flags/ini.go
new file mode 100644
index 0000000..60b36c7
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/ini.go
@@ -0,0 +1,615 @@
+package flags
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+)
+
+// IniError contains location information on where an error occurred.
+type IniError struct {
+	// The error message.
+	Message string
+
+	// The filename of the file in which the error occurred.
+	File string
+
+	// The line number at which the error occurred.
+	LineNumber uint
+}
+
+// Error provides a "file:line: message" formatted message of the ini error.
+func (x *IniError) Error() string {
+	return fmt.Sprintf(
+		"%s:%d: %s",
+		x.File,
+		x.LineNumber,
+		x.Message,
+	)
+}
+
+// IniOptions for writing
+type IniOptions uint
+
+const (
+	// IniNone indicates no options.
+	IniNone IniOptions = 0
+
+	// IniIncludeDefaults indicates that default values should be written.
+	IniIncludeDefaults = 1 << iota
+
+	// IniCommentDefaults indicates that if IniIncludeDefaults is used
+	// options with default values are written but commented out.
+	IniCommentDefaults
+
+	// IniIncludeComments indicates that comments containing the description
+	// of an option should be written.
+	IniIncludeComments
+
+	// IniDefault provides a default set of options.
+	IniDefault = IniIncludeComments
+)
+
+// IniParser is a utility to read and write flags options from and to ini
+// formatted strings.
+type IniParser struct {
+	ParseAsDefaults bool // override default flags
+
+	parser *Parser
+}
+
+type iniValue struct {
+	Name       string
+	Value      string
+	Quoted     bool
+	LineNumber uint
+}
+
+type iniSection []iniValue
+
+type ini struct {
+	File     string
+	Sections map[string]iniSection
+}
+
+// NewIniParser creates a new ini parser for a given Parser.
+func NewIniParser(p *Parser) *IniParser {
+	return &IniParser{
+		parser: p,
+	}
+}
+
+// IniParse is a convenience function to parse command line options with default
+// settings from an ini formatted file. The provided data is a pointer to a struct
+// representing the default option group (named "Application Options"). For
+// more control, use flags.NewParser.
+func IniParse(filename string, data interface{}) error {
+	p := NewParser(data, Default)
+
+	return NewIniParser(p).ParseFile(filename)
+}
+
+// ParseFile parses flags from an ini formatted file. See Parse for more
+// information on the ini file format. The returned errors can be of the type
+// flags.Error or flags.IniError.
+func (i *IniParser) ParseFile(filename string) error {
+	ini, err := readIniFromFile(filename)
+
+	if err != nil {
+		return err
+	}
+
+	return i.parse(ini)
+}
+
+// Parse parses flags from an ini format. You can use ParseFile as a
+// convenience function to parse from a filename instead of a general
+// io.Reader.
+//
+// The format of the ini file is as follows:
+//
+//     [Option group name]
+//     option = value
+//
+// Each section in the ini file represents an option group or command in the
+// flags parser. The default flags parser option group (i.e. when using
+// flags.Parse) is named 'Application Options'. The ini option name is matched
+// in the following order:
+//
+//     1. Compared to the ini-name tag on the option struct field (if present)
+//     2. Compared to the struct field name
+//     3. Compared to the option long name (if present)
+//     4. Compared to the option short name (if present)
+//
+// Sections for nested groups and commands can be addressed using a dot `.'
+// namespacing notation (i.e. [subcommand.Options]). Group section names are
+// matched case insensitively.
+//
+// The returned errors can be of the type flags.Error or flags.IniError.
+func (i *IniParser) Parse(reader io.Reader) error {
+	ini, err := readIni(reader, "")
+
+	if err != nil {
+		return err
+	}
+
+	return i.parse(ini)
+}
+
+// WriteFile writes the flags as ini format into a file. See Write
+// for more information. The returned error occurs when the specified file
+// could not be opened for writing.
+func (i *IniParser) WriteFile(filename string, options IniOptions) error {
+	return writeIniToFile(i, filename, options)
+}
+
+// Write writes the current values of all the flags to an ini format.
+// See Parse for more information on the ini file format. You typically
+// call this only after settings have been parsed since the default values of each
+// option are stored just before parsing the flags (this is only relevant when
+// IniIncludeDefaults is _not_ set in options).
+func (i *IniParser) Write(writer io.Writer, options IniOptions) {
+	writeIni(i, writer, options)
+}
+
+func readFullLine(reader *bufio.Reader) (string, error) {
+	var line []byte
+
+	for {
+		l, more, err := reader.ReadLine()
+
+		if err != nil {
+			return "", err
+		}
+
+		if line == nil && !more {
+			return string(l), nil
+		}
+
+		line = append(line, l...)
+
+		if !more {
+			break
+		}
+	}
+
+	return string(line), nil
+}
+
+func optionIniName(option *Option) string {
+	name := option.tag.Get("_read-ini-name")
+
+	if len(name) != 0 {
+		return name
+	}
+
+	name = option.tag.Get("ini-name")
+
+	if len(name) != 0 {
+		return name
+	}
+
+	return option.field.Name
+}
+
+func writeGroupIni(cmd *Command, group *Group, namespace string, writer io.Writer, options IniOptions) {
+	var sname string
+
+	if len(namespace) != 0 {
+		sname = namespace
+	}
+
+	if cmd.Group != group && len(group.ShortDescription) != 0 {
+		if len(sname) != 0 {
+			sname += "."
+		}
+
+		sname += group.ShortDescription
+	}
+
+	sectionwritten := false
+	comments := (options & IniIncludeComments) != IniNone
+
+	for _, option := range group.options {
+		if option.isFunc() || option.Hidden {
+			continue
+		}
+
+		if len(option.tag.Get("no-ini")) != 0 {
+			continue
+		}
+
+		val := option.value
+
+		if (options&IniIncludeDefaults) == IniNone && option.valueIsDefault() {
+			continue
+		}
+
+		if !sectionwritten {
+			fmt.Fprintf(writer, "[%s]\n", sname)
+			sectionwritten = true
+		}
+
+		if comments && len(option.Description) != 0 {
+			fmt.Fprintf(writer, "; %s\n", option.Description)
+		}
+
+		oname := optionIniName(option)
+
+		commentOption := (options&(IniIncludeDefaults|IniCommentDefaults)) == IniIncludeDefaults|IniCommentDefaults && option.valueIsDefault()
+
+		kind := val.Type().Kind()
+		switch kind {
+		case reflect.Slice:
+			kind = val.Type().Elem().Kind()
+
+			if val.Len() == 0 {
+				writeOption(writer, oname, kind, "", "", true, option.iniQuote)
+			} else {
+				for idx := 0; idx < val.Len(); idx++ {
+					v, _ := convertToString(val.Index(idx), option.tag)
+
+					writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote)
+				}
+			}
+		case reflect.Map:
+			kind = val.Type().Elem().Kind()
+
+			if val.Len() == 0 {
+				writeOption(writer, oname, kind, "", "", true, option.iniQuote)
+			} else {
+				mkeys := val.MapKeys()
+				keys := make([]string, len(val.MapKeys()))
+				kkmap := make(map[string]reflect.Value)
+
+				for i, k := range mkeys {
+					keys[i], _ = convertToString(k, option.tag)
+					kkmap[keys[i]] = k
+				}
+
+				sort.Strings(keys)
+
+				for _, k := range keys {
+					v, _ := convertToString(val.MapIndex(kkmap[k]), option.tag)
+
+					writeOption(writer, oname, kind, k, v, commentOption, option.iniQuote)
+				}
+			}
+		default:
+			v, _ := convertToString(val, option.tag)
+
+			writeOption(writer, oname, kind, "", v, commentOption, option.iniQuote)
+		}
+
+		if comments {
+			fmt.Fprintln(writer)
+		}
+	}
+
+	if sectionwritten && !comments {
+		fmt.Fprintln(writer)
+	}
+}
+
+func writeOption(writer io.Writer, optionName string, optionType reflect.Kind, optionKey string, optionValue string, commentOption bool, forceQuote bool) {
+	if forceQuote || (optionType == reflect.String && !isPrint(optionValue)) {
+		optionValue = strconv.Quote(optionValue)
+	}
+
+	comment := ""
+	if commentOption {
+		comment = "; "
+	}
+
+	fmt.Fprintf(writer, "%s%s =", comment, optionName)
+
+	if optionKey != "" {
+		fmt.Fprintf(writer, " %s:%s", optionKey, optionValue)
+	} else if optionValue != "" {
+		fmt.Fprintf(writer, " %s", optionValue)
+	}
+
+	fmt.Fprintln(writer)
+}
+
+func writeCommandIni(command *Command, namespace string, writer io.Writer, options IniOptions) {
+	command.eachGroup(func(group *Group) {
+		if !group.Hidden {
+			writeGroupIni(command, group, namespace, writer, options)
+		}
+	})
+
+	for _, c := range command.commands {
+		var fqn string
+
+		if c.Hidden {
+			continue
+		}
+
+		if len(namespace) != 0 {
+			fqn = namespace + "." + c.Name
+		} else {
+			fqn = c.Name
+		}
+
+		writeCommandIni(c, fqn, writer, options)
+	}
+}
+
+func writeIni(parser *IniParser, writer io.Writer, options IniOptions) {
+	writeCommandIni(parser.parser.Command, "", writer, options)
+}
+
+func writeIniToFile(parser *IniParser, filename string, options IniOptions) error {
+	file, err := os.Create(filename)
+
+	if err != nil {
+		return err
+	}
+
+	defer file.Close()
+
+	writeIni(parser, file, options)
+
+	return nil
+}
+
+func readIniFromFile(filename string) (*ini, error) {
+	file, err := os.Open(filename)
+
+	if err != nil {
+		return nil, err
+	}
+
+	defer file.Close()
+
+	return readIni(file, filename)
+}
+
+func readIni(contents io.Reader, filename string) (*ini, error) {
+	ret := &ini{
+		File:     filename,
+		Sections: make(map[string]iniSection),
+	}
+
+	reader := bufio.NewReader(contents)
+
+	// Empty global section
+	section := make(iniSection, 0, 10)
+	sectionname := ""
+
+	ret.Sections[sectionname] = section
+
+	var lineno uint
+
+	for {
+		line, err := readFullLine(reader)
+
+		if err == io.EOF {
+			break
+		} else if err != nil {
+			return nil, err
+		}
+
+		lineno++
+		line = strings.TrimSpace(line)
+
+		// Skip empty lines and comment lines starting with ; or #
+		if len(line) == 0 || line[0] == ';' || line[0] == '#' {
+			continue
+		}
+
+		if line[0] == '[' {
+			if line[0] != '[' || line[len(line)-1] != ']' {
+				return nil, &IniError{
+					Message:    "malformed section header",
+					File:       filename,
+					LineNumber: lineno,
+				}
+			}
+
+			name := strings.TrimSpace(line[1 : len(line)-1])
+
+			if len(name) == 0 {
+				return nil, &IniError{
+					Message:    "empty section name",
+					File:       filename,
+					LineNumber: lineno,
+				}
+			}
+
+			sectionname = name
+			section = ret.Sections[name]
+
+			if section == nil {
+				section = make(iniSection, 0, 10)
+				ret.Sections[name] = section
+			}
+
+			continue
+		}
+
+		// Parse option here
+		keyval := strings.SplitN(line, "=", 2)
+
+		if len(keyval) != 2 {
+			return nil, &IniError{
+				Message:    fmt.Sprintf("malformed key=value (%s)", line),
+				File:       filename,
+				LineNumber: lineno,
+			}
+		}
+
+		name := strings.TrimSpace(keyval[0])
+		value := strings.TrimSpace(keyval[1])
+		quoted := false
+
+		if len(value) != 0 && value[0] == '"' {
+			if v, err := strconv.Unquote(value); err == nil {
+				value = v
+
+				quoted = true
+			} else {
+				return nil, &IniError{
+					Message:    err.Error(),
+					File:       filename,
+					LineNumber: lineno,
+				}
+			}
+		}
+
+		section = append(section, iniValue{
+			Name:       name,
+			Value:      value,
+			Quoted:     quoted,
+			LineNumber: lineno,
+		})
+
+		ret.Sections[sectionname] = section
+	}
+
+	return ret, nil
+}
+
+func (i *IniParser) matchingGroups(name string) []*Group {
+	if len(name) == 0 {
+		var ret []*Group
+
+		i.parser.eachGroup(func(g *Group) {
+			ret = append(ret, g)
+		})
+
+		return ret
+	}
+
+	g := i.parser.groupByName(name)
+
+	if g != nil {
+		return []*Group{g}
+	}
+
+	return nil
+}
+
+func (i *IniParser) parse(ini *ini) error {
+	p := i.parser
+
+	p.eachOption(func(cmd *Command, group *Group, option *Option) {
+		option.clearReferenceBeforeSet = true
+	})
+
+	var quotesLookup = make(map[*Option]bool)
+
+	for name, section := range ini.Sections {
+		groups := i.matchingGroups(name)
+
+		if len(groups) == 0 {
+			if (p.Options & IgnoreUnknown) == None {
+				return newErrorf(ErrUnknownGroup, "could not find option group `%s'", name)
+			}
+
+			continue
+		}
+
+		for _, inival := range section {
+			var opt *Option
+
+			for _, group := range groups {
+				opt = group.optionByName(inival.Name, func(o *Option, n string) bool {
+					return strings.ToLower(o.tag.Get("ini-name")) == strings.ToLower(n)
+				})
+
+				if opt != nil && len(opt.tag.Get("no-ini")) != 0 {
+					opt = nil
+				}
+
+				if opt != nil {
+					break
+				}
+			}
+
+			if opt == nil {
+				if (p.Options & IgnoreUnknown) == None {
+					return &IniError{
+						Message:    fmt.Sprintf("unknown option: %s", inival.Name),
+						File:       ini.File,
+						LineNumber: inival.LineNumber,
+					}
+				}
+
+				continue
+			}
+
+			// ini value is ignored if parsed as default but defaults are prevented
+			if i.ParseAsDefaults && opt.preventDefault {
+				continue
+			}
+
+			pval := &inival.Value
+
+			if !opt.canArgument() && len(inival.Value) == 0 {
+				pval = nil
+			} else {
+				if opt.value.Type().Kind() == reflect.Map {
+					parts := strings.SplitN(inival.Value, ":", 2)
+
+					// only handle unquoting
+					if len(parts) == 2 && parts[1][0] == '"' {
+						if v, err := strconv.Unquote(parts[1]); err == nil {
+							parts[1] = v
+
+							inival.Quoted = true
+						} else {
+							return &IniError{
+								Message:    err.Error(),
+								File:       ini.File,
+								LineNumber: inival.LineNumber,
+							}
+						}
+
+						s := parts[0] + ":" + parts[1]
+
+						pval = &s
+					}
+				}
+			}
+
+			var err error
+
+			if i.ParseAsDefaults {
+				err = opt.setDefault(pval)
+			} else {
+				err = opt.set(pval)
+			}
+
+			if err != nil {
+				return &IniError{
+					Message:    err.Error(),
+					File:       ini.File,
+					LineNumber: inival.LineNumber,
+				}
+			}
+
+			// Defaults from ini files take precedence over defaults from the parser
+			opt.preventDefault = true
+
+			// either all INI values are quoted or only values that need quoting
+			if _, ok := quotesLookup[opt]; !inival.Quoted || !ok {
+				quotesLookup[opt] = inival.Quoted
+			}
+
+			opt.tag.Set("_read-ini-name", inival.Name)
+		}
+	}
+
+	for opt, quoted := range quotesLookup {
+		opt.iniQuote = quoted
+	}
+
+	return nil
+}
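ini.go lets the same option definitions be loaded from and written to ini files. A hedged sketch of the round trip, where the file name "app.ini" and the option set are illustrative assumptions:

```
package main

import (
	"fmt"
	"os"

	flags "github.com/jessevdk/go-flags"
)

type Options struct {
	Host string `long:"host" default:"127.0.0.1" description:"Bind address"`
	Port int    `long:"port" default:"8080" description:"Bind port"`
}

func main() {
	var opts Options
	parser := flags.NewParser(&opts, flags.Default)
	iniParser := flags.NewIniParser(parser)

	// Apply values from an ini file first (if it exists); command line
	// flags parsed afterwards still override them.
	if err := iniParser.ParseFile("app.ini"); err != nil && !os.IsNotExist(err) {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}

	if _, err := parser.Parse(); err != nil {
		os.Exit(1)
	}

	// Write the effective settings back out, including defaults and the
	// option descriptions as comments.
	iniOpts := flags.IniIncludeDefaults | flags.IniIncludeComments
	if err := iniParser.WriteFile("app.ini", iniOpts); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```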
diff --git a/vendor/github.com/jessevdk/go-flags/man.go b/vendor/github.com/jessevdk/go-flags/man.go
new file mode 100644
index 0000000..82572f9
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/man.go
@@ -0,0 +1,223 @@
+package flags
+
+import (
+	"fmt"
+	"io"
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"time"
+)
+
+func manQuoteLines(s string) string {
+	lines := strings.Split(s, "\n")
+	parts := []string{}
+
+	for _, line := range lines {
+		parts = append(parts, manQuote(line))
+	}
+
+	return strings.Join(parts, "\n")
+}
+
+func manQuote(s string) string {
+	return strings.Replace(s, "\\", "\\\\", -1)
+}
+
+func formatForMan(wr io.Writer, s string, quoter func(s string) string) {
+	for {
+		idx := strings.IndexRune(s, '`')
+
+		if idx < 0 {
+			fmt.Fprintf(wr, "%s", quoter(s))
+			break
+		}
+
+		fmt.Fprintf(wr, "%s", quoter(s[:idx]))
+
+		s = s[idx+1:]
+		idx = strings.IndexRune(s, '\'')
+
+		if idx < 0 {
+			fmt.Fprintf(wr, "%s", quoter(s))
+			break
+		}
+
+		fmt.Fprintf(wr, "\\fB%s\\fP", quoter(s[:idx]))
+		s = s[idx+1:]
+	}
+}
+
+func writeManPageOptions(wr io.Writer, grp *Group) {
+	grp.eachGroup(func(group *Group) {
+		if !group.showInHelp() {
+			return
+		}
+
+		// If the parent (grp) has any subgroups, display their descriptions as
+		// subsection headers similar to the output of --help.
+		if group.ShortDescription != "" && len(grp.groups) > 0 {
+			fmt.Fprintf(wr, ".SS %s\n", group.ShortDescription)
+
+			if group.LongDescription != "" {
+				formatForMan(wr, group.LongDescription, manQuoteLines)
+				fmt.Fprintln(wr, "")
+			}
+		}
+
+		for _, opt := range group.options {
+			if !opt.showInHelp() {
+				continue
+			}
+
+			fmt.Fprintln(wr, ".TP")
+			fmt.Fprintf(wr, "\\fB")
+
+			if opt.ShortName != 0 {
+				fmt.Fprintf(wr, "\\fB\\-%c\\fR", opt.ShortName)
+			}
+
+			if len(opt.LongName) != 0 {
+				if opt.ShortName != 0 {
+					fmt.Fprintf(wr, ", ")
+				}
+
+				fmt.Fprintf(wr, "\\fB\\-\\-%s\\fR", manQuote(opt.LongNameWithNamespace()))
+			}
+
+			if len(opt.ValueName) != 0 || opt.OptionalArgument {
+				if opt.OptionalArgument {
+					fmt.Fprintf(wr, " [\\fI%s=%s\\fR]", manQuote(opt.ValueName), manQuote(strings.Join(quoteV(opt.OptionalValue), ", ")))
+				} else {
+					fmt.Fprintf(wr, " \\fI%s\\fR", manQuote(opt.ValueName))
+				}
+			}
+
+			if len(opt.Default) != 0 {
+				fmt.Fprintf(wr, " <default: \\fI%s\\fR>", manQuote(strings.Join(quoteV(opt.Default), ", ")))
+			} else if len(opt.EnvKeyWithNamespace()) != 0 {
+				if runtime.GOOS == "windows" {
+					fmt.Fprintf(wr, " <default: \\fI%%%s%%\\fR>", manQuote(opt.EnvKeyWithNamespace()))
+				} else {
+					fmt.Fprintf(wr, " <default: \\fI$%s\\fR>", manQuote(opt.EnvKeyWithNamespace()))
+				}
+			}
+
+			if opt.Required {
+				fmt.Fprintf(wr, " (\\fIrequired\\fR)")
+			}
+
+			fmt.Fprintln(wr, "\\fP")
+
+			if len(opt.Description) != 0 {
+				formatForMan(wr, opt.Description, manQuoteLines)
+				fmt.Fprintln(wr, "")
+			}
+		}
+	})
+}
+
+func writeManPageSubcommands(wr io.Writer, name string, usagePrefix string, root *Command) {
+	commands := root.sortedVisibleCommands()
+
+	for _, c := range commands {
+		var nn string
+
+		if c.Hidden {
+			continue
+		}
+
+		if len(name) != 0 {
+			nn = name + " " + c.Name
+		} else {
+			nn = c.Name
+		}
+
+		writeManPageCommand(wr, nn, usagePrefix, c)
+	}
+}
+
+func writeManPageCommand(wr io.Writer, name string, usagePrefix string, command *Command) {
+	fmt.Fprintf(wr, ".SS %s\n", name)
+	fmt.Fprintln(wr, command.ShortDescription)
+
+	if len(command.LongDescription) > 0 {
+		fmt.Fprintln(wr, "")
+
+		cmdstart := fmt.Sprintf("The %s command", manQuote(command.Name))
+
+		if strings.HasPrefix(command.LongDescription, cmdstart) {
+			fmt.Fprintf(wr, "The \\fI%s\\fP command", manQuote(command.Name))
+
+			formatForMan(wr, command.LongDescription[len(cmdstart):], manQuoteLines)
+			fmt.Fprintln(wr, "")
+		} else {
+			formatForMan(wr, command.LongDescription, manQuoteLines)
+			fmt.Fprintln(wr, "")
+		}
+	}
+
+	var pre = usagePrefix + " " + command.Name
+
+	var usage string
+	if us, ok := command.data.(Usage); ok {
+		usage = us.Usage()
+	} else if command.hasHelpOptions() {
+		usage = fmt.Sprintf("[%s-OPTIONS]", command.Name)
+	}
+
+	var nextPrefix = pre
+	if len(usage) > 0 {
+		fmt.Fprintf(wr, "\n\\fBUsage\\fP: %s %s\n.TP\n", manQuote(pre), manQuote(usage))
+		nextPrefix = pre + " " + usage
+	}
+
+	if len(command.Aliases) > 0 {
+		fmt.Fprintf(wr, "\n\\fBAliases\\fP: %s\n\n", manQuote(strings.Join(command.Aliases, ", ")))
+	}
+
+	writeManPageOptions(wr, command.Group)
+	writeManPageSubcommands(wr, name, nextPrefix, command)
+}
+
+// WriteManPage writes a basic man page in groff format to the specified
+// writer.
+func (p *Parser) WriteManPage(wr io.Writer) {
+	t := time.Now()
+	source_date_epoch := os.Getenv("SOURCE_DATE_EPOCH")
+	if source_date_epoch != "" {
+		sde, err := strconv.ParseInt(source_date_epoch, 10, 64)
+		if err != nil {
+			panic(fmt.Sprintf("Invalid SOURCE_DATE_EPOCH: %s", err))
+		}
+		t = time.Unix(sde, 0)
+	}
+
+	fmt.Fprintf(wr, ".TH %s 1 \"%s\"\n", manQuote(p.Name), t.Format("2 January 2006"))
+	fmt.Fprintln(wr, ".SH NAME")
+	fmt.Fprintf(wr, "%s \\- %s\n", manQuote(p.Name), manQuoteLines(p.ShortDescription))
+	fmt.Fprintln(wr, ".SH SYNOPSIS")
+
+	usage := p.Usage
+
+	if len(usage) == 0 {
+		usage = "[OPTIONS]"
+	}
+
+	fmt.Fprintf(wr, "\\fB%s\\fP %s\n", manQuote(p.Name), manQuote(usage))
+	fmt.Fprintln(wr, ".SH DESCRIPTION")
+
+	formatForMan(wr, p.LongDescription, manQuoteLines)
+	fmt.Fprintln(wr, "")
+
+	fmt.Fprintln(wr, ".SH OPTIONS")
+
+	writeManPageOptions(wr, p.Command.Group)
+
+	if len(p.visibleCommands()) > 0 {
+		fmt.Fprintln(wr, ".SH COMMANDS")
+
+		writeManPageSubcommands(wr, "", p.Name+" "+usage, p.Command)
+	}
+}
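man.go generates a groff man page from the parser metadata. A minimal sketch of calling it; the descriptions and output handling are placeholders, not part of the vendored code:

```
package main

import (
	"os"

	flags "github.com/jessevdk/go-flags"
)

type Options struct {
	Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`
}

func main() {
	var opts Options

	parser := flags.NewParser(&opts, flags.Default)
	parser.ShortDescription = "example tool"
	parser.LongDescription = "example demonstrates go-flags man page generation."

	// Emits groff; redirect to example.1 and view with `man ./example.1`.
	// Setting SOURCE_DATE_EPOCH pins the date used in the .TH header, as
	// handled by WriteManPage above.
	parser.WriteManPage(os.Stdout)
}
```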
diff --git a/vendor/github.com/jessevdk/go-flags/multitag.go b/vendor/github.com/jessevdk/go-flags/multitag.go
new file mode 100644
index 0000000..96bb1a3
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/multitag.go
@@ -0,0 +1,140 @@
+package flags
+
+import (
+	"strconv"
+)
+
+type multiTag struct {
+	value string
+	cache map[string][]string
+}
+
+func newMultiTag(v string) multiTag {
+	return multiTag{
+		value: v,
+	}
+}
+
+func (x *multiTag) scan() (map[string][]string, error) {
+	v := x.value
+
+	ret := make(map[string][]string)
+
+	// This is mostly copied from reflect.StructTag.Get
+	for v != "" {
+		i := 0
+
+		// Skip whitespace
+		for i < len(v) && v[i] == ' ' {
+			i++
+		}
+
+		v = v[i:]
+
+		if v == "" {
+			break
+		}
+
+		// Scan to colon to find key
+		i = 0
+
+		for i < len(v) && v[i] != ' ' && v[i] != ':' && v[i] != '"' {
+			i++
+		}
+
+		if i >= len(v) {
+			return nil, newErrorf(ErrTag, "expected `:' after key name, but got end of tag (in `%v`)", x.value)
+		}
+
+		if v[i] != ':' {
+			return nil, newErrorf(ErrTag, "expected `:' after key name, but got `%v' (in `%v`)", v[i], x.value)
+		}
+
+		if i+1 >= len(v) {
+			return nil, newErrorf(ErrTag, "expected `\"' to start tag value at end of tag (in `%v`)", x.value)
+		}
+
+		if v[i+1] != '"' {
+			return nil, newErrorf(ErrTag, "expected `\"' to start tag value, but got `%v' (in `%v`)", v[i+1], x.value)
+		}
+
+		name := v[:i]
+		v = v[i+1:]
+
+		// Scan quoted string to find value
+		i = 1
+
+		for i < len(v) && v[i] != '"' {
+			if v[i] == '\n' {
+				return nil, newErrorf(ErrTag, "unexpected newline in tag value `%v' (in `%v`)", name, x.value)
+			}
+
+			if v[i] == '\\' {
+				i++
+			}
+			i++
+		}
+
+		if i >= len(v) {
+			return nil, newErrorf(ErrTag, "expected end of tag value `\"' at end of tag (in `%v`)", x.value)
+		}
+
+		val, err := strconv.Unquote(v[:i+1])
+
+		if err != nil {
+			return nil, newErrorf(ErrTag, "Malformed value of tag `%v:%v` => %v (in `%v`)", name, v[:i+1], err, x.value)
+		}
+
+		v = v[i+1:]
+
+		ret[name] = append(ret[name], val)
+	}
+
+	return ret, nil
+}
+
+func (x *multiTag) Parse() error {
+	vals, err := x.scan()
+	x.cache = vals
+
+	return err
+}
+
+func (x *multiTag) cached() map[string][]string {
+	if x.cache == nil {
+		cache, _ := x.scan()
+
+		if cache == nil {
+			cache = make(map[string][]string)
+		}
+
+		x.cache = cache
+	}
+
+	return x.cache
+}
+
+func (x *multiTag) Get(key string) string {
+	c := x.cached()
+
+	if v, ok := c[key]; ok {
+		return v[len(v)-1]
+	}
+
+	return ""
+}
+
+func (x *multiTag) GetMany(key string) []string {
+	c := x.cached()
+	return c[key]
+}
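+
+// Illustrative example (not part of the upstream source) of how a tag value is
+// parsed and queried, assuming the tag below:
+//
+//	t := newMultiTag(`short:"v" long:"verbose" description:"Verbose output"`)
+//	_ = t.Parse()      // populates the cache
+//	t.Get("long")      // "verbose"
+//	t.GetMany("short") // []string{"v"}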
+
+func (x *multiTag) Set(key string, value string) {
+	c := x.cached()
+	c[key] = []string{value}
+}
+
+func (x *multiTag) SetMany(key string, value []string) {
+	c := x.cached()
+	c[key] = value
+}
diff --git a/vendor/github.com/jessevdk/go-flags/option.go b/vendor/github.com/jessevdk/go-flags/option.go
new file mode 100644
index 0000000..f6d6941
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/option.go
@@ -0,0 +1,569 @@
+package flags
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"reflect"
+	"strings"
+	"unicode/utf8"
+)
+
+// Option flag information. Contains a description of the option, short and
+// long name as well as a default value and whether an argument for this
+// flag is optional.
+type Option struct {
+	// The description of the option flag. This description is shown
+	// automatically in the built-in help.
+	Description string
+
+	// The short name of the option (a single character). If not 0, the
+	// option flag can be 'activated' using -<ShortName>. Either ShortName
+	// or LongName needs to be non-empty.
+	ShortName rune
+
+	// The long name of the option. If not "", the option flag can be
+	// activated using --<LongName>. Either ShortName or LongName needs
+	// to be non-empty.
+	LongName string
+
+	// The default value of the option.
+	Default []string
+
+	// The optional environment default value key name.
+	EnvDefaultKey string
+
+	// The optional delimiter string for EnvDefaultKey values.
+	EnvDefaultDelim string
+
+	// If true, specifies that the argument to an option flag is optional.
+	// When no argument to the flag is specified on the command line, the
+	// value of OptionalValue will be set in the field this option represents.
+	// This is only valid for non-boolean options.
+	OptionalArgument bool
+
+	// The optional value of the option. The optional value is used when
+	// the option flag is marked as having an OptionalArgument. This means
+	// that when the flag is specified, but no option argument is given,
+	// the value of the field this option represents will be set to
+	// OptionalValue. This is only valid for non-boolean options.
+	OptionalValue []string
+
+	// If true, the option _must_ be specified on the command line. If the
+	// option is not specified, the parser will generate an ErrRequired type
+	// error.
+	Required bool
+
+	// A name for the value of an option shown in the Help as --flag [ValueName]
+	ValueName string
+
+	// A mask value to show in the help instead of the default value. This
+	// is useful for hiding sensitive information in the help, such as
+	// passwords.
+	DefaultMask string
+
+	// If non-empty, only a certain set of values is allowed for an option.
+	Choices []string
+
+	// If true, the option is not displayed in the help or man page
+	Hidden bool
+
+	// The group which the option belongs to
+	group *Group
+
+	// The struct field which the option represents.
+	field reflect.StructField
+
+	// The struct field value which the option represents.
+	value reflect.Value
+
+	// Determines if the option will always be quoted in the INI output
+	iniQuote bool
+
+	tag                     multiTag
+	isSet                   bool
+	isSetDefault            bool
+	preventDefault          bool
+	clearReferenceBeforeSet bool
+
+	defaultLiteral string
+}
+
+// LongNameWithNamespace returns the option's long name with the group namespaces
+// prepended by walking up the option's group tree. Namespaces and the long name
+// itself are separated by the parser's namespace delimiter. If the long name is
+// empty an empty string is returned.
+func (option *Option) LongNameWithNamespace() string {
+	if len(option.LongName) == 0 {
+		return ""
+	}
+
+	// fetch the namespace delimiter from the parser which is always at the
+	// end of the group hierarchy
+	namespaceDelimiter := ""
+	g := option.group
+
+	for {
+		if p, ok := g.parent.(*Parser); ok {
+			namespaceDelimiter = p.NamespaceDelimiter
+
+			break
+		}
+
+		switch i := g.parent.(type) {
+		case *Command:
+			g = i.Group
+		case *Group:
+			g = i
+		}
+	}
+
+	// concatenate long name with namespace
+	longName := option.LongName
+	g = option.group
+
+	for g != nil {
+		if g.Namespace != "" {
+			longName = g.Namespace + namespaceDelimiter + longName
+		}
+
+		switch i := g.parent.(type) {
+		case *Command:
+			g = i.Group
+		case *Group:
+			g = i
+		case *Parser:
+			g = nil
+		}
+	}
+
+	return longName
+}
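+
+// Illustrative example (an assumption, not part of the upstream source): an
+// option with LongName "host", declared in a group whose Namespace is "db",
+// under a parser whose NamespaceDelimiter is ".", yields "db.host" here.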
+
+// EnvKeyWithNamespace returns the option's env key with the group namespaces
+// prepended by walking up the option's group tree. Namespaces and the env key
+// itself are separated by the parser's namespace delimiter. If the env key is
+// empty an empty string is returned.
+func (option *Option) EnvKeyWithNamespace() string {
+	if len(option.EnvDefaultKey) == 0 {
+		return ""
+	}
+
+	// fetch the namespace delimiter from the parser which is always at the
+	// end of the group hierarchy
+	namespaceDelimiter := ""
+	g := option.group
+
+	for {
+		if p, ok := g.parent.(*Parser); ok {
+			namespaceDelimiter = p.EnvNamespaceDelimiter
+
+			break
+		}
+
+		switch i := g.parent.(type) {
+		case *Command:
+			g = i.Group
+		case *Group:
+			g = i
+		}
+	}
+
+	// concatenate env key with namespace
+	key := option.EnvDefaultKey
+	g = option.group
+
+	for g != nil {
+		if g.EnvNamespace != "" {
+			key = g.EnvNamespace + namespaceDelimiter + key
+		}
+
+		switch i := g.parent.(type) {
+		case *Command:
+			g = i.Group
+		case *Group:
+			g = i
+		case *Parser:
+			g = nil
+		}
+	}
+
+	return key
+}
+
+// String converts an option to a human-friendly string describing the
+// option.
+func (option *Option) String() string {
+	var s string
+	var short string
+
+	if option.ShortName != 0 {
+		data := make([]byte, utf8.RuneLen(option.ShortName))
+		utf8.EncodeRune(data, option.ShortName)
+		short = string(data)
+
+		if len(option.LongName) != 0 {
+			s = fmt.Sprintf("%s%s, %s%s",
+				string(defaultShortOptDelimiter), short,
+				defaultLongOptDelimiter, option.LongNameWithNamespace())
+		} else {
+			s = fmt.Sprintf("%s%s", string(defaultShortOptDelimiter), short)
+		}
+	} else if len(option.LongName) != 0 {
+		s = fmt.Sprintf("%s%s", defaultLongOptDelimiter, option.LongNameWithNamespace())
+	}
+
+	return s
+}
+
+// Value returns the option value as an interface{}.
+func (option *Option) Value() interface{} {
+	return option.value.Interface()
+}
+
+// Field returns the reflect struct field of the option.
+func (option *Option) Field() reflect.StructField {
+	return option.field
+}
+
+// IsSet returns true if option has been set
+func (option *Option) IsSet() bool {
+	return option.isSet
+}
+
+// IsSetDefault returns true if option has been set via the default option tag
+func (option *Option) IsSetDefault() bool {
+	return option.isSetDefault
+}
+
+// Set the value of an option to the specified value. An error will be returned
+// if the specified value could not be converted to the corresponding option
+// value type.
+func (option *Option) set(value *string) error {
+	kind := option.value.Type().Kind()
+
+	if (kind == reflect.Map || kind == reflect.Slice) && option.clearReferenceBeforeSet {
+		option.empty()
+	}
+
+	option.isSet = true
+	option.preventDefault = true
+	option.clearReferenceBeforeSet = false
+
+	if len(option.Choices) != 0 {
+		found := false
+
+		for _, choice := range option.Choices {
+			if choice == *value {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			allowed := strings.Join(option.Choices[0:len(option.Choices)-1], ", ")
+
+			if len(option.Choices) > 1 {
+				allowed += " or " + option.Choices[len(option.Choices)-1]
+			}
+
+			return newErrorf(ErrInvalidChoice,
+				"Invalid value `%s' for option `%s'. Allowed values are: %s",
+				*value, option, allowed)
+		}
+	}
+
+	if option.isFunc() {
+		return option.call(value)
+	} else if value != nil {
+		return convert(*value, option.value, option.tag)
+	}
+
+	return convert("", option.value, option.tag)
+}
+
+func (option *Option) setDefault(value *string) error {
+	if option.preventDefault {
+		return nil
+	}
+
+	if err := option.set(value); err != nil {
+		return err
+	}
+
+	option.isSetDefault = true
+	option.preventDefault = false
+
+	return nil
+}
+
+func (option *Option) showInHelp() bool {
+	return !option.Hidden && (option.ShortName != 0 || len(option.LongName) != 0)
+}
+
+func (option *Option) canArgument() bool {
+	if u := option.isUnmarshaler(); u != nil {
+		return true
+	}
+
+	return !option.isBool()
+}
+
+func (option *Option) emptyValue() reflect.Value {
+	tp := option.value.Type()
+
+	if tp.Kind() == reflect.Map {
+		return reflect.MakeMap(tp)
+	}
+
+	return reflect.Zero(tp)
+}
+
+func (option *Option) empty() {
+	if !option.isFunc() {
+		option.value.Set(option.emptyValue())
+	}
+}
+
+func (option *Option) clearDefault() error {
+	if option.preventDefault {
+		return nil
+	}
+
+	usedDefault := option.Default
+
+	if envKey := option.EnvKeyWithNamespace(); envKey != "" {
+		if value, ok := os.LookupEnv(envKey); ok {
+			if option.EnvDefaultDelim != "" {
+				usedDefault = strings.Split(value, option.EnvDefaultDelim)
+			} else {
+				usedDefault = []string{value}
+			}
+		}
+	}
+
+	option.isSetDefault = true
+
+	if len(usedDefault) > 0 {
+		option.empty()
+
+		for _, d := range usedDefault {
+			err := option.setDefault(&d)
+
+			if err != nil {
+				return err
+			}
+		}
+	} else {
+		tp := option.value.Type()
+
+		switch tp.Kind() {
+		case reflect.Map:
+			if option.value.IsNil() {
+				option.empty()
+			}
+		case reflect.Slice:
+			if option.value.IsNil() {
+				option.empty()
+			}
+		}
+	}
+
+	return nil
+}
+
+func (option *Option) valueIsDefault() bool {
+	// Check if the value of the option corresponds to its
+	// default value
+	emptyval := option.emptyValue()
+
+	checkvalptr := reflect.New(emptyval.Type())
+	checkval := reflect.Indirect(checkvalptr)
+
+	checkval.Set(emptyval)
+
+	if len(option.Default) != 0 {
+		for _, v := range option.Default {
+			convert(v, checkval, option.tag)
+		}
+	}
+
+	return reflect.DeepEqual(option.value.Interface(), checkval.Interface())
+}
+
+func (option *Option) isUnmarshaler() Unmarshaler {
+	v := option.value
+
+	for {
+		if !v.CanInterface() {
+			break
+		}
+
+		i := v.Interface()
+
+		if u, ok := i.(Unmarshaler); ok {
+			return u
+		}
+
+		if !v.CanAddr() {
+			break
+		}
+
+		v = v.Addr()
+	}
+
+	return nil
+}
+
+func (option *Option) isValueValidator() ValueValidator {
+	v := option.value
+
+	for {
+		if !v.CanInterface() {
+			break
+		}
+
+		i := v.Interface()
+
+		if u, ok := i.(ValueValidator); ok {
+			return u
+		}
+
+		if !v.CanAddr() {
+			break
+		}
+
+		v = v.Addr()
+	}
+
+	return nil
+}
+
+func (option *Option) isBool() bool {
+	tp := option.value.Type()
+
+	for {
+		switch tp.Kind() {
+		case reflect.Slice, reflect.Ptr:
+			tp = tp.Elem()
+		case reflect.Bool:
+			return true
+		case reflect.Func:
+			return tp.NumIn() == 0
+		default:
+			return false
+		}
+	}
+}
+
+func (option *Option) isSignedNumber() bool {
+	tp := option.value.Type()
+
+	for {
+		switch tp.Kind() {
+		case reflect.Slice, reflect.Ptr:
+			tp = tp.Elem()
+		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Float32, reflect.Float64:
+			return true
+		default:
+			return false
+		}
+	}
+}
+
+func (option *Option) isFunc() bool {
+	return option.value.Type().Kind() == reflect.Func
+}
+
+func (option *Option) call(value *string) error {
+	var retval []reflect.Value
+
+	if value == nil {
+		retval = option.value.Call(nil)
+	} else {
+		tp := option.value.Type().In(0)
+
+		val := reflect.New(tp)
+		val = reflect.Indirect(val)
+
+		if err := convert(*value, val, option.tag); err != nil {
+			return err
+		}
+
+		retval = option.value.Call([]reflect.Value{val})
+	}
+
+	if len(retval) == 1 && retval[0].Type() == reflect.TypeOf((*error)(nil)).Elem() {
+		if retval[0].Interface() == nil {
+			return nil
+		}
+
+		return retval[0].Interface().(error)
+	}
+
+	return nil
+}
+
+func (option *Option) updateDefaultLiteral() {
+	defs := option.Default
+	def := ""
+
+	if len(defs) == 0 && option.canArgument() {
+		var showdef bool
+
+		switch option.field.Type.Kind() {
+		case reflect.Func, reflect.Ptr:
+			showdef = !option.value.IsNil()
+		case reflect.Slice, reflect.String, reflect.Array:
+			showdef = option.value.Len() > 0
+		case reflect.Map:
+			showdef = !option.value.IsNil() && option.value.Len() > 0
+		default:
+			zeroval := reflect.Zero(option.field.Type)
+			showdef = !reflect.DeepEqual(zeroval.Interface(), option.value.Interface())
+		}
+
+		if showdef {
+			def, _ = convertToString(option.value, option.tag)
+		}
+	} else if len(defs) != 0 {
+		l := len(defs) - 1
+
+		for i := 0; i < l; i++ {
+			def += quoteIfNeeded(defs[i]) + ", "
+		}
+
+		def += quoteIfNeeded(defs[l])
+	}
+
+	option.defaultLiteral = def
+}
+
+func (option *Option) shortAndLongName() string {
+	ret := &bytes.Buffer{}
+
+	if option.ShortName != 0 {
+		ret.WriteRune(defaultShortOptDelimiter)
+		ret.WriteRune(option.ShortName)
+	}
+
+	if len(option.LongName) != 0 {
+		if option.ShortName != 0 {
+			ret.WriteRune('/')
+		}
+
+		ret.WriteString(option.LongName)
+	}
+
+	return ret.String()
+}
+
+func (option *Option) isValidValue(arg string) error {
+	if validator := option.isValueValidator(); validator != nil {
+		return validator.IsValidValue(arg)
+	}
+	if argumentIsOption(arg) && !(option.isSignedNumber() && len(arg) > 1 && arg[0] == '-' && arg[1] >= '0' && arg[1] <= '9') {
+		return fmt.Errorf("expected argument for flag `%s', but got option `%s'", option, arg)
+	}
+	return nil
+}
diff --git a/vendor/github.com/jessevdk/go-flags/optstyle_other.go b/vendor/github.com/jessevdk/go-flags/optstyle_other.go
new file mode 100644
index 0000000..56dfdae
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/optstyle_other.go
@@ -0,0 +1,67 @@
+// +build !windows forceposix
+
+package flags
+
+import (
+	"strings"
+)
+
+const (
+	defaultShortOptDelimiter = '-'
+	defaultLongOptDelimiter  = "--"
+	defaultNameArgDelimiter  = '='
+)
+
+func argumentStartsOption(arg string) bool {
+	return len(arg) > 0 && arg[0] == '-'
+}
+
+func argumentIsOption(arg string) bool {
+	if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' {
+		return true
+	}
+
+	if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' {
+		return true
+	}
+
+	return false
+}
+
+// stripOptionPrefix returns the option without the prefix and whether or
+// not the option is a long option.
+func stripOptionPrefix(optname string) (prefix string, name string, islong bool) {
+	if strings.HasPrefix(optname, "--") {
+		return "--", optname[2:], true
+	} else if strings.HasPrefix(optname, "-") {
+		return "-", optname[1:], false
+	}
+
+	return "", optname, false
+}
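+
+// Illustrative examples (not part of the upstream source):
+//
+//	stripOptionPrefix("--verbose") // "--", "verbose", true
+//	stripOptionPrefix("-v")        // "-", "v", false
+//	stripOptionPrefix("file.txt")  // "", "file.txt", false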
+
+// splitOption attempts to split the passed option into a name and an argument.
+// When there is no argument specified, nil will be returned for it.
+func splitOption(prefix string, option string, islong bool) (string, string, *string) {
+	pos := strings.Index(option, "=")
+
+	if (islong && pos >= 0) || (!islong && pos == 1) {
+		rest := option[pos+1:]
+		return option[:pos], "=", &rest
+	}
+
+	return option, "", nil
+}
+
+// addHelpGroup adds a new group that contains default help parameters.
+func (c *Command) addHelpGroup(showHelp func() error) *Group {
+	var help struct {
+		ShowHelp func() error `short:"h" long:"help" description:"Show this help message"`
+	}
+
+	help.ShowHelp = showHelp
+	ret, _ := c.AddGroup("Help Options", "", &help)
+	ret.isBuiltinHelp = true
+
+	return ret
+}
diff --git a/vendor/github.com/jessevdk/go-flags/optstyle_windows.go b/vendor/github.com/jessevdk/go-flags/optstyle_windows.go
new file mode 100644
index 0000000..f3f28ae
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/optstyle_windows.go
@@ -0,0 +1,108 @@
+// +build !forceposix
+
+package flags
+
+import (
+	"strings"
+)
+
+// Windows uses a forward slash for both short and long options. It also uses
+// a colon as the name/argument delimiter.
+const (
+	defaultShortOptDelimiter = '/'
+	defaultLongOptDelimiter  = "/"
+	defaultNameArgDelimiter  = ':'
+)
+
+func argumentStartsOption(arg string) bool {
+	return len(arg) > 0 && (arg[0] == '-' || arg[0] == '/')
+}
+
+func argumentIsOption(arg string) bool {
+	// Windows-style options allow a forward slash as the option
+	// delimiter.
+	if len(arg) > 1 && arg[0] == '/' {
+		return true
+	}
+
+	if len(arg) > 1 && arg[0] == '-' && arg[1] != '-' {
+		return true
+	}
+
+	if len(arg) > 2 && arg[0] == '-' && arg[1] == '-' && arg[2] != '-' {
+		return true
+	}
+
+	return false
+}
+
+// stripOptionPrefix returns the option without the prefix and whether or
+// not the option is a long option.
+func stripOptionPrefix(optname string) (prefix string, name string, islong bool) {
+	// Determine if the argument is a long option or not. Windows
+	// typically supports both long and short options with a single
+	// forward slash as the option delimiter, so handle this situation
+	// nicely.
+	possplit := 0
+
+	if strings.HasPrefix(optname, "--") {
+		possplit = 2
+		islong = true
+	} else if strings.HasPrefix(optname, "-") {
+		possplit = 1
+		islong = false
+	} else if strings.HasPrefix(optname, "/") {
+		possplit = 1
+		islong = len(optname) > 2
+	}
+
+	return optname[:possplit], optname[possplit:], islong
+}
+
+// splitOption attempts to split the passed option into a name and an argument.
+// When there is no argument specified, nil will be returned for it.
+func splitOption(prefix string, option string, islong bool) (string, string, *string) {
+	if len(option) == 0 {
+		return option, "", nil
+	}
+
+	// Windows typically uses a colon for the option name and argument
+	// delimiter while POSIX typically uses an equals.  Support both styles,
+	// but don't allow the two to be mixed.  That is to say /foo:bar and
+	// --foo=bar are acceptable, but /foo=bar and --foo:bar are not.
+	var pos int
+	var sp string
+
+	if prefix == "/" {
+		sp = ":"
+		pos = strings.Index(option, sp)
+	} else if len(prefix) > 0 {
+		sp = "="
+		pos = strings.Index(option, sp)
+	}
+
+	if (islong && pos >= 0) || (!islong && pos == 1) {
+		rest := option[pos+1:]
+		return option[:pos], sp, &rest
+	}
+
+	return option, "", nil
+}
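+
+// Illustrative examples (not part of the upstream source):
+//
+//	splitOption("/", "foo:bar", true)  // "foo", ":", pointer to "bar"
+//	splitOption("--", "foo=bar", true) // "foo", "=", pointer to "bar"
+//	splitOption("--", "foo:bar", true) // "foo:bar", "", nil (mixed styles are not split)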
+
+// addHelpGroup adds a new group that contains default help parameters.
+func (c *Command) addHelpGroup(showHelp func() error) *Group {
+	// Windows CLI applications typically use /? for help, so make that
+	// available as well as the POSIX-style -h and --help.
+	var help struct {
+		ShowHelpWindows func() error `short:"?" description:"Show this help message"`
+		ShowHelpPosix   func() error `short:"h" long:"help" description:"Show this help message"`
+	}
+
+	help.ShowHelpWindows = showHelp
+	help.ShowHelpPosix = showHelp
+
+	ret, _ := c.AddGroup("Help Options", "", &help)
+	ret.isBuiltinHelp = true
+
+	return ret
+}
diff --git a/vendor/github.com/jessevdk/go-flags/parser.go b/vendor/github.com/jessevdk/go-flags/parser.go
new file mode 100644
index 0000000..3fc3f7b
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/parser.go
@@ -0,0 +1,714 @@
+// Copyright 2012 Jesse van den Kieboom. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flags
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+	"path"
+	"reflect"
+	"sort"
+	"strings"
+	"unicode/utf8"
+)
+
+// A Parser provides command line option parsing. It can contain several
+// option groups each with their own set of options.
+type Parser struct {
+	// Embedded, see Command for more information
+	*Command
+
+	// A usage string to be displayed in the help message.
+	Usage string
+
+	// Option flags changing the behavior of the parser.
+	Options Options
+
+	// NamespaceDelimiter separates group namespaces and option long names
+	NamespaceDelimiter string
+
+	// EnvNamespaceDelimiter separates group env namespaces and env keys
+	EnvNamespaceDelimiter string
+
+	// UnknownOptionHandler is a function which gets called when the parser
+	// encounters an unknown option. The function receives the unknown option
+	// name, a SplitArgument which specifies its value if set with an argument
+	// separator, and the remaining command line arguments.
+	// It should return a new list of remaining arguments to continue parsing,
+	// or an error to indicate a parse failure.
+	UnknownOptionHandler func(option string, arg SplitArgument, args []string) ([]string, error)
+
+	// CompletionHandler is a function that gets called to handle the completion
+	// of items. By default, the items are printed and the application is exited.
+	// You can override this default behavior by specifying a custom CompletionHandler.
+	CompletionHandler func(items []Completion)
+
+	// CommandHandler is a function that gets called to handle execution of a
+	// command. By default, the command will simply be executed. This can be
+	// overridden to perform certain actions (such as applying global flags)
+	// just before the command is executed. Note that if you override the
+	// handler it is your responsibility to call the command.Execute function.
+	//
+	// The command passed into CommandHandler may be nil in case there is no
+	// command to be executed when parsing has finished.
+	CommandHandler func(command Commander, args []string) error
+
+	internalError error
+}
+
+// SplitArgument represents the argument value of an option that was passed using
+// an argument separator.
+type SplitArgument interface {
+	// Value returns the option's value as a string, and a boolean indicating
+	// if the option was present.
+	Value() (string, bool)
+}
+
+type strArgument struct {
+	value *string
+}
+
+func (s strArgument) Value() (string, bool) {
+	if s.value == nil {
+		return "", false
+	}
+
+	return *s.value, true
+}
+
+// Options provides parser options that change the behavior of the option
+// parser.
+type Options uint
+
+const (
+	// None indicates no options.
+	None Options = 0
+
+	// HelpFlag adds a default Help Options group to the parser containing
+	// -h and --help options. When either -h or --help is specified on the
+	// command line, the parser will return the special error of type
+	// ErrHelp. When PrintErrors is also specified, then the help message
+	// will also be automatically printed to os.Stdout.
+	HelpFlag = 1 << iota
+
+	// PassDoubleDash passes all arguments after a double dash, --, as
+	// remaining command line arguments (i.e. they will not be parsed for
+	// flags).
+	PassDoubleDash
+
+	// IgnoreUnknown ignores any unknown options and passes them as
+	// remaining command line arguments instead of generating an error.
+	IgnoreUnknown
+
+	// PrintErrors prints any errors which occurred during parsing to
+	// os.Stderr. In the special case of ErrHelp, the message will be printed
+	// to os.Stdout.
+	PrintErrors
+
+	// PassAfterNonOption passes all arguments after the first non-option
+	// as remaining command line arguments. This is equivalent to strict
+	// POSIX processing.
+	PassAfterNonOption
+
+	// Default is a convenient default set of options which should cover
+	// most of the uses of the flags package.
+	Default = HelpFlag | PrintErrors | PassDoubleDash
+)
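+
+// Illustrative note (not part of the upstream source): Options is a bit mask, so
+// the values above can be combined. For example, assuming opts points to an
+// option struct, a parser that also ignores unknown flags could be created as:
+//
+//	p := NewParser(opts, Default|IgnoreUnknown)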
+
+type parseState struct {
+	arg        string
+	args       []string
+	retargs    []string
+	positional []*Arg
+	err        error
+
+	command *Command
+	lookup  lookup
+}
+
+// Parse is a convenience function to parse command line options with default
+// settings. The provided data is a pointer to a struct representing the
+// default option group (named "Application Options"). For more control, use
+// flags.NewParser.
+func Parse(data interface{}) ([]string, error) {
+	return NewParser(data, Default).Parse()
+}
+
+// ParseArgs is a convenience function to parse command line options with default
+// settings. The provided data is a pointer to a struct representing the
+// default option group (named "Application Options"). The args argument is
+// the list of command line arguments to parse. If you just want to parse the
+// default program command line arguments (i.e. os.Args), then use flags.Parse
+// instead. For more control, use flags.NewParser.
+func ParseArgs(data interface{}, args []string) ([]string, error) {
+	return NewParser(data, Default).ParseArgs(args)
+}
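+
+// Illustrative sketch (an assumption, not part of the upstream source) of how a
+// client program typically uses the convenience functions above:
+//
+//	var opts struct {
+//		Verbose []bool `short:"v" long:"verbose" description:"Verbose output"`
+//	}
+//
+//	args, err := flags.Parse(&opts)
+//	if err != nil {
+//		os.Exit(1)
+//	}
+//	fmt.Println("remaining args:", args)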
+
+// NewParser creates a new parser. It uses os.Args[0] as the application
+// name and then calls Parser.NewNamedParser (see Parser.NewNamedParser for
+// more details). The provided data is a pointer to a struct representing the
+// default option group (named "Application Options"), or nil if the default
+// group should not be added. The options parameter specifies a set of options
+// for the parser.
+func NewParser(data interface{}, options Options) *Parser {
+	p := NewNamedParser(path.Base(os.Args[0]), options)
+
+	if data != nil {
+		g, err := p.AddGroup("Application Options", "", data)
+
+		if err == nil {
+			g.parent = p
+		}
+
+		p.internalError = err
+	}
+
+	return p
+}
+
+// NewNamedParser creates a new parser. The appname is used to display the
+// executable name in the built-in help message. Option groups and commands can
+// be added to this parser by using AddGroup and AddCommand.
+func NewNamedParser(appname string, options Options) *Parser {
+	p := &Parser{
+		Command:               newCommand(appname, "", "", nil),
+		Options:               options,
+		NamespaceDelimiter:    ".",
+		EnvNamespaceDelimiter: "_",
+	}
+
+	p.Command.parent = p
+
+	return p
+}
+
+// Parse parses the command line arguments from os.Args using Parser.ParseArgs.
+// For more detailed information see ParseArgs.
+func (p *Parser) Parse() ([]string, error) {
+	return p.ParseArgs(os.Args[1:])
+}
+
+// ParseArgs parses the command line arguments according to the option groups that
+// were added to the parser. On successful parsing of the arguments, the
+// remaining, non-option, arguments (if any) are returned. The returned error
+// indicates a parsing error and can be used with PrintError to display
+// contextual information on where the error occurred exactly.
+//
+// When the common help group has been added (AddHelp) and either -h or --help
+// was specified in the command line arguments, a help message will be
+// automatically printed if the PrintErrors option is enabled.
+// Furthermore, the special error type ErrHelp is returned.
+// It is up to the caller to exit the program if so desired.
+func (p *Parser) ParseArgs(args []string) ([]string, error) {
+	if p.internalError != nil {
+		return nil, p.internalError
+	}
+
+	p.eachOption(func(c *Command, g *Group, option *Option) {
+		option.clearReferenceBeforeSet = true
+		option.updateDefaultLiteral()
+	})
+
+	// Add built-in help group to all commands if necessary
+	if (p.Options & HelpFlag) != None {
+		p.addHelpGroups(p.showBuiltinHelp)
+	}
+
+	compval := os.Getenv("GO_FLAGS_COMPLETION")
+
+	if len(compval) != 0 {
+		comp := &completion{parser: p}
+		items := comp.complete(args)
+
+		if p.CompletionHandler != nil {
+			p.CompletionHandler(items)
+		} else {
+			comp.print(items, compval == "verbose")
+			os.Exit(0)
+		}
+
+		return nil, nil
+	}
+
+	s := &parseState{
+		args:    args,
+		retargs: make([]string, 0, len(args)),
+	}
+
+	p.fillParseState(s)
+
+	for !s.eof() {
+		var err error
+		arg := s.pop()
+
+		// When PassDoubleDash is set and we encounter a --, then
+		// simply append all the rest as arguments and break out
+		if (p.Options&PassDoubleDash) != None && arg == "--" {
+			s.addArgs(s.args...)
+			break
+		}
+
+		if !argumentIsOption(arg) {
+			if (p.Options&PassAfterNonOption) != None && s.lookup.commands[arg] == nil {
+				// If PassAfterNonOption is set then all remaining arguments
+				// are considered positional
+				if err = s.addArgs(s.arg); err != nil {
+					break
+				}
+
+				if err = s.addArgs(s.args...); err != nil {
+					break
+				}
+
+				break
+			}
+
+			// Note: this also sets s.err, so we can just check for
+			// nil here and use s.err later
+			if p.parseNonOption(s) != nil {
+				break
+			}
+
+			continue
+		}
+
+		prefix, optname, islong := stripOptionPrefix(arg)
+		optname, _, argument := splitOption(prefix, optname, islong)
+
+		if islong {
+			err = p.parseLong(s, optname, argument)
+		} else {
+			err = p.parseShort(s, optname, argument)
+		}
+
+		if err != nil {
+			ignoreUnknown := (p.Options & IgnoreUnknown) != None
+			parseErr := wrapError(err)
+
+			if parseErr.Type != ErrUnknownFlag || (!ignoreUnknown && p.UnknownOptionHandler == nil) {
+				s.err = parseErr
+				break
+			}
+
+			if ignoreUnknown {
+				s.addArgs(arg)
+			} else if p.UnknownOptionHandler != nil {
+				modifiedArgs, err := p.UnknownOptionHandler(optname, strArgument{argument}, s.args)
+
+				if err != nil {
+					s.err = err
+					break
+				}
+
+				s.args = modifiedArgs
+			}
+		}
+	}
+
+	if s.err == nil {
+		p.eachOption(func(c *Command, g *Group, option *Option) {
+			err := option.clearDefault()
+			if err != nil {
+				if _, ok := err.(*Error); !ok {
+					err = p.marshalError(option, err)
+				}
+				s.err = err
+			}
+		})
+
+		s.checkRequired(p)
+	}
+
+	var reterr error
+
+	if s.err != nil {
+		reterr = s.err
+	} else if len(s.command.commands) != 0 && !s.command.SubcommandsOptional {
+		reterr = s.estimateCommand()
+	} else if cmd, ok := s.command.data.(Commander); ok {
+		if p.CommandHandler != nil {
+			reterr = p.CommandHandler(cmd, s.retargs)
+		} else {
+			reterr = cmd.Execute(s.retargs)
+		}
+	} else if p.CommandHandler != nil {
+		reterr = p.CommandHandler(nil, s.retargs)
+	}
+
+	if reterr != nil {
+		var retargs []string
+
+		if ourErr, ok := reterr.(*Error); !ok || ourErr.Type != ErrHelp {
+			retargs = append([]string{s.arg}, s.args...)
+		} else {
+			retargs = s.args
+		}
+
+		return retargs, p.printError(reterr)
+	}
+
+	return s.retargs, nil
+}
+
+func (p *parseState) eof() bool {
+	return len(p.args) == 0
+}
+
+func (p *parseState) pop() string {
+	if p.eof() {
+		return ""
+	}
+
+	p.arg = p.args[0]
+	p.args = p.args[1:]
+
+	return p.arg
+}
+
+func (p *parseState) peek() string {
+	if p.eof() {
+		return ""
+	}
+
+	return p.args[0]
+}
+
+func (p *parseState) checkRequired(parser *Parser) error {
+	c := parser.Command
+
+	var required []*Option
+
+	for c != nil {
+		c.eachGroup(func(g *Group) {
+			for _, option := range g.options {
+				if !option.isSet && option.Required {
+					required = append(required, option)
+				}
+			}
+		})
+
+		c = c.Active
+	}
+
+	if len(required) == 0 {
+		if len(p.positional) > 0 {
+			var reqnames []string
+
+			for _, arg := range p.positional {
+				argRequired := (!arg.isRemaining() && p.command.ArgsRequired) || arg.Required != -1 || arg.RequiredMaximum != -1
+
+				if !argRequired {
+					continue
+				}
+
+				if arg.isRemaining() {
+					if arg.value.Len() < arg.Required {
+						var arguments string
+
+						if arg.Required > 1 {
+							arguments = "arguments, but got only " + fmt.Sprintf("%d", arg.value.Len())
+						} else {
+							arguments = "argument"
+						}
+
+						reqnames = append(reqnames, "`"+arg.Name+" (at least "+fmt.Sprintf("%d", arg.Required)+" "+arguments+")`")
+					} else if arg.RequiredMaximum != -1 && arg.value.Len() > arg.RequiredMaximum {
+						if arg.RequiredMaximum == 0 {
+							reqnames = append(reqnames, "`"+arg.Name+" (zero arguments)`")
+						} else {
+							var arguments string
+
+							if arg.RequiredMaximum > 1 {
+								arguments = "arguments, but got " + fmt.Sprintf("%d", arg.value.Len())
+							} else {
+								arguments = "argument"
+							}
+
+							reqnames = append(reqnames, "`"+arg.Name+" (at most "+fmt.Sprintf("%d", arg.RequiredMaximum)+" "+arguments+")`")
+						}
+					}
+				} else {
+					reqnames = append(reqnames, "`"+arg.Name+"`")
+				}
+			}
+
+			if len(reqnames) == 0 {
+				return nil
+			}
+
+			var msg string
+
+			if len(reqnames) == 1 {
+				msg = fmt.Sprintf("the required argument %s was not provided", reqnames[0])
+			} else {
+				msg = fmt.Sprintf("the required arguments %s and %s were not provided",
+					strings.Join(reqnames[:len(reqnames)-1], ", "), reqnames[len(reqnames)-1])
+			}
+
+			p.err = newError(ErrRequired, msg)
+			return p.err
+		}
+
+		return nil
+	}
+
+	names := make([]string, 0, len(required))
+
+	for _, k := range required {
+		names = append(names, "`"+k.String()+"'")
+	}
+
+	sort.Strings(names)
+
+	var msg string
+
+	if len(names) == 1 {
+		msg = fmt.Sprintf("the required flag %s was not specified", names[0])
+	} else {
+		msg = fmt.Sprintf("the required flags %s and %s were not specified",
+			strings.Join(names[:len(names)-1], ", "), names[len(names)-1])
+	}
+
+	p.err = newError(ErrRequired, msg)
+	return p.err
+}
+
+func (p *parseState) estimateCommand() error {
+	commands := p.command.sortedVisibleCommands()
+	cmdnames := make([]string, len(commands))
+
+	for i, v := range commands {
+		cmdnames[i] = v.Name
+	}
+
+	var msg string
+	var errtype ErrorType
+
+	if len(p.retargs) != 0 {
+		c, l := closestChoice(p.retargs[0], cmdnames)
+		msg = fmt.Sprintf("Unknown command `%s'", p.retargs[0])
+		errtype = ErrUnknownCommand
+
+		if float32(l)/float32(len(c)) < 0.5 {
+			msg = fmt.Sprintf("%s, did you mean `%s'?", msg, c)
+		} else if len(cmdnames) == 1 {
+			msg = fmt.Sprintf("%s. You should use the %s command",
+				msg,
+				cmdnames[0])
+		} else if len(cmdnames) > 1 {
+			msg = fmt.Sprintf("%s. Please specify one command of: %s or %s",
+				msg,
+				strings.Join(cmdnames[:len(cmdnames)-1], ", "),
+				cmdnames[len(cmdnames)-1])
+		}
+	} else {
+		errtype = ErrCommandRequired
+
+		if len(cmdnames) == 1 {
+			msg = fmt.Sprintf("Please specify the %s command", cmdnames[0])
+		} else if len(cmdnames) > 1 {
+			msg = fmt.Sprintf("Please specify one command of: %s or %s",
+				strings.Join(cmdnames[:len(cmdnames)-1], ", "),
+				cmdnames[len(cmdnames)-1])
+		}
+	}
+
+	return newError(errtype, msg)
+}
+
+func (p *Parser) parseOption(s *parseState, name string, option *Option, canarg bool, argument *string) (err error) {
+	if !option.canArgument() {
+		if argument != nil {
+			return newErrorf(ErrNoArgumentForBool, "bool flag `%s' cannot have an argument", option)
+		}
+
+		err = option.set(nil)
+	} else if argument != nil || (canarg && !s.eof()) {
+		var arg string
+
+		if argument != nil {
+			arg = *argument
+		} else {
+			arg = s.pop()
+
+			if validationErr := option.isValidValue(arg); validationErr != nil {
+				return newErrorf(ErrExpectedArgument, validationErr.Error())
+			} else if p.Options&PassDoubleDash != 0 && arg == "--" {
+				return newErrorf(ErrExpectedArgument, "expected argument for flag `%s', but got double dash `--'", option)
+			}
+		}
+
+		if option.tag.Get("unquote") != "false" {
+			arg, err = unquoteIfPossible(arg)
+		}
+
+		if err == nil {
+			err = option.set(&arg)
+		}
+	} else if option.OptionalArgument {
+		option.empty()
+
+		for _, v := range option.OptionalValue {
+			err = option.set(&v)
+
+			if err != nil {
+				break
+			}
+		}
+	} else {
+		err = newErrorf(ErrExpectedArgument, "expected argument for flag `%s'", option)
+	}
+
+	if err != nil {
+		if _, ok := err.(*Error); !ok {
+			err = p.marshalError(option, err)
+		}
+	}
+
+	return err
+}
+
+func (p *Parser) marshalError(option *Option, err error) *Error {
+	s := "invalid argument for flag `%s'"
+
+	expected := p.expectedType(option)
+
+	if expected != "" {
+		s = s + " (expected " + expected + ")"
+	}
+
+	return newErrorf(ErrMarshal, s+": %s",
+		option,
+		err.Error())
+}
+
+func (p *Parser) expectedType(option *Option) string {
+	valueType := option.value.Type()
+
+	if valueType.Kind() == reflect.Func {
+		return ""
+	}
+
+	return valueType.String()
+}
+
+func (p *Parser) parseLong(s *parseState, name string, argument *string) error {
+	if option := s.lookup.longNames[name]; option != nil {
+		// Only long options that are required can consume an argument
+		// from the argument list
+		canarg := !option.OptionalArgument
+
+		return p.parseOption(s, name, option, canarg, argument)
+	}
+
+	return newErrorf(ErrUnknownFlag, "unknown flag `%s'", name)
+}
+
+func (p *Parser) splitShortConcatArg(s *parseState, optname string) (string, *string) {
+	c, n := utf8.DecodeRuneInString(optname)
+
+	if n == len(optname) {
+		return optname, nil
+	}
+
+	first := string(c)
+
+	if option := s.lookup.shortNames[first]; option != nil && option.canArgument() {
+		arg := optname[n:]
+		return first, &arg
+	}
+
+	return optname, nil
+}
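+
+// Illustrative example (not part of the upstream source): for a short option 'o'
+// that accepts an argument, splitShortConcatArg(s, "ofile") yields "o" and a
+// pointer to "file"; for a boolean option 'v', "vvv" is returned unchanged so
+// parseShort handles each rune as a separate flag.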
+
+func (p *Parser) parseShort(s *parseState, optname string, argument *string) error {
+	if argument == nil {
+		optname, argument = p.splitShortConcatArg(s, optname)
+	}
+
+	for i, c := range optname {
+		shortname := string(c)
+
+		if option := s.lookup.shortNames[shortname]; option != nil {
+			// Only the last short argument can consume an argument from
+			// the arguments list, and only if it's non optional
+			canarg := (i+utf8.RuneLen(c) == len(optname)) && !option.OptionalArgument
+
+			if err := p.parseOption(s, shortname, option, canarg, argument); err != nil {
+				return err
+			}
+		} else {
+			return newErrorf(ErrUnknownFlag, "unknown flag `%s'", shortname)
+		}
+
+		// Only the first option can have a concatenated argument, so just
+		// clear argument here
+		argument = nil
+	}
+
+	return nil
+}
+
+func (p *parseState) addArgs(args ...string) error {
+	for len(p.positional) > 0 && len(args) > 0 {
+		arg := p.positional[0]
+
+		if err := convert(args[0], arg.value, arg.tag); err != nil {
+			p.err = err
+			return err
+		}
+
+		if !arg.isRemaining() {
+			p.positional = p.positional[1:]
+		}
+
+		args = args[1:]
+	}
+
+	p.retargs = append(p.retargs, args...)
+	return nil
+}
+
+func (p *Parser) parseNonOption(s *parseState) error {
+	if len(s.positional) > 0 {
+		return s.addArgs(s.arg)
+	}
+
+	if len(s.command.commands) > 0 && len(s.retargs) == 0 {
+		if cmd := s.lookup.commands[s.arg]; cmd != nil {
+			s.command.Active = cmd
+			cmd.fillParseState(s)
+
+			return nil
+		} else if !s.command.SubcommandsOptional {
+			s.addArgs(s.arg)
+			return newErrorf(ErrUnknownCommand, "Unknown command `%s'", s.arg)
+		}
+	}
+
+	return s.addArgs(s.arg)
+}
+
+func (p *Parser) showBuiltinHelp() error {
+	var b bytes.Buffer
+
+	p.WriteHelp(&b)
+	return newError(ErrHelp, b.String())
+}
+
+func (p *Parser) printError(err error) error {
+	if err != nil && (p.Options&PrintErrors) != None {
+		flagsErr, ok := err.(*Error)
+
+		if ok && flagsErr.Type == ErrHelp {
+			fmt.Fprintln(os.Stdout, err)
+		} else {
+			fmt.Fprintln(os.Stderr, err)
+		}
+	}
+
+	return err
+}
diff --git a/vendor/github.com/jessevdk/go-flags/termsize.go b/vendor/github.com/jessevdk/go-flags/termsize.go
new file mode 100644
index 0000000..829e477
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/termsize.go
@@ -0,0 +1,15 @@
+// +build !windows,!plan9,!appengine,!wasm
+
+package flags
+
+import (
+	"golang.org/x/sys/unix"
+)
+
+func getTerminalColumns() int {
+	ws, err := unix.IoctlGetWinsize(0, unix.TIOCGWINSZ)
+	if err != nil {
+		return 80
+	}
+	return int(ws.Col)
+}
diff --git a/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go b/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go
new file mode 100644
index 0000000..c1ff186
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/termsize_nosysioctl.go
@@ -0,0 +1,7 @@
+// +build plan9 appengine wasm
+
+package flags
+
+func getTerminalColumns() int {
+	return 80
+}
diff --git a/vendor/github.com/jessevdk/go-flags/termsize_windows.go b/vendor/github.com/jessevdk/go-flags/termsize_windows.go
new file mode 100644
index 0000000..5c0fa6b
--- /dev/null
+++ b/vendor/github.com/jessevdk/go-flags/termsize_windows.go
@@ -0,0 +1,85 @@
+// +build windows
+
+package flags
+
+import (
+	"syscall"
+	"unsafe"
+)
+
+type (
+	SHORT int16
+	WORD  uint16
+
+	SMALL_RECT struct {
+		Left   SHORT
+		Top    SHORT
+		Right  SHORT
+		Bottom SHORT
+	}
+
+	COORD struct {
+		X SHORT
+		Y SHORT
+	}
+
+	CONSOLE_SCREEN_BUFFER_INFO struct {
+		Size              COORD
+		CursorPosition    COORD
+		Attributes        WORD
+		Window            SMALL_RECT
+		MaximumWindowSize COORD
+	}
+)
+
+var kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
+var getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo")
+
+func getError(r1, r2 uintptr, lastErr error) error {
+	// If the function fails, the return value is zero.
+	if r1 == 0 {
+		if lastErr != nil {
+			return lastErr
+		}
+		return syscall.EINVAL
+	}
+	return nil
+}
+
+func getStdHandle(stdhandle int) (uintptr, error) {
+	handle, err := syscall.GetStdHandle(stdhandle)
+	if err != nil {
+		return 0, err
+	}
+	return uintptr(handle), nil
+}
+
+// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer.
+// http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx
+func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) {
+	var info CONSOLE_SCREEN_BUFFER_INFO
+	if err := getError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)); err != nil {
+		return nil, err
+	}
+	return &info, nil
+}
+
+func getTerminalColumns() int {
+	defaultWidth := 80
+
+	stdoutHandle, err := getStdHandle(syscall.STD_OUTPUT_HANDLE)
+	if err != nil {
+		return defaultWidth
+	}
+
+	info, err := GetConsoleScreenBufferInfo(stdoutHandle)
+	if err != nil {
+		return defaultWidth
+	}
+
+	if info.MaximumWindowSize.X > 0 {
+		return int(info.MaximumWindowSize.X)
+	}
+
+	return defaultWidth
+}
diff --git a/vendor/github.com/mattn/go-runewidth/.travis.yml b/vendor/github.com/mattn/go-runewidth/.travis.yml
new file mode 100644
index 0000000..6a21813
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/.travis.yml
@@ -0,0 +1,16 @@
+language: go
+sudo: false
+go:
+  - 1.13.x
+  - tip
+
+before_install:
+  - go get -t -v ./...
+
+script:
+  - go generate
+  - git diff --cached --exit-code
+  - ./go.test.sh
+
+after_success:
+  - bash <(curl -s https://codecov.io/bash)
diff --git a/vendor/github.com/mattn/go-runewidth/LICENSE b/vendor/github.com/mattn/go-runewidth/LICENSE
new file mode 100644
index 0000000..91b5cef
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Yasuhiro Matsumoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/mattn/go-runewidth/README.md b/vendor/github.com/mattn/go-runewidth/README.md
new file mode 100644
index 0000000..aa56ab9
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/README.md
@@ -0,0 +1,27 @@
+go-runewidth
+============
+
+[![Build Status](https://travis-ci.org/mattn/go-runewidth.png?branch=master)](https://travis-ci.org/mattn/go-runewidth)
+[![Codecov](https://codecov.io/gh/mattn/go-runewidth/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-runewidth)
+[![GoDoc](https://godoc.org/github.com/mattn/go-runewidth?status.svg)](http://godoc.org/github.com/mattn/go-runewidth)
+[![Go Report Card](https://goreportcard.com/badge/github.com/mattn/go-runewidth)](https://goreportcard.com/report/github.com/mattn/go-runewidth)
+
+Provides functions to get the fixed width of a character or string.
+
+Usage
+-----
+
+```go
+runewidth.StringWidth("つのだ☆HIRO") == 12
+```
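+
+A couple of additional illustrative calls (examples added for this document, not
+from upstream; they hold under both the default and the East Asian condition):
+
+```go
+runewidth.RuneWidth('世')    // 2
+runewidth.RuneWidth('a')     // 1
+runewidth.StringWidth("abc") // 3
+```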
+
+
+Author
+------
+
+Yasuhiro Matsumoto
+
+License
+-------
+
+under the MIT License: http://mattn.mit-license.org/2013
diff --git a/vendor/github.com/mattn/go-runewidth/go.mod b/vendor/github.com/mattn/go-runewidth/go.mod
new file mode 100644
index 0000000..8a9d524
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/go.mod
@@ -0,0 +1,5 @@
+module github.com/mattn/go-runewidth
+
+go 1.9
+
+require github.com/rivo/uniseg v0.1.0
diff --git a/vendor/github.com/mattn/go-runewidth/go.sum b/vendor/github.com/mattn/go-runewidth/go.sum
new file mode 100644
index 0000000..0213566
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/go.sum
@@ -0,0 +1,2 @@
+github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY=
+github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
diff --git a/vendor/github.com/mattn/go-runewidth/go.test.sh b/vendor/github.com/mattn/go-runewidth/go.test.sh
new file mode 100644
index 0000000..012162b
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/go.test.sh
@@ -0,0 +1,12 @@
+#!/usr/bin/env bash
+
+set -e
+echo "" > coverage.txt
+
+for d in $(go list ./... | grep -v vendor); do
+    go test -race -coverprofile=profile.out -covermode=atomic "$d"
+    if [ -f profile.out ]; then
+        cat profile.out >> coverage.txt
+        rm profile.out
+    fi
+done
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth.go b/vendor/github.com/mattn/go-runewidth/runewidth.go
new file mode 100644
index 0000000..f3871a6
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth.go
@@ -0,0 +1,241 @@
+package runewidth
+
+import (
+	"os"
+
+	"github.com/rivo/uniseg"
+)
+
+//go:generate go run script/generate.go
+
+var (
+	// EastAsianWidth will be set true if the current locale is CJK
+	EastAsianWidth bool
+
+	// DefaultCondition is the Condition for the current locale
+	DefaultCondition = &Condition{}
+)
+
+func init() {
+	handleEnv()
+}
+
+func handleEnv() {
+	env := os.Getenv("RUNEWIDTH_EASTASIAN")
+	if env == "" {
+		EastAsianWidth = IsEastAsian()
+	} else {
+		EastAsianWidth = env == "1"
+	}
+	// update DefaultCondition
+	DefaultCondition.EastAsianWidth = EastAsianWidth
+}
+
+type interval struct {
+	first rune
+	last  rune
+}
+
+type table []interval
+
+func inTables(r rune, ts ...table) bool {
+	for _, t := range ts {
+		if inTable(r, t) {
+			return true
+		}
+	}
+	return false
+}
+
+func inTable(r rune, t table) bool {
+	if r < t[0].first {
+		return false
+	}
+
+	bot := 0
+	top := len(t) - 1
+	for top >= bot {
+		mid := (bot + top) >> 1
+
+		switch {
+		case t[mid].last < r:
+			bot = mid + 1
+		case t[mid].first > r:
+			top = mid - 1
+		default:
+			return true
+		}
+	}
+
+	return false
+}
+
+var private = table{
+	{0x00E000, 0x00F8FF}, {0x0F0000, 0x0FFFFD}, {0x100000, 0x10FFFD},
+}
+
+var nonprint = table{
+	{0x0000, 0x001F}, {0x007F, 0x009F}, {0x00AD, 0x00AD},
+	{0x070F, 0x070F}, {0x180B, 0x180E}, {0x200B, 0x200F},
+	{0x2028, 0x202E}, {0x206A, 0x206F}, {0xD800, 0xDFFF},
+	{0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFB}, {0xFFFE, 0xFFFF},
+}
+
+// Condition has the flag EastAsianWidth, indicating whether the current locale is CJK or not.
+type Condition struct {
+	EastAsianWidth bool
+}
+
+// NewCondition returns a new instance of Condition based on the current locale.
+func NewCondition() *Condition {
+	return &Condition{
+		EastAsianWidth: EastAsianWidth,
+	}
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func (c *Condition) RuneWidth(r rune) int {
+	switch {
+	case r < 0 || r > 0x10FFFF || inTables(r, nonprint, combining, notassigned):
+		return 0
+	case (c.EastAsianWidth && IsAmbiguousWidth(r)) || inTables(r, doublewidth):
+		return 2
+	default:
+		return 1
+	}
+}
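+
+// Illustrative values (not part of the upstream source): RuneWidth('a') is 1,
+// RuneWidth('世') is 2 (East Asian Wide), and RuneWidth of a control character
+// such as '\n' is 0.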
+
+// StringWidth returns the display width of the string s in cells.
+func (c *Condition) StringWidth(s string) (width int) {
+	g := uniseg.NewGraphemes(s)
+	for g.Next() {
+		var chWidth int
+		for _, r := range g.Runes() {
+			chWidth = c.RuneWidth(r)
+			if chWidth > 0 {
+				break // Our best guess at this point is to use the width of the first non-zero-width rune.
+			}
+		}
+		width += chWidth
+	}
+	return
+}
+
+// Truncate returns the string s truncated to w cells, appending tail when truncation occurs.
+func (c *Condition) Truncate(s string, w int, tail string) string {
+	if c.StringWidth(s) <= w {
+		return s
+	}
+	w -= c.StringWidth(tail)
+	var width int
+	pos := len(s)
+	g := uniseg.NewGraphemes(s)
+	for g.Next() {
+		var chWidth int
+		for _, r := range g.Runes() {
+			chWidth = c.RuneWidth(r)
+			if chWidth > 0 {
+				break // See StringWidth() for details.
+			}
+		}
+		if width+chWidth > w {
+			pos, _ = g.Positions()
+			break
+		}
+		width += chWidth
+	}
+	return s[:pos] + tail
+}
+
+// Wrap returns the string s wrapped at w cells.
+func (c *Condition) Wrap(s string, w int) string {
+	width := 0
+	out := ""
+	for _, r := range []rune(s) {
+		cw := c.RuneWidth(r)
+		if r == '\n' {
+			out += string(r)
+			width = 0
+			continue
+		} else if width+cw > w {
+			out += "\n"
+			width = 0
+			out += string(r)
+			width += cw
+			continue
+		}
+		out += string(r)
+		width += cw
+	}
+	return out
+}
+
+// FillLeft returns the string s left-padded with spaces to w cells.
+func (c *Condition) FillLeft(s string, w int) string {
+	width := c.StringWidth(s)
+	count := w - width
+	if count > 0 {
+		b := make([]byte, count)
+		for i := range b {
+			b[i] = ' '
+		}
+		return string(b) + s
+	}
+	return s
+}
+
+// FillRight returns the string s right-padded with spaces to w cells.
+func (c *Condition) FillRight(s string, w int) string {
+	width := c.StringWidth(s)
+	count := w - width
+	if count > 0 {
+		b := make([]byte, count)
+		for i := range b {
+			b[i] = ' '
+		}
+		return s + string(b)
+	}
+	return s
+}
+
+// RuneWidth returns the number of cells in r.
+// See http://www.unicode.org/reports/tr11/
+func RuneWidth(r rune) int {
+	return DefaultCondition.RuneWidth(r)
+}
+
+// IsAmbiguousWidth reports whether r has ambiguous width.
+func IsAmbiguousWidth(r rune) bool {
+	return inTables(r, private, ambiguous)
+}
+
+// IsNeutralWidth reports whether r has neutral width.
+func IsNeutralWidth(r rune) bool {
+	return inTable(r, neutral)
+}
+
+// StringWidth returns the display width of the string s in cells.
+func StringWidth(s string) (width int) {
+	return DefaultCondition.StringWidth(s)
+}
+
+// Truncate returns the string s truncated to w cells, appending tail when truncation occurs.
+func Truncate(s string, w int, tail string) string {
+	return DefaultCondition.Truncate(s, w, tail)
+}
+
+// Wrap returns the string s wrapped at w cells.
+func Wrap(s string, w int) string {
+	return DefaultCondition.Wrap(s, w)
+}
+
+// FillLeft returns the string s left-padded with spaces to w cells.
+func FillLeft(s string, w int) string {
+	return DefaultCondition.FillLeft(s, w)
+}
+
+// FillRight returns the string s right-padded with spaces to w cells.
+func FillRight(s string, w int) string {
+	return DefaultCondition.FillRight(s, w)
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
new file mode 100644
index 0000000..7d99f6e
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_appengine.go
@@ -0,0 +1,8 @@
+// +build appengine
+
+package runewidth
+
+// IsEastAsian returns true if the current locale is CJK.
+func IsEastAsian() bool {
+	return false
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_js.go b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
new file mode 100644
index 0000000..c5fdf40
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_js.go
@@ -0,0 +1,9 @@
+// +build js
+// +build !appengine
+
+package runewidth
+
+// IsEastAsian returns true if the current locale is CJK.
+func IsEastAsian() bool {
+	// TODO: Implement this for the web. Detect east asian in a compatible way, and return true.
+	return false
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_posix.go b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
new file mode 100644
index 0000000..480ad74
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_posix.go
@@ -0,0 +1,82 @@
+// +build !windows
+// +build !js
+// +build !appengine
+
+package runewidth
+
+import (
+	"os"
+	"regexp"
+	"strings"
+)
+
+var reLoc = regexp.MustCompile(`^[a-z][a-z][a-z]?(?:_[A-Z][A-Z])?\.(.+)`)
+
+var mblenTable = map[string]int{
+	"utf-8":   6,
+	"utf8":    6,
+	"jis":     8,
+	"eucjp":   3,
+	"euckr":   2,
+	"euccn":   2,
+	"sjis":    2,
+	"cp932":   2,
+	"cp51932": 2,
+	"cp936":   2,
+	"cp949":   2,
+	"cp950":   2,
+	"big5":    2,
+	"gbk":     2,
+	"gb2312":  2,
+}
+
+func isEastAsian(locale string) bool {
+	charset := strings.ToLower(locale)
+	r := reLoc.FindStringSubmatch(locale)
+	if len(r) == 2 {
+		charset = strings.ToLower(r[1])
+	}
+
+	if strings.HasSuffix(charset, "@cjk_narrow") {
+		return false
+	}
+
+	for pos, b := range []byte(charset) {
+		if b == '@' {
+			charset = charset[:pos]
+			break
+		}
+	}
+	max := 1
+	if m, ok := mblenTable[charset]; ok {
+		max = m
+	}
+	if max > 1 && (charset[0] != 'u' ||
+		strings.HasPrefix(locale, "ja") ||
+		strings.HasPrefix(locale, "ko") ||
+		strings.HasPrefix(locale, "zh")) {
+		return true
+	}
+	return false
+}
+
+// IsEastAsian returns true if the current locale is CJK.
+func IsEastAsian() bool {
+	locale := os.Getenv("LC_ALL")
+	if locale == "" {
+		locale = os.Getenv("LC_CTYPE")
+	}
+	if locale == "" {
+		locale = os.Getenv("LANG")
+	}
+
+	// ignore C locale
+	if locale == "POSIX" || locale == "C" {
+		return false
+	}
+	if len(locale) > 1 && locale[0] == 'C' && (locale[1] == '.' || locale[1] == '-') {
+		return false
+	}
+
+	return isEastAsian(locale)
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_table.go b/vendor/github.com/mattn/go-runewidth/runewidth_table.go
new file mode 100644
index 0000000..b27d77d
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_table.go
@@ -0,0 +1,437 @@
+// Code generated by script/generate.go. DO NOT EDIT.
+
+package runewidth
+
+var combining = table{
+	{0x0300, 0x036F}, {0x0483, 0x0489}, {0x07EB, 0x07F3},
+	{0x0C00, 0x0C00}, {0x0C04, 0x0C04}, {0x0D00, 0x0D01},
+	{0x135D, 0x135F}, {0x1A7F, 0x1A7F}, {0x1AB0, 0x1AC0},
+	{0x1B6B, 0x1B73}, {0x1DC0, 0x1DF9}, {0x1DFB, 0x1DFF},
+	{0x20D0, 0x20F0}, {0x2CEF, 0x2CF1}, {0x2DE0, 0x2DFF},
+	{0x3099, 0x309A}, {0xA66F, 0xA672}, {0xA674, 0xA67D},
+	{0xA69E, 0xA69F}, {0xA6F0, 0xA6F1}, {0xA8E0, 0xA8F1},
+	{0xFE20, 0xFE2F}, {0x101FD, 0x101FD}, {0x10376, 0x1037A},
+	{0x10EAB, 0x10EAC}, {0x10F46, 0x10F50}, {0x11300, 0x11301},
+	{0x1133B, 0x1133C}, {0x11366, 0x1136C}, {0x11370, 0x11374},
+	{0x16AF0, 0x16AF4}, {0x1D165, 0x1D169}, {0x1D16D, 0x1D172},
+	{0x1D17B, 0x1D182}, {0x1D185, 0x1D18B}, {0x1D1AA, 0x1D1AD},
+	{0x1D242, 0x1D244}, {0x1E000, 0x1E006}, {0x1E008, 0x1E018},
+	{0x1E01B, 0x1E021}, {0x1E023, 0x1E024}, {0x1E026, 0x1E02A},
+	{0x1E8D0, 0x1E8D6},
+}
+
+var doublewidth = table{
+	{0x1100, 0x115F}, {0x231A, 0x231B}, {0x2329, 0x232A},
+	{0x23E9, 0x23EC}, {0x23F0, 0x23F0}, {0x23F3, 0x23F3},
+	{0x25FD, 0x25FE}, {0x2614, 0x2615}, {0x2648, 0x2653},
+	{0x267F, 0x267F}, {0x2693, 0x2693}, {0x26A1, 0x26A1},
+	{0x26AA, 0x26AB}, {0x26BD, 0x26BE}, {0x26C4, 0x26C5},
+	{0x26CE, 0x26CE}, {0x26D4, 0x26D4}, {0x26EA, 0x26EA},
+	{0x26F2, 0x26F3}, {0x26F5, 0x26F5}, {0x26FA, 0x26FA},
+	{0x26FD, 0x26FD}, {0x2705, 0x2705}, {0x270A, 0x270B},
+	{0x2728, 0x2728}, {0x274C, 0x274C}, {0x274E, 0x274E},
+	{0x2753, 0x2755}, {0x2757, 0x2757}, {0x2795, 0x2797},
+	{0x27B0, 0x27B0}, {0x27BF, 0x27BF}, {0x2B1B, 0x2B1C},
+	{0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x2E80, 0x2E99},
+	{0x2E9B, 0x2EF3}, {0x2F00, 0x2FD5}, {0x2FF0, 0x2FFB},
+	{0x3000, 0x303E}, {0x3041, 0x3096}, {0x3099, 0x30FF},
+	{0x3105, 0x312F}, {0x3131, 0x318E}, {0x3190, 0x31E3},
+	{0x31F0, 0x321E}, {0x3220, 0x3247}, {0x3250, 0x4DBF},
+	{0x4E00, 0xA48C}, {0xA490, 0xA4C6}, {0xA960, 0xA97C},
+	{0xAC00, 0xD7A3}, {0xF900, 0xFAFF}, {0xFE10, 0xFE19},
+	{0xFE30, 0xFE52}, {0xFE54, 0xFE66}, {0xFE68, 0xFE6B},
+	{0xFF01, 0xFF60}, {0xFFE0, 0xFFE6}, {0x16FE0, 0x16FE4},
+	{0x16FF0, 0x16FF1}, {0x17000, 0x187F7}, {0x18800, 0x18CD5},
+	{0x18D00, 0x18D08}, {0x1B000, 0x1B11E}, {0x1B150, 0x1B152},
+	{0x1B164, 0x1B167}, {0x1B170, 0x1B2FB}, {0x1F004, 0x1F004},
+	{0x1F0CF, 0x1F0CF}, {0x1F18E, 0x1F18E}, {0x1F191, 0x1F19A},
+	{0x1F200, 0x1F202}, {0x1F210, 0x1F23B}, {0x1F240, 0x1F248},
+	{0x1F250, 0x1F251}, {0x1F260, 0x1F265}, {0x1F300, 0x1F320},
+	{0x1F32D, 0x1F335}, {0x1F337, 0x1F37C}, {0x1F37E, 0x1F393},
+	{0x1F3A0, 0x1F3CA}, {0x1F3CF, 0x1F3D3}, {0x1F3E0, 0x1F3F0},
+	{0x1F3F4, 0x1F3F4}, {0x1F3F8, 0x1F43E}, {0x1F440, 0x1F440},
+	{0x1F442, 0x1F4FC}, {0x1F4FF, 0x1F53D}, {0x1F54B, 0x1F54E},
+	{0x1F550, 0x1F567}, {0x1F57A, 0x1F57A}, {0x1F595, 0x1F596},
+	{0x1F5A4, 0x1F5A4}, {0x1F5FB, 0x1F64F}, {0x1F680, 0x1F6C5},
+	{0x1F6CC, 0x1F6CC}, {0x1F6D0, 0x1F6D2}, {0x1F6D5, 0x1F6D7},
+	{0x1F6EB, 0x1F6EC}, {0x1F6F4, 0x1F6FC}, {0x1F7E0, 0x1F7EB},
+	{0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1F978},
+	{0x1F97A, 0x1F9CB}, {0x1F9CD, 0x1F9FF}, {0x1FA70, 0x1FA74},
+	{0x1FA78, 0x1FA7A}, {0x1FA80, 0x1FA86}, {0x1FA90, 0x1FAA8},
+	{0x1FAB0, 0x1FAB6}, {0x1FAC0, 0x1FAC2}, {0x1FAD0, 0x1FAD6},
+	{0x20000, 0x2FFFD}, {0x30000, 0x3FFFD},
+}
+
+var ambiguous = table{
+	{0x00A1, 0x00A1}, {0x00A4, 0x00A4}, {0x00A7, 0x00A8},
+	{0x00AA, 0x00AA}, {0x00AD, 0x00AE}, {0x00B0, 0x00B4},
+	{0x00B6, 0x00BA}, {0x00BC, 0x00BF}, {0x00C6, 0x00C6},
+	{0x00D0, 0x00D0}, {0x00D7, 0x00D8}, {0x00DE, 0x00E1},
+	{0x00E6, 0x00E6}, {0x00E8, 0x00EA}, {0x00EC, 0x00ED},
+	{0x00F0, 0x00F0}, {0x00F2, 0x00F3}, {0x00F7, 0x00FA},
+	{0x00FC, 0x00FC}, {0x00FE, 0x00FE}, {0x0101, 0x0101},
+	{0x0111, 0x0111}, {0x0113, 0x0113}, {0x011B, 0x011B},
+	{0x0126, 0x0127}, {0x012B, 0x012B}, {0x0131, 0x0133},
+	{0x0138, 0x0138}, {0x013F, 0x0142}, {0x0144, 0x0144},
+	{0x0148, 0x014B}, {0x014D, 0x014D}, {0x0152, 0x0153},
+	{0x0166, 0x0167}, {0x016B, 0x016B}, {0x01CE, 0x01CE},
+	{0x01D0, 0x01D0}, {0x01D2, 0x01D2}, {0x01D4, 0x01D4},
+	{0x01D6, 0x01D6}, {0x01D8, 0x01D8}, {0x01DA, 0x01DA},
+	{0x01DC, 0x01DC}, {0x0251, 0x0251}, {0x0261, 0x0261},
+	{0x02C4, 0x02C4}, {0x02C7, 0x02C7}, {0x02C9, 0x02CB},
+	{0x02CD, 0x02CD}, {0x02D0, 0x02D0}, {0x02D8, 0x02DB},
+	{0x02DD, 0x02DD}, {0x02DF, 0x02DF}, {0x0300, 0x036F},
+	{0x0391, 0x03A1}, {0x03A3, 0x03A9}, {0x03B1, 0x03C1},
+	{0x03C3, 0x03C9}, {0x0401, 0x0401}, {0x0410, 0x044F},
+	{0x0451, 0x0451}, {0x2010, 0x2010}, {0x2013, 0x2016},
+	{0x2018, 0x2019}, {0x201C, 0x201D}, {0x2020, 0x2022},
+	{0x2024, 0x2027}, {0x2030, 0x2030}, {0x2032, 0x2033},
+	{0x2035, 0x2035}, {0x203B, 0x203B}, {0x203E, 0x203E},
+	{0x2074, 0x2074}, {0x207F, 0x207F}, {0x2081, 0x2084},
+	{0x20AC, 0x20AC}, {0x2103, 0x2103}, {0x2105, 0x2105},
+	{0x2109, 0x2109}, {0x2113, 0x2113}, {0x2116, 0x2116},
+	{0x2121, 0x2122}, {0x2126, 0x2126}, {0x212B, 0x212B},
+	{0x2153, 0x2154}, {0x215B, 0x215E}, {0x2160, 0x216B},
+	{0x2170, 0x2179}, {0x2189, 0x2189}, {0x2190, 0x2199},
+	{0x21B8, 0x21B9}, {0x21D2, 0x21D2}, {0x21D4, 0x21D4},
+	{0x21E7, 0x21E7}, {0x2200, 0x2200}, {0x2202, 0x2203},
+	{0x2207, 0x2208}, {0x220B, 0x220B}, {0x220F, 0x220F},
+	{0x2211, 0x2211}, {0x2215, 0x2215}, {0x221A, 0x221A},
+	{0x221D, 0x2220}, {0x2223, 0x2223}, {0x2225, 0x2225},
+	{0x2227, 0x222C}, {0x222E, 0x222E}, {0x2234, 0x2237},
+	{0x223C, 0x223D}, {0x2248, 0x2248}, {0x224C, 0x224C},
+	{0x2252, 0x2252}, {0x2260, 0x2261}, {0x2264, 0x2267},
+	{0x226A, 0x226B}, {0x226E, 0x226F}, {0x2282, 0x2283},
+	{0x2286, 0x2287}, {0x2295, 0x2295}, {0x2299, 0x2299},
+	{0x22A5, 0x22A5}, {0x22BF, 0x22BF}, {0x2312, 0x2312},
+	{0x2460, 0x24E9}, {0x24EB, 0x254B}, {0x2550, 0x2573},
+	{0x2580, 0x258F}, {0x2592, 0x2595}, {0x25A0, 0x25A1},
+	{0x25A3, 0x25A9}, {0x25B2, 0x25B3}, {0x25B6, 0x25B7},
+	{0x25BC, 0x25BD}, {0x25C0, 0x25C1}, {0x25C6, 0x25C8},
+	{0x25CB, 0x25CB}, {0x25CE, 0x25D1}, {0x25E2, 0x25E5},
+	{0x25EF, 0x25EF}, {0x2605, 0x2606}, {0x2609, 0x2609},
+	{0x260E, 0x260F}, {0x261C, 0x261C}, {0x261E, 0x261E},
+	{0x2640, 0x2640}, {0x2642, 0x2642}, {0x2660, 0x2661},
+	{0x2663, 0x2665}, {0x2667, 0x266A}, {0x266C, 0x266D},
+	{0x266F, 0x266F}, {0x269E, 0x269F}, {0x26BF, 0x26BF},
+	{0x26C6, 0x26CD}, {0x26CF, 0x26D3}, {0x26D5, 0x26E1},
+	{0x26E3, 0x26E3}, {0x26E8, 0x26E9}, {0x26EB, 0x26F1},
+	{0x26F4, 0x26F4}, {0x26F6, 0x26F9}, {0x26FB, 0x26FC},
+	{0x26FE, 0x26FF}, {0x273D, 0x273D}, {0x2776, 0x277F},
+	{0x2B56, 0x2B59}, {0x3248, 0x324F}, {0xE000, 0xF8FF},
+	{0xFE00, 0xFE0F}, {0xFFFD, 0xFFFD}, {0x1F100, 0x1F10A},
+	{0x1F110, 0x1F12D}, {0x1F130, 0x1F169}, {0x1F170, 0x1F18D},
+	{0x1F18F, 0x1F190}, {0x1F19B, 0x1F1AC}, {0xE0100, 0xE01EF},
+	{0xF0000, 0xFFFFD}, {0x100000, 0x10FFFD},
+}
+var notassigned = table{
+	{0x27E6, 0x27ED}, {0x2985, 0x2986},
+}
+
+var neutral = table{
+	{0x0000, 0x001F}, {0x007F, 0x00A0}, {0x00A9, 0x00A9},
+	{0x00AB, 0x00AB}, {0x00B5, 0x00B5}, {0x00BB, 0x00BB},
+	{0x00C0, 0x00C5}, {0x00C7, 0x00CF}, {0x00D1, 0x00D6},
+	{0x00D9, 0x00DD}, {0x00E2, 0x00E5}, {0x00E7, 0x00E7},
+	{0x00EB, 0x00EB}, {0x00EE, 0x00EF}, {0x00F1, 0x00F1},
+	{0x00F4, 0x00F6}, {0x00FB, 0x00FB}, {0x00FD, 0x00FD},
+	{0x00FF, 0x0100}, {0x0102, 0x0110}, {0x0112, 0x0112},
+	{0x0114, 0x011A}, {0x011C, 0x0125}, {0x0128, 0x012A},
+	{0x012C, 0x0130}, {0x0134, 0x0137}, {0x0139, 0x013E},
+	{0x0143, 0x0143}, {0x0145, 0x0147}, {0x014C, 0x014C},
+	{0x014E, 0x0151}, {0x0154, 0x0165}, {0x0168, 0x016A},
+	{0x016C, 0x01CD}, {0x01CF, 0x01CF}, {0x01D1, 0x01D1},
+	{0x01D3, 0x01D3}, {0x01D5, 0x01D5}, {0x01D7, 0x01D7},
+	{0x01D9, 0x01D9}, {0x01DB, 0x01DB}, {0x01DD, 0x0250},
+	{0x0252, 0x0260}, {0x0262, 0x02C3}, {0x02C5, 0x02C6},
+	{0x02C8, 0x02C8}, {0x02CC, 0x02CC}, {0x02CE, 0x02CF},
+	{0x02D1, 0x02D7}, {0x02DC, 0x02DC}, {0x02DE, 0x02DE},
+	{0x02E0, 0x02FF}, {0x0370, 0x0377}, {0x037A, 0x037F},
+	{0x0384, 0x038A}, {0x038C, 0x038C}, {0x038E, 0x0390},
+	{0x03AA, 0x03B0}, {0x03C2, 0x03C2}, {0x03CA, 0x0400},
+	{0x0402, 0x040F}, {0x0450, 0x0450}, {0x0452, 0x052F},
+	{0x0531, 0x0556}, {0x0559, 0x058A}, {0x058D, 0x058F},
+	{0x0591, 0x05C7}, {0x05D0, 0x05EA}, {0x05EF, 0x05F4},
+	{0x0600, 0x061C}, {0x061E, 0x070D}, {0x070F, 0x074A},
+	{0x074D, 0x07B1}, {0x07C0, 0x07FA}, {0x07FD, 0x082D},
+	{0x0830, 0x083E}, {0x0840, 0x085B}, {0x085E, 0x085E},
+	{0x0860, 0x086A}, {0x08A0, 0x08B4}, {0x08B6, 0x08C7},
+	{0x08D3, 0x0983}, {0x0985, 0x098C}, {0x098F, 0x0990},
+	{0x0993, 0x09A8}, {0x09AA, 0x09B0}, {0x09B2, 0x09B2},
+	{0x09B6, 0x09B9}, {0x09BC, 0x09C4}, {0x09C7, 0x09C8},
+	{0x09CB, 0x09CE}, {0x09D7, 0x09D7}, {0x09DC, 0x09DD},
+	{0x09DF, 0x09E3}, {0x09E6, 0x09FE}, {0x0A01, 0x0A03},
+	{0x0A05, 0x0A0A}, {0x0A0F, 0x0A10}, {0x0A13, 0x0A28},
+	{0x0A2A, 0x0A30}, {0x0A32, 0x0A33}, {0x0A35, 0x0A36},
+	{0x0A38, 0x0A39}, {0x0A3C, 0x0A3C}, {0x0A3E, 0x0A42},
+	{0x0A47, 0x0A48}, {0x0A4B, 0x0A4D}, {0x0A51, 0x0A51},
+	{0x0A59, 0x0A5C}, {0x0A5E, 0x0A5E}, {0x0A66, 0x0A76},
+	{0x0A81, 0x0A83}, {0x0A85, 0x0A8D}, {0x0A8F, 0x0A91},
+	{0x0A93, 0x0AA8}, {0x0AAA, 0x0AB0}, {0x0AB2, 0x0AB3},
+	{0x0AB5, 0x0AB9}, {0x0ABC, 0x0AC5}, {0x0AC7, 0x0AC9},
+	{0x0ACB, 0x0ACD}, {0x0AD0, 0x0AD0}, {0x0AE0, 0x0AE3},
+	{0x0AE6, 0x0AF1}, {0x0AF9, 0x0AFF}, {0x0B01, 0x0B03},
+	{0x0B05, 0x0B0C}, {0x0B0F, 0x0B10}, {0x0B13, 0x0B28},
+	{0x0B2A, 0x0B30}, {0x0B32, 0x0B33}, {0x0B35, 0x0B39},
+	{0x0B3C, 0x0B44}, {0x0B47, 0x0B48}, {0x0B4B, 0x0B4D},
+	{0x0B55, 0x0B57}, {0x0B5C, 0x0B5D}, {0x0B5F, 0x0B63},
+	{0x0B66, 0x0B77}, {0x0B82, 0x0B83}, {0x0B85, 0x0B8A},
+	{0x0B8E, 0x0B90}, {0x0B92, 0x0B95}, {0x0B99, 0x0B9A},
+	{0x0B9C, 0x0B9C}, {0x0B9E, 0x0B9F}, {0x0BA3, 0x0BA4},
+	{0x0BA8, 0x0BAA}, {0x0BAE, 0x0BB9}, {0x0BBE, 0x0BC2},
+	{0x0BC6, 0x0BC8}, {0x0BCA, 0x0BCD}, {0x0BD0, 0x0BD0},
+	{0x0BD7, 0x0BD7}, {0x0BE6, 0x0BFA}, {0x0C00, 0x0C0C},
+	{0x0C0E, 0x0C10}, {0x0C12, 0x0C28}, {0x0C2A, 0x0C39},
+	{0x0C3D, 0x0C44}, {0x0C46, 0x0C48}, {0x0C4A, 0x0C4D},
+	{0x0C55, 0x0C56}, {0x0C58, 0x0C5A}, {0x0C60, 0x0C63},
+	{0x0C66, 0x0C6F}, {0x0C77, 0x0C8C}, {0x0C8E, 0x0C90},
+	{0x0C92, 0x0CA8}, {0x0CAA, 0x0CB3}, {0x0CB5, 0x0CB9},
+	{0x0CBC, 0x0CC4}, {0x0CC6, 0x0CC8}, {0x0CCA, 0x0CCD},
+	{0x0CD5, 0x0CD6}, {0x0CDE, 0x0CDE}, {0x0CE0, 0x0CE3},
+	{0x0CE6, 0x0CEF}, {0x0CF1, 0x0CF2}, {0x0D00, 0x0D0C},
+	{0x0D0E, 0x0D10}, {0x0D12, 0x0D44}, {0x0D46, 0x0D48},
+	{0x0D4A, 0x0D4F}, {0x0D54, 0x0D63}, {0x0D66, 0x0D7F},
+	{0x0D81, 0x0D83}, {0x0D85, 0x0D96}, {0x0D9A, 0x0DB1},
+	{0x0DB3, 0x0DBB}, {0x0DBD, 0x0DBD}, {0x0DC0, 0x0DC6},
+	{0x0DCA, 0x0DCA}, {0x0DCF, 0x0DD4}, {0x0DD6, 0x0DD6},
+	{0x0DD8, 0x0DDF}, {0x0DE6, 0x0DEF}, {0x0DF2, 0x0DF4},
+	{0x0E01, 0x0E3A}, {0x0E3F, 0x0E5B}, {0x0E81, 0x0E82},
+	{0x0E84, 0x0E84}, {0x0E86, 0x0E8A}, {0x0E8C, 0x0EA3},
+	{0x0EA5, 0x0EA5}, {0x0EA7, 0x0EBD}, {0x0EC0, 0x0EC4},
+	{0x0EC6, 0x0EC6}, {0x0EC8, 0x0ECD}, {0x0ED0, 0x0ED9},
+	{0x0EDC, 0x0EDF}, {0x0F00, 0x0F47}, {0x0F49, 0x0F6C},
+	{0x0F71, 0x0F97}, {0x0F99, 0x0FBC}, {0x0FBE, 0x0FCC},
+	{0x0FCE, 0x0FDA}, {0x1000, 0x10C5}, {0x10C7, 0x10C7},
+	{0x10CD, 0x10CD}, {0x10D0, 0x10FF}, {0x1160, 0x1248},
+	{0x124A, 0x124D}, {0x1250, 0x1256}, {0x1258, 0x1258},
+	{0x125A, 0x125D}, {0x1260, 0x1288}, {0x128A, 0x128D},
+	{0x1290, 0x12B0}, {0x12B2, 0x12B5}, {0x12B8, 0x12BE},
+	{0x12C0, 0x12C0}, {0x12C2, 0x12C5}, {0x12C8, 0x12D6},
+	{0x12D8, 0x1310}, {0x1312, 0x1315}, {0x1318, 0x135A},
+	{0x135D, 0x137C}, {0x1380, 0x1399}, {0x13A0, 0x13F5},
+	{0x13F8, 0x13FD}, {0x1400, 0x169C}, {0x16A0, 0x16F8},
+	{0x1700, 0x170C}, {0x170E, 0x1714}, {0x1720, 0x1736},
+	{0x1740, 0x1753}, {0x1760, 0x176C}, {0x176E, 0x1770},
+	{0x1772, 0x1773}, {0x1780, 0x17DD}, {0x17E0, 0x17E9},
+	{0x17F0, 0x17F9}, {0x1800, 0x180E}, {0x1810, 0x1819},
+	{0x1820, 0x1878}, {0x1880, 0x18AA}, {0x18B0, 0x18F5},
+	{0x1900, 0x191E}, {0x1920, 0x192B}, {0x1930, 0x193B},
+	{0x1940, 0x1940}, {0x1944, 0x196D}, {0x1970, 0x1974},
+	{0x1980, 0x19AB}, {0x19B0, 0x19C9}, {0x19D0, 0x19DA},
+	{0x19DE, 0x1A1B}, {0x1A1E, 0x1A5E}, {0x1A60, 0x1A7C},
+	{0x1A7F, 0x1A89}, {0x1A90, 0x1A99}, {0x1AA0, 0x1AAD},
+	{0x1AB0, 0x1AC0}, {0x1B00, 0x1B4B}, {0x1B50, 0x1B7C},
+	{0x1B80, 0x1BF3}, {0x1BFC, 0x1C37}, {0x1C3B, 0x1C49},
+	{0x1C4D, 0x1C88}, {0x1C90, 0x1CBA}, {0x1CBD, 0x1CC7},
+	{0x1CD0, 0x1CFA}, {0x1D00, 0x1DF9}, {0x1DFB, 0x1F15},
+	{0x1F18, 0x1F1D}, {0x1F20, 0x1F45}, {0x1F48, 0x1F4D},
+	{0x1F50, 0x1F57}, {0x1F59, 0x1F59}, {0x1F5B, 0x1F5B},
+	{0x1F5D, 0x1F5D}, {0x1F5F, 0x1F7D}, {0x1F80, 0x1FB4},
+	{0x1FB6, 0x1FC4}, {0x1FC6, 0x1FD3}, {0x1FD6, 0x1FDB},
+	{0x1FDD, 0x1FEF}, {0x1FF2, 0x1FF4}, {0x1FF6, 0x1FFE},
+	{0x2000, 0x200F}, {0x2011, 0x2012}, {0x2017, 0x2017},
+	{0x201A, 0x201B}, {0x201E, 0x201F}, {0x2023, 0x2023},
+	{0x2028, 0x202F}, {0x2031, 0x2031}, {0x2034, 0x2034},
+	{0x2036, 0x203A}, {0x203C, 0x203D}, {0x203F, 0x2064},
+	{0x2066, 0x2071}, {0x2075, 0x207E}, {0x2080, 0x2080},
+	{0x2085, 0x208E}, {0x2090, 0x209C}, {0x20A0, 0x20A8},
+	{0x20AA, 0x20AB}, {0x20AD, 0x20BF}, {0x20D0, 0x20F0},
+	{0x2100, 0x2102}, {0x2104, 0x2104}, {0x2106, 0x2108},
+	{0x210A, 0x2112}, {0x2114, 0x2115}, {0x2117, 0x2120},
+	{0x2123, 0x2125}, {0x2127, 0x212A}, {0x212C, 0x2152},
+	{0x2155, 0x215A}, {0x215F, 0x215F}, {0x216C, 0x216F},
+	{0x217A, 0x2188}, {0x218A, 0x218B}, {0x219A, 0x21B7},
+	{0x21BA, 0x21D1}, {0x21D3, 0x21D3}, {0x21D5, 0x21E6},
+	{0x21E8, 0x21FF}, {0x2201, 0x2201}, {0x2204, 0x2206},
+	{0x2209, 0x220A}, {0x220C, 0x220E}, {0x2210, 0x2210},
+	{0x2212, 0x2214}, {0x2216, 0x2219}, {0x221B, 0x221C},
+	{0x2221, 0x2222}, {0x2224, 0x2224}, {0x2226, 0x2226},
+	{0x222D, 0x222D}, {0x222F, 0x2233}, {0x2238, 0x223B},
+	{0x223E, 0x2247}, {0x2249, 0x224B}, {0x224D, 0x2251},
+	{0x2253, 0x225F}, {0x2262, 0x2263}, {0x2268, 0x2269},
+	{0x226C, 0x226D}, {0x2270, 0x2281}, {0x2284, 0x2285},
+	{0x2288, 0x2294}, {0x2296, 0x2298}, {0x229A, 0x22A4},
+	{0x22A6, 0x22BE}, {0x22C0, 0x2311}, {0x2313, 0x2319},
+	{0x231C, 0x2328}, {0x232B, 0x23E8}, {0x23ED, 0x23EF},
+	{0x23F1, 0x23F2}, {0x23F4, 0x2426}, {0x2440, 0x244A},
+	{0x24EA, 0x24EA}, {0x254C, 0x254F}, {0x2574, 0x257F},
+	{0x2590, 0x2591}, {0x2596, 0x259F}, {0x25A2, 0x25A2},
+	{0x25AA, 0x25B1}, {0x25B4, 0x25B5}, {0x25B8, 0x25BB},
+	{0x25BE, 0x25BF}, {0x25C2, 0x25C5}, {0x25C9, 0x25CA},
+	{0x25CC, 0x25CD}, {0x25D2, 0x25E1}, {0x25E6, 0x25EE},
+	{0x25F0, 0x25FC}, {0x25FF, 0x2604}, {0x2607, 0x2608},
+	{0x260A, 0x260D}, {0x2610, 0x2613}, {0x2616, 0x261B},
+	{0x261D, 0x261D}, {0x261F, 0x263F}, {0x2641, 0x2641},
+	{0x2643, 0x2647}, {0x2654, 0x265F}, {0x2662, 0x2662},
+	{0x2666, 0x2666}, {0x266B, 0x266B}, {0x266E, 0x266E},
+	{0x2670, 0x267E}, {0x2680, 0x2692}, {0x2694, 0x269D},
+	{0x26A0, 0x26A0}, {0x26A2, 0x26A9}, {0x26AC, 0x26BC},
+	{0x26C0, 0x26C3}, {0x26E2, 0x26E2}, {0x26E4, 0x26E7},
+	{0x2700, 0x2704}, {0x2706, 0x2709}, {0x270C, 0x2727},
+	{0x2729, 0x273C}, {0x273E, 0x274B}, {0x274D, 0x274D},
+	{0x274F, 0x2752}, {0x2756, 0x2756}, {0x2758, 0x2775},
+	{0x2780, 0x2794}, {0x2798, 0x27AF}, {0x27B1, 0x27BE},
+	{0x27C0, 0x27E5}, {0x27EE, 0x2984}, {0x2987, 0x2B1A},
+	{0x2B1D, 0x2B4F}, {0x2B51, 0x2B54}, {0x2B5A, 0x2B73},
+	{0x2B76, 0x2B95}, {0x2B97, 0x2C2E}, {0x2C30, 0x2C5E},
+	{0x2C60, 0x2CF3}, {0x2CF9, 0x2D25}, {0x2D27, 0x2D27},
+	{0x2D2D, 0x2D2D}, {0x2D30, 0x2D67}, {0x2D6F, 0x2D70},
+	{0x2D7F, 0x2D96}, {0x2DA0, 0x2DA6}, {0x2DA8, 0x2DAE},
+	{0x2DB0, 0x2DB6}, {0x2DB8, 0x2DBE}, {0x2DC0, 0x2DC6},
+	{0x2DC8, 0x2DCE}, {0x2DD0, 0x2DD6}, {0x2DD8, 0x2DDE},
+	{0x2DE0, 0x2E52}, {0x303F, 0x303F}, {0x4DC0, 0x4DFF},
+	{0xA4D0, 0xA62B}, {0xA640, 0xA6F7}, {0xA700, 0xA7BF},
+	{0xA7C2, 0xA7CA}, {0xA7F5, 0xA82C}, {0xA830, 0xA839},
+	{0xA840, 0xA877}, {0xA880, 0xA8C5}, {0xA8CE, 0xA8D9},
+	{0xA8E0, 0xA953}, {0xA95F, 0xA95F}, {0xA980, 0xA9CD},
+	{0xA9CF, 0xA9D9}, {0xA9DE, 0xA9FE}, {0xAA00, 0xAA36},
+	{0xAA40, 0xAA4D}, {0xAA50, 0xAA59}, {0xAA5C, 0xAAC2},
+	{0xAADB, 0xAAF6}, {0xAB01, 0xAB06}, {0xAB09, 0xAB0E},
+	{0xAB11, 0xAB16}, {0xAB20, 0xAB26}, {0xAB28, 0xAB2E},
+	{0xAB30, 0xAB6B}, {0xAB70, 0xABED}, {0xABF0, 0xABF9},
+	{0xD7B0, 0xD7C6}, {0xD7CB, 0xD7FB}, {0xD800, 0xDFFF},
+	{0xFB00, 0xFB06}, {0xFB13, 0xFB17}, {0xFB1D, 0xFB36},
+	{0xFB38, 0xFB3C}, {0xFB3E, 0xFB3E}, {0xFB40, 0xFB41},
+	{0xFB43, 0xFB44}, {0xFB46, 0xFBC1}, {0xFBD3, 0xFD3F},
+	{0xFD50, 0xFD8F}, {0xFD92, 0xFDC7}, {0xFDF0, 0xFDFD},
+	{0xFE20, 0xFE2F}, {0xFE70, 0xFE74}, {0xFE76, 0xFEFC},
+	{0xFEFF, 0xFEFF}, {0xFFF9, 0xFFFC}, {0x10000, 0x1000B},
+	{0x1000D, 0x10026}, {0x10028, 0x1003A}, {0x1003C, 0x1003D},
+	{0x1003F, 0x1004D}, {0x10050, 0x1005D}, {0x10080, 0x100FA},
+	{0x10100, 0x10102}, {0x10107, 0x10133}, {0x10137, 0x1018E},
+	{0x10190, 0x1019C}, {0x101A0, 0x101A0}, {0x101D0, 0x101FD},
+	{0x10280, 0x1029C}, {0x102A0, 0x102D0}, {0x102E0, 0x102FB},
+	{0x10300, 0x10323}, {0x1032D, 0x1034A}, {0x10350, 0x1037A},
+	{0x10380, 0x1039D}, {0x1039F, 0x103C3}, {0x103C8, 0x103D5},
+	{0x10400, 0x1049D}, {0x104A0, 0x104A9}, {0x104B0, 0x104D3},
+	{0x104D8, 0x104FB}, {0x10500, 0x10527}, {0x10530, 0x10563},
+	{0x1056F, 0x1056F}, {0x10600, 0x10736}, {0x10740, 0x10755},
+	{0x10760, 0x10767}, {0x10800, 0x10805}, {0x10808, 0x10808},
+	{0x1080A, 0x10835}, {0x10837, 0x10838}, {0x1083C, 0x1083C},
+	{0x1083F, 0x10855}, {0x10857, 0x1089E}, {0x108A7, 0x108AF},
+	{0x108E0, 0x108F2}, {0x108F4, 0x108F5}, {0x108FB, 0x1091B},
+	{0x1091F, 0x10939}, {0x1093F, 0x1093F}, {0x10980, 0x109B7},
+	{0x109BC, 0x109CF}, {0x109D2, 0x10A03}, {0x10A05, 0x10A06},
+	{0x10A0C, 0x10A13}, {0x10A15, 0x10A17}, {0x10A19, 0x10A35},
+	{0x10A38, 0x10A3A}, {0x10A3F, 0x10A48}, {0x10A50, 0x10A58},
+	{0x10A60, 0x10A9F}, {0x10AC0, 0x10AE6}, {0x10AEB, 0x10AF6},
+	{0x10B00, 0x10B35}, {0x10B39, 0x10B55}, {0x10B58, 0x10B72},
+	{0x10B78, 0x10B91}, {0x10B99, 0x10B9C}, {0x10BA9, 0x10BAF},
+	{0x10C00, 0x10C48}, {0x10C80, 0x10CB2}, {0x10CC0, 0x10CF2},
+	{0x10CFA, 0x10D27}, {0x10D30, 0x10D39}, {0x10E60, 0x10E7E},
+	{0x10E80, 0x10EA9}, {0x10EAB, 0x10EAD}, {0x10EB0, 0x10EB1},
+	{0x10F00, 0x10F27}, {0x10F30, 0x10F59}, {0x10FB0, 0x10FCB},
+	{0x10FE0, 0x10FF6}, {0x11000, 0x1104D}, {0x11052, 0x1106F},
+	{0x1107F, 0x110C1}, {0x110CD, 0x110CD}, {0x110D0, 0x110E8},
+	{0x110F0, 0x110F9}, {0x11100, 0x11134}, {0x11136, 0x11147},
+	{0x11150, 0x11176}, {0x11180, 0x111DF}, {0x111E1, 0x111F4},
+	{0x11200, 0x11211}, {0x11213, 0x1123E}, {0x11280, 0x11286},
+	{0x11288, 0x11288}, {0x1128A, 0x1128D}, {0x1128F, 0x1129D},
+	{0x1129F, 0x112A9}, {0x112B0, 0x112EA}, {0x112F0, 0x112F9},
+	{0x11300, 0x11303}, {0x11305, 0x1130C}, {0x1130F, 0x11310},
+	{0x11313, 0x11328}, {0x1132A, 0x11330}, {0x11332, 0x11333},
+	{0x11335, 0x11339}, {0x1133B, 0x11344}, {0x11347, 0x11348},
+	{0x1134B, 0x1134D}, {0x11350, 0x11350}, {0x11357, 0x11357},
+	{0x1135D, 0x11363}, {0x11366, 0x1136C}, {0x11370, 0x11374},
+	{0x11400, 0x1145B}, {0x1145D, 0x11461}, {0x11480, 0x114C7},
+	{0x114D0, 0x114D9}, {0x11580, 0x115B5}, {0x115B8, 0x115DD},
+	{0x11600, 0x11644}, {0x11650, 0x11659}, {0x11660, 0x1166C},
+	{0x11680, 0x116B8}, {0x116C0, 0x116C9}, {0x11700, 0x1171A},
+	{0x1171D, 0x1172B}, {0x11730, 0x1173F}, {0x11800, 0x1183B},
+	{0x118A0, 0x118F2}, {0x118FF, 0x11906}, {0x11909, 0x11909},
+	{0x1190C, 0x11913}, {0x11915, 0x11916}, {0x11918, 0x11935},
+	{0x11937, 0x11938}, {0x1193B, 0x11946}, {0x11950, 0x11959},
+	{0x119A0, 0x119A7}, {0x119AA, 0x119D7}, {0x119DA, 0x119E4},
+	{0x11A00, 0x11A47}, {0x11A50, 0x11AA2}, {0x11AC0, 0x11AF8},
+	{0x11C00, 0x11C08}, {0x11C0A, 0x11C36}, {0x11C38, 0x11C45},
+	{0x11C50, 0x11C6C}, {0x11C70, 0x11C8F}, {0x11C92, 0x11CA7},
+	{0x11CA9, 0x11CB6}, {0x11D00, 0x11D06}, {0x11D08, 0x11D09},
+	{0x11D0B, 0x11D36}, {0x11D3A, 0x11D3A}, {0x11D3C, 0x11D3D},
+	{0x11D3F, 0x11D47}, {0x11D50, 0x11D59}, {0x11D60, 0x11D65},
+	{0x11D67, 0x11D68}, {0x11D6A, 0x11D8E}, {0x11D90, 0x11D91},
+	{0x11D93, 0x11D98}, {0x11DA0, 0x11DA9}, {0x11EE0, 0x11EF8},
+	{0x11FB0, 0x11FB0}, {0x11FC0, 0x11FF1}, {0x11FFF, 0x12399},
+	{0x12400, 0x1246E}, {0x12470, 0x12474}, {0x12480, 0x12543},
+	{0x13000, 0x1342E}, {0x13430, 0x13438}, {0x14400, 0x14646},
+	{0x16800, 0x16A38}, {0x16A40, 0x16A5E}, {0x16A60, 0x16A69},
+	{0x16A6E, 0x16A6F}, {0x16AD0, 0x16AED}, {0x16AF0, 0x16AF5},
+	{0x16B00, 0x16B45}, {0x16B50, 0x16B59}, {0x16B5B, 0x16B61},
+	{0x16B63, 0x16B77}, {0x16B7D, 0x16B8F}, {0x16E40, 0x16E9A},
+	{0x16F00, 0x16F4A}, {0x16F4F, 0x16F87}, {0x16F8F, 0x16F9F},
+	{0x1BC00, 0x1BC6A}, {0x1BC70, 0x1BC7C}, {0x1BC80, 0x1BC88},
+	{0x1BC90, 0x1BC99}, {0x1BC9C, 0x1BCA3}, {0x1D000, 0x1D0F5},
+	{0x1D100, 0x1D126}, {0x1D129, 0x1D1E8}, {0x1D200, 0x1D245},
+	{0x1D2E0, 0x1D2F3}, {0x1D300, 0x1D356}, {0x1D360, 0x1D378},
+	{0x1D400, 0x1D454}, {0x1D456, 0x1D49C}, {0x1D49E, 0x1D49F},
+	{0x1D4A2, 0x1D4A2}, {0x1D4A5, 0x1D4A6}, {0x1D4A9, 0x1D4AC},
+	{0x1D4AE, 0x1D4B9}, {0x1D4BB, 0x1D4BB}, {0x1D4BD, 0x1D4C3},
+	{0x1D4C5, 0x1D505}, {0x1D507, 0x1D50A}, {0x1D50D, 0x1D514},
+	{0x1D516, 0x1D51C}, {0x1D51E, 0x1D539}, {0x1D53B, 0x1D53E},
+	{0x1D540, 0x1D544}, {0x1D546, 0x1D546}, {0x1D54A, 0x1D550},
+	{0x1D552, 0x1D6A5}, {0x1D6A8, 0x1D7CB}, {0x1D7CE, 0x1DA8B},
+	{0x1DA9B, 0x1DA9F}, {0x1DAA1, 0x1DAAF}, {0x1E000, 0x1E006},
+	{0x1E008, 0x1E018}, {0x1E01B, 0x1E021}, {0x1E023, 0x1E024},
+	{0x1E026, 0x1E02A}, {0x1E100, 0x1E12C}, {0x1E130, 0x1E13D},
+	{0x1E140, 0x1E149}, {0x1E14E, 0x1E14F}, {0x1E2C0, 0x1E2F9},
+	{0x1E2FF, 0x1E2FF}, {0x1E800, 0x1E8C4}, {0x1E8C7, 0x1E8D6},
+	{0x1E900, 0x1E94B}, {0x1E950, 0x1E959}, {0x1E95E, 0x1E95F},
+	{0x1EC71, 0x1ECB4}, {0x1ED01, 0x1ED3D}, {0x1EE00, 0x1EE03},
+	{0x1EE05, 0x1EE1F}, {0x1EE21, 0x1EE22}, {0x1EE24, 0x1EE24},
+	{0x1EE27, 0x1EE27}, {0x1EE29, 0x1EE32}, {0x1EE34, 0x1EE37},
+	{0x1EE39, 0x1EE39}, {0x1EE3B, 0x1EE3B}, {0x1EE42, 0x1EE42},
+	{0x1EE47, 0x1EE47}, {0x1EE49, 0x1EE49}, {0x1EE4B, 0x1EE4B},
+	{0x1EE4D, 0x1EE4F}, {0x1EE51, 0x1EE52}, {0x1EE54, 0x1EE54},
+	{0x1EE57, 0x1EE57}, {0x1EE59, 0x1EE59}, {0x1EE5B, 0x1EE5B},
+	{0x1EE5D, 0x1EE5D}, {0x1EE5F, 0x1EE5F}, {0x1EE61, 0x1EE62},
+	{0x1EE64, 0x1EE64}, {0x1EE67, 0x1EE6A}, {0x1EE6C, 0x1EE72},
+	{0x1EE74, 0x1EE77}, {0x1EE79, 0x1EE7C}, {0x1EE7E, 0x1EE7E},
+	{0x1EE80, 0x1EE89}, {0x1EE8B, 0x1EE9B}, {0x1EEA1, 0x1EEA3},
+	{0x1EEA5, 0x1EEA9}, {0x1EEAB, 0x1EEBB}, {0x1EEF0, 0x1EEF1},
+	{0x1F000, 0x1F003}, {0x1F005, 0x1F02B}, {0x1F030, 0x1F093},
+	{0x1F0A0, 0x1F0AE}, {0x1F0B1, 0x1F0BF}, {0x1F0C1, 0x1F0CE},
+	{0x1F0D1, 0x1F0F5}, {0x1F10B, 0x1F10F}, {0x1F12E, 0x1F12F},
+	{0x1F16A, 0x1F16F}, {0x1F1AD, 0x1F1AD}, {0x1F1E6, 0x1F1FF},
+	{0x1F321, 0x1F32C}, {0x1F336, 0x1F336}, {0x1F37D, 0x1F37D},
+	{0x1F394, 0x1F39F}, {0x1F3CB, 0x1F3CE}, {0x1F3D4, 0x1F3DF},
+	{0x1F3F1, 0x1F3F3}, {0x1F3F5, 0x1F3F7}, {0x1F43F, 0x1F43F},
+	{0x1F441, 0x1F441}, {0x1F4FD, 0x1F4FE}, {0x1F53E, 0x1F54A},
+	{0x1F54F, 0x1F54F}, {0x1F568, 0x1F579}, {0x1F57B, 0x1F594},
+	{0x1F597, 0x1F5A3}, {0x1F5A5, 0x1F5FA}, {0x1F650, 0x1F67F},
+	{0x1F6C6, 0x1F6CB}, {0x1F6CD, 0x1F6CF}, {0x1F6D3, 0x1F6D4},
+	{0x1F6E0, 0x1F6EA}, {0x1F6F0, 0x1F6F3}, {0x1F700, 0x1F773},
+	{0x1F780, 0x1F7D8}, {0x1F800, 0x1F80B}, {0x1F810, 0x1F847},
+	{0x1F850, 0x1F859}, {0x1F860, 0x1F887}, {0x1F890, 0x1F8AD},
+	{0x1F8B0, 0x1F8B1}, {0x1F900, 0x1F90B}, {0x1F93B, 0x1F93B},
+	{0x1F946, 0x1F946}, {0x1FA00, 0x1FA53}, {0x1FA60, 0x1FA6D},
+	{0x1FB00, 0x1FB92}, {0x1FB94, 0x1FBCA}, {0x1FBF0, 0x1FBF9},
+	{0xE0001, 0xE0001}, {0xE0020, 0xE007F},
+}
+
+var emoji = table{
+	{0x203C, 0x203C}, {0x2049, 0x2049}, {0x2122, 0x2122},
+	{0x2139, 0x2139}, {0x2194, 0x2199}, {0x21A9, 0x21AA},
+	{0x231A, 0x231B}, {0x2328, 0x2328}, {0x2388, 0x2388},
+	{0x23CF, 0x23CF}, {0x23E9, 0x23F3}, {0x23F8, 0x23FA},
+	{0x24C2, 0x24C2}, {0x25AA, 0x25AB}, {0x25B6, 0x25B6},
+	{0x25C0, 0x25C0}, {0x25FB, 0x25FE}, {0x2600, 0x2605},
+	{0x2607, 0x2612}, {0x2614, 0x2685}, {0x2690, 0x2705},
+	{0x2708, 0x2712}, {0x2714, 0x2714}, {0x2716, 0x2716},
+	{0x271D, 0x271D}, {0x2721, 0x2721}, {0x2728, 0x2728},
+	{0x2733, 0x2734}, {0x2744, 0x2744}, {0x2747, 0x2747},
+	{0x274C, 0x274C}, {0x274E, 0x274E}, {0x2753, 0x2755},
+	{0x2757, 0x2757}, {0x2763, 0x2767}, {0x2795, 0x2797},
+	{0x27A1, 0x27A1}, {0x27B0, 0x27B0}, {0x27BF, 0x27BF},
+	{0x2934, 0x2935}, {0x2B05, 0x2B07}, {0x2B1B, 0x2B1C},
+	{0x2B50, 0x2B50}, {0x2B55, 0x2B55}, {0x3030, 0x3030},
+	{0x303D, 0x303D}, {0x3297, 0x3297}, {0x3299, 0x3299},
+	{0x1F000, 0x1F0FF}, {0x1F10D, 0x1F10F}, {0x1F12F, 0x1F12F},
+	{0x1F16C, 0x1F171}, {0x1F17E, 0x1F17F}, {0x1F18E, 0x1F18E},
+	{0x1F191, 0x1F19A}, {0x1F1AD, 0x1F1E5}, {0x1F201, 0x1F20F},
+	{0x1F21A, 0x1F21A}, {0x1F22F, 0x1F22F}, {0x1F232, 0x1F23A},
+	{0x1F23C, 0x1F23F}, {0x1F249, 0x1F3FA}, {0x1F400, 0x1F53D},
+	{0x1F546, 0x1F64F}, {0x1F680, 0x1F6FF}, {0x1F774, 0x1F77F},
+	{0x1F7D5, 0x1F7FF}, {0x1F80C, 0x1F80F}, {0x1F848, 0x1F84F},
+	{0x1F85A, 0x1F85F}, {0x1F888, 0x1F88F}, {0x1F8AE, 0x1F8FF},
+	{0x1F90C, 0x1F93A}, {0x1F93C, 0x1F945}, {0x1F947, 0x1FAFF},
+	{0x1FC00, 0x1FFFD},
+}
diff --git a/vendor/github.com/mattn/go-runewidth/runewidth_windows.go b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
new file mode 100644
index 0000000..d6a6177
--- /dev/null
+++ b/vendor/github.com/mattn/go-runewidth/runewidth_windows.go
@@ -0,0 +1,28 @@
+// +build windows
+// +build !appengine
+
+package runewidth
+
+import (
+	"syscall"
+)
+
+var (
+	kernel32               = syscall.NewLazyDLL("kernel32")
+	procGetConsoleOutputCP = kernel32.NewProc("GetConsoleOutputCP")
+)
+
+// IsEastAsian returns true if the current locale is CJK
+func IsEastAsian() bool {
+	r1, _, _ := procGetConsoleOutputCP.Call()
+	if r1 == 0 {
+		return false
+	}
+
+	switch int(r1) {
+	case 932, 51932, 936, 949, 950:
+		return true
+	}
+
+	return false
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go
new file mode 100644
index 0000000..e4b1fff
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/client.go
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"context"
+	"time"
+)
+
+const (
+	// Maximum channel buffer between publisher/subscriber goroutines
+	maxClientChannelBufferSize = 10
+)
+
+// These constants represent the event types returned by the KV client
+const (
+	PUT = iota
+	DELETE
+	CONNECTIONDOWN
+	UNKNOWN
+)
+
+// KVPair is a common wrapper for key-value pairs returned from the KV store
+type KVPair struct {
+	Key     string
+	Value   interface{}
+	Version int64
+	Session string
+	Lease   int64
+}
+
+// NewKVPair creates a new KVPair object
+func NewKVPair(key string, value interface{}, session string, lease int64, version int64) *KVPair {
+	kv := new(KVPair)
+	kv.Key = key
+	kv.Value = value
+	kv.Session = session
+	kv.Lease = lease
+	kv.Version = version
+	return kv
+}
+
+// Event is generated by the KV client when a key change is detected
+type Event struct {
+	EventType int
+	Key       interface{}
+	Value     interface{}
+	Version   int64
+}
+
+// NewEvent creates a new Event object
+func NewEvent(eventType int, key interface{}, value interface{}, version int64) *Event {
+	evnt := new(Event)
+	evnt.EventType = eventType
+	evnt.Key = key
+	evnt.Value = value
+	evnt.Version = version
+
+	return evnt
+}
+
+// Client represents the set of APIs a KV Client must implement
+type Client interface {
+	List(ctx context.Context, key string) (map[string]*KVPair, error)
+	Get(ctx context.Context, key string) (*KVPair, error)
+	Put(ctx context.Context, key string, value interface{}) error
+	Delete(ctx context.Context, key string) error
+	DeleteWithPrefix(ctx context.Context, prefixKey string) error
+	Watch(ctx context.Context, key string, withPrefix bool) chan *Event
+	IsConnectionUp(ctx context.Context) bool // timeout in seconds
+	CloseWatch(ctx context.Context, key string, ch chan *Event)
+	Close(ctx context.Context)
+
+	// These APIs are not used.  They will be cleaned up in the VOLTHA 2.9 release.
+	// They are kept for now to limit changes across all components.
+	Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error)
+	ReleaseReservation(ctx context.Context, key string) error
+	ReleaseAllReservations(ctx context.Context) error
+	RenewReservation(ctx context.Context, key string) error
+	AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error
+	ReleaseLock(lockName string) error
+}
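
As a hedged illustration (not part of the library itself), the sketch below shows how a component might drive this Client interface without depending on a concrete backend; the helper name storeAndRead and the key are hypothetical:

```go
package kvexample

import (
	"context"
	"fmt"

	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
)

// storeAndRead writes a value and reads it back through the generic Client
// interface, keeping the caller independent of the concrete KV backend.
func storeAndRead(ctx context.Context, kv kvstore.Client, key string, value []byte) ([]byte, error) {
	if err := kv.Put(ctx, key, value); err != nil {
		return nil, err
	}
	pair, err := kv.Get(ctx, key)
	if err != nil || pair == nil {
		return nil, err
	}
	// KVPair.Value is an interface{}; the etcd backend returns []byte.
	data, ok := pair.Value.([]byte)
	if !ok {
		return nil, fmt.Errorf("unexpected value type %T", pair.Value)
	}
	return data, nil
}
```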
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/common.go
new file mode 100644
index 0000000..8ac2a4a
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
new file mode 100644
index 0000000..6ca5329
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdclient.go
@@ -0,0 +1,474 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"os"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	v3Client "go.etcd.io/etcd/clientv3"
+	v3rpcTypes "go.etcd.io/etcd/etcdserver/api/v3rpc/rpctypes"
+)
+
+const (
+	poolCapacityEnvName = "VOLTHA_ETCD_CLIENT_POOL_CAPACITY"
+	maxUsageEnvName     = "VOLTHA_ETCD_CLIENT_MAX_USAGE"
+)
+
+const (
+	defaultMaxPoolCapacity = 1000 // Default size of an Etcd Client pool
+	defaultMaxPoolUsage    = 100  // Maximum concurrent requests an Etcd Client is allowed to process
+)
+
+// EtcdClient represents the Etcd KV store client
+type EtcdClient struct {
+	pool               EtcdClientAllocator
+	watchedChannels    sync.Map
+	watchedClients     map[string]*v3Client.Client
+	watchedClientsLock sync.RWMutex
+}
+
+// NewEtcdCustomClient returns a new client for the Etcd KV store, allowing
+// the caller to specify the etcd client configuration
+func NewEtcdCustomClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
+	// Get the capacity and max usage from the environment
+	capacity := defaultMaxPoolCapacity
+	maxUsage := defaultMaxPoolUsage
+	if capacityStr, present := os.LookupEnv(poolCapacityEnvName); present {
+		if val, err := strconv.Atoi(capacityStr); err == nil {
+			capacity = val
+			logger.Infow(ctx, "env-variable-set", log.Fields{"pool-capacity": capacity})
+		} else {
+			logger.Warnw(ctx, "invalid-capacity-value", log.Fields{"error": err, "capacity": capacityStr})
+		}
+	}
+	if maxUsageStr, present := os.LookupEnv(maxUsageEnvName); present {
+		if val, err := strconv.Atoi(maxUsageStr); err == nil {
+			maxUsage = val
+			logger.Infow(ctx, "env-variable-set", log.Fields{"max-usage": maxUsage})
+		} else {
+			logger.Warnw(ctx, "invalid-max-usage-value", log.Fields{"error": err, "max-usage": maxUsageStr})
+		}
+	}
+
+	var err error
+
+	pool, err := NewRoundRobinEtcdClientAllocator([]string{addr}, timeout, capacity, maxUsage, level)
+	if err != nil {
+		logger.Errorw(ctx, "failed-to-create-rr-client", log.Fields{
+			"error": err,
+		})
+	}
+
+	logger.Infow(ctx, "etcd-pool-created", log.Fields{"capacity": capacity, "max-usage": maxUsage})
+
+	return &EtcdClient{pool: pool,
+		watchedClients: make(map[string]*v3Client.Client),
+	}, nil
+}
+
+// NewEtcdClient returns a new client for the Etcd KV store
+func NewEtcdClient(ctx context.Context, addr string, timeout time.Duration, level log.LogLevel) (*EtcdClient, error) {
+	return NewEtcdCustomClient(ctx, addr, timeout, level)
+}
+
+// IsConnectionUp returns whether the connection to the Etcd KV store is up.  If a timeout occurs then
+// it is assumed the connection is down or unreachable.
+func (c *EtcdClient) IsConnectionUp(ctx context.Context) bool {
+	// Try to get a non-existent key.  If the connection is up, no error will be returned.
+	if _, err := c.Get(ctx, "non-existent-key"); err != nil {
+		return false
+	}
+	return true
+}
+
+// List returns a map of key-value pairs with key as a prefix.  Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer c.pool.Put(client)
+	resp, err := client.Get(ctx, key, v3Client.WithPrefix())
+
+	if err != nil {
+		logger.Error(ctx, err)
+		return nil, err
+	}
+	m := make(map[string]*KVPair)
+	for _, ev := range resp.Kvs {
+		m[string(ev.Key)] = NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version)
+	}
+	return m, nil
+}
+
+// Get returns a key-value pair for a given key. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Get(ctx context.Context, key string) (*KVPair, error) {
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer c.pool.Put(client)
+
+	attempt := 0
+
+startLoop:
+	for {
+		resp, err := client.Get(ctx, key)
+		if err != nil {
+			switch err {
+			case context.Canceled:
+				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+			case context.DeadlineExceeded:
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+			case v3rpcTypes.ErrEmptyKey:
+				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+			case v3rpcTypes.ErrLeaderChanged,
+				v3rpcTypes.ErrGRPCNoLeader,
+				v3rpcTypes.ErrTimeout,
+				v3rpcTypes.ErrTimeoutDueToLeaderFail,
+				v3rpcTypes.ErrTimeoutDueToConnectionLost:
+				// Retry for these server errors
+				attempt += 1
+				if er := backoff(ctx, attempt); er != nil {
+					logger.Warnw(ctx, "get-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+					return nil, err
+				}
+				logger.Warnw(ctx, "retrying-get", log.Fields{"key": key, "error": err, "attempt": attempt})
+				goto startLoop
+			default:
+				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			}
+			return nil, err
+		}
+
+		for _, ev := range resp.Kvs {
+			// Only one value is returned
+			return NewKVPair(string(ev.Key), ev.Value, "", ev.Lease, ev.Version), nil
+		}
+		return nil, nil
+	}
+}
+
+// Put writes a key-value pair to the KV store.  Value can only be a string or []byte since the etcd API
+// accepts only a string as a value for a put operation. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Put(ctx context.Context, key string, value interface{}) error {
+
+	// Validate that we can convert value to a string as etcd API expects a string
+	var val string
+	var err error
+	if val, err = ToString(value); err != nil {
+		return fmt.Errorf("unexpected-type-%T", value)
+	}
+
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return err
+	}
+	defer c.pool.Put(client)
+
+	attempt := 0
+startLoop:
+	for {
+		_, err = client.Put(ctx, key, val)
+		if err != nil {
+			switch err {
+			case context.Canceled:
+				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+			case context.DeadlineExceeded:
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+			case v3rpcTypes.ErrEmptyKey:
+				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+			case v3rpcTypes.ErrLeaderChanged,
+				v3rpcTypes.ErrGRPCNoLeader,
+				v3rpcTypes.ErrTimeout,
+				v3rpcTypes.ErrTimeoutDueToLeaderFail,
+				v3rpcTypes.ErrTimeoutDueToConnectionLost:
+				// Retry for these server errors
+				attempt += 1
+				if er := backoff(ctx, attempt); er != nil {
+					logger.Warnw(ctx, "put-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+					return err
+				}
+				logger.Warnw(ctx, "retrying-put", log.Fields{"key": key, "error": err, "attempt": attempt})
+				goto startLoop
+			default:
+				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			}
+			return err
+		}
+		return nil
+	}
+}
+
+// Delete removes a key from the KV store. Timeout defines how long the function will
+// wait for a response
+func (c *EtcdClient) Delete(ctx context.Context, key string) error {
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return err
+	}
+	defer c.pool.Put(client)
+
+	attempt := 0
+startLoop:
+	for {
+		_, err = client.Delete(ctx, key)
+		if err != nil {
+			switch err {
+			case context.Canceled:
+				logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+			case context.DeadlineExceeded:
+				logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err, "context": ctx})
+			case v3rpcTypes.ErrEmptyKey:
+				logger.Warnw(ctx, "etcd-client-error", log.Fields{"error": err})
+			case v3rpcTypes.ErrLeaderChanged,
+				v3rpcTypes.ErrGRPCNoLeader,
+				v3rpcTypes.ErrTimeout,
+				v3rpcTypes.ErrTimeoutDueToLeaderFail,
+				v3rpcTypes.ErrTimeoutDueToConnectionLost:
+				// Retry for these server errors
+				attempt += 1
+				if er := backoff(ctx, attempt); er != nil {
+					logger.Warnw(ctx, "delete-retries-failed", log.Fields{"key": key, "error": er, "attempt": attempt})
+					return err
+				}
+				logger.Warnw(ctx, "retrying-delete", log.Fields{"key": key, "error": err, "attempt": attempt})
+				goto startLoop
+			default:
+				logger.Warnw(ctx, "etcd-server-error", log.Fields{"error": err})
+			}
+			return err
+		}
+		logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
+		return nil
+	}
+}
+
+func (c *EtcdClient) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
+
+	client, err := c.pool.Get(ctx)
+	if err != nil {
+		return err
+	}
+	defer c.pool.Put(client)
+
+	//delete the prefix
+	if _, err := client.Delete(ctx, prefixKey, v3Client.WithPrefix()); err != nil {
+		logger.Errorw(ctx, "failed-to-delete-prefix-key", log.Fields{"key": prefixKey, "error": err})
+		return err
+	}
+	logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": prefixKey})
+	return nil
+}
+
+// Watch provides the watch capability on a given key.  It returns a channel on which the caller needs to
+// listen to receive Events.
+func (c *EtcdClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
+	var err error
+	// Reuse the Etcd client when multiple callees are watching the same key.
+	c.watchedClientsLock.Lock()
+	client, exist := c.watchedClients[key]
+	if !exist {
+		client, err = c.pool.Get(ctx)
+		if err != nil {
+			logger.Errorw(ctx, "failed-to-an-etcd-client", log.Fields{"key": key, "error": err})
+			c.watchedClientsLock.Unlock()
+			return nil
+		}
+		c.watchedClients[key] = client
+	}
+	c.watchedClientsLock.Unlock()
+
+	w := v3Client.NewWatcher(client)
+	ctx, cancel := context.WithCancel(ctx)
+	var channel v3Client.WatchChan
+	if withPrefix {
+		channel = w.Watch(ctx, key, v3Client.WithPrefix())
+	} else {
+		channel = w.Watch(ctx, key)
+	}
+
+	// Create a new channel
+	ch := make(chan *Event, maxClientChannelBufferSize)
+
+	// Keep track of the created channels so they can be closed when required
+	channelMap := make(map[chan *Event]v3Client.Watcher)
+	channelMap[ch] = w
+	channelMaps := c.addChannelMap(key, channelMap)
+
+	// Log only the number of channel maps, as the underlying logger cannot format the map of channels into
+	// JSON format.
+	logger.Debugw(ctx, "watched-channels", log.Fields{"len": len(channelMaps)})
+	// Launch a go routine to listen for updates
+	go c.listenForKeyChange(ctx, channel, ch, cancel)
+
+	return ch
+
+}
+
+func (c *EtcdClient) addChannelMap(key string, channelMap map[chan *Event]v3Client.Watcher) []map[chan *Event]v3Client.Watcher {
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]v3Client.Watcher), channelMap)
+	} else {
+		channels = []map[chan *Event]v3Client.Watcher{channelMap}
+	}
+	c.watchedChannels.Store(key, channels)
+
+	return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) removeChannelMap(key string, pos int) []map[chan *Event]v3Client.Watcher {
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]v3Client.Watcher)[:pos], channels.([]map[chan *Event]v3Client.Watcher)[pos+1:]...)
+		c.watchedChannels.Store(key, channels)
+	}
+
+	return channels.([]map[chan *Event]v3Client.Watcher)
+}
+
+func (c *EtcdClient) getChannelMaps(key string) ([]map[chan *Event]v3Client.Watcher, bool) {
+	var channels interface{}
+	var exists bool
+
+	channels, exists = c.watchedChannels.Load(key)
+
+	if channels == nil {
+		return nil, exists
+	}
+
+	return channels.([]map[chan *Event]v3Client.Watcher), exists
+}
+
+// CloseWatch closes a specific watch. Both the key and the channel are required when closing a watch as there
+// may be multiple listeners on the same key.  The previously created channel serves as a key
+func (c *EtcdClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
+	// Get the array of channels mapping
+	var watchedChannels []map[chan *Event]v3Client.Watcher
+	var ok bool
+
+	if watchedChannels, ok = c.getChannelMaps(key); !ok {
+		logger.Warnw(ctx, "key-has-no-watched-channels", log.Fields{"key": key})
+		return
+	}
+	// Look for the channels
+	var pos = -1
+	for i, chMap := range watchedChannels {
+		if t, ok := chMap[ch]; ok {
+			logger.Debug(ctx, "channel-found")
+			// Close the etcd watcher before the client channel.  This should close the etcd channel as well
+			if err := t.Close(); err != nil {
+				logger.Errorw(ctx, "watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+			}
+			pos = i
+			break
+		}
+	}
+
+	channelMaps, _ := c.getChannelMaps(key)
+	// Remove that entry if present
+	if pos >= 0 {
+		channelMaps = c.removeChannelMap(key, pos)
+	}
+
+	// If we don't have any keys being watched then return the Etcd client to the pool
+	if len(channelMaps) == 0 {
+		c.watchedClientsLock.Lock()
+		// Sanity
+		if client, ok := c.watchedClients[key]; ok {
+			c.pool.Put(client)
+			delete(c.watchedClients, key)
+		}
+		c.watchedClientsLock.Unlock()
+	}
+	logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+}
+
+func (c *EtcdClient) listenForKeyChange(ctx context.Context, channel v3Client.WatchChan, ch chan<- *Event, cancel context.CancelFunc) {
+	logger.Debug(ctx, "start-listening-on-channel ...")
+	defer cancel()
+	defer close(ch)
+	for resp := range channel {
+		for _, ev := range resp.Events {
+			ch <- NewEvent(getEventType(ev), ev.Kv.Key, ev.Kv.Value, ev.Kv.Version)
+		}
+	}
+	logger.Debug(ctx, "stop-listening-on-channel ...")
+}
+
+func getEventType(event *v3Client.Event) int {
+	switch event.Type {
+	case v3Client.EventTypePut:
+		return PUT
+	case v3Client.EventTypeDelete:
+		return DELETE
+	}
+	return UNKNOWN
+}
+
+// Close closes all the connection in the pool store client
+func (c *EtcdClient) Close(ctx context.Context) {
+	logger.Debug(ctx, "closing-etcd-pool")
+	c.pool.Close(ctx)
+}
+
+// The APIs below are not used
+var errUnimplemented = errors.New("deprecated")
+
+// Reserve is deprecated
+func (c *EtcdClient) Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error) {
+	return nil, errUnimplemented
+}
+
+// ReleaseAllReservations is deprecated
+func (c *EtcdClient) ReleaseAllReservations(ctx context.Context) error {
+	return errUnimplemented
+}
+
+// ReleaseReservation is deprecated
+func (c *EtcdClient) ReleaseReservation(ctx context.Context, key string) error {
+	return errUnimplemented
+}
+
+// RenewReservation is deprecated
+func (c *EtcdClient) RenewReservation(ctx context.Context, key string) error {
+	return errUnimplemented
+}
+
+// AcquireLock is deprecated
+func (c *EtcdClient) AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error {
+	return errUnimplemented
+}
+
+// ReleaseLock is deprecated
+func (c *EtcdClient) ReleaseLock(lockName string) error {
+	return errUnimplemented
+}
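
A hedged end-to-end sketch of the client above; the etcd endpoint, key, and timeout are illustrative, and error handling is reduced to panics for brevity:

```go
package main

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v7/pkg/log"
)

func main() {
	ctx := context.Background()

	// Pool sizing is taken from VOLTHA_ETCD_CLIENT_POOL_CAPACITY and
	// VOLTHA_ETCD_CLIENT_MAX_USAGE when those environment variables are set.
	client, err := kvstore.NewEtcdClient(ctx, "127.0.0.1:2379", 5*time.Second, log.ErrorLevel)
	if err != nil {
		panic(err)
	}
	defer client.Close(ctx)

	if err := client.Put(ctx, "service/voltha/demo", "demo-value"); err != nil {
		panic(err)
	}

	// Watch returns a buffered Event channel; CloseWatch needs both the key and
	// the channel because several listeners may watch the same key.
	events := client.Watch(ctx, "service/voltha/demo", false)
	defer client.CloseWatch(ctx, "service/voltha/demo", events)

	if pair, err := client.Get(ctx, "service/voltha/demo"); err == nil && pair != nil {
		_ = pair.Value // the etcd backend returns the value as []byte
	}
}
```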
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
new file mode 100644
index 0000000..4d33c27
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/etcdpool.go
@@ -0,0 +1,240 @@
+/*
+ * Copyright 2021-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"container/list"
+	"context"
+	"errors"
+	"sync"
+	"time"
+
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	"go.etcd.io/etcd/clientv3"
+)
+
+// EtcdClientAllocator represents a generic interface to allocate an Etcd Client
+type EtcdClientAllocator interface {
+	Get(context.Context) (*clientv3.Client, error)
+	Put(*clientv3.Client)
+	Close(ctx context.Context)
+}
+
+// NewRoundRobinEtcdClientAllocator creates a new ETCD Client Allocator using a Round Robin scheme
+func NewRoundRobinEtcdClientAllocator(endpoints []string, timeout time.Duration, capacity, maxUsage int, level log.LogLevel) (EtcdClientAllocator, error) {
+	return &roundRobin{
+		all:       make(map[*clientv3.Client]*rrEntry),
+		full:      make(map[*clientv3.Client]*rrEntry),
+		waitList:  list.New(),
+		max:       maxUsage,
+		capacity:  capacity,
+		timeout:   timeout,
+		endpoints: endpoints,
+		logLevel:  level,
+		closingCh: make(chan struct{}, capacity*maxUsage),
+		stopCh:    make(chan struct{}),
+	}, nil
+}
+
+type rrEntry struct {
+	client *clientv3.Client
+	count  int
+	age    time.Time
+}
+
+type roundRobin struct {
+	//block chan struct{}
+	sync.Mutex
+	available []*rrEntry
+	all       map[*clientv3.Client]*rrEntry
+	full      map[*clientv3.Client]*rrEntry
+	waitList  *list.List
+	max       int
+	capacity  int
+	timeout   time.Duration
+	//ageOut    time.Duration
+	endpoints []string
+	size      int
+	logLevel  log.LogLevel
+	closing   bool
+	closingCh chan struct{}
+	stopCh    chan struct{}
+}
+
+// Get returns an Etcd client. If none is available, it will create one,
+// up to the maximum allowed capacity.  If the maximum capacity has been
+// reached, it will wait until a used one is freed.
+func (r *roundRobin) Get(ctx context.Context) (*clientv3.Client, error) {
+	r.Lock()
+
+	if r.closing {
+		r.Unlock()
+		return nil, errors.New("pool-is-closing")
+	}
+
+	// first determine if we need to block, which would mean the
+	// available queue is empty and we are at capacity
+	if len(r.available) == 0 && r.size >= r.capacity {
+
+		// create a channel on which to wait and
+		// add it to the list
+		ch := make(chan struct{})
+		element := r.waitList.PushBack(ch)
+		r.Unlock()
+
+		// block until it is our turn or context
+		// expires or is canceled
+		select {
+		case <-r.stopCh:
+			logger.Info(ctx, "stop-waiting-pool-is-closing")
+			r.waitList.Remove(element)
+			return nil, errors.New("stop-waiting-pool-is-closing")
+		case <-ch:
+			r.waitList.Remove(element)
+		case <-ctx.Done():
+			r.waitList.Remove(element)
+			return nil, ctx.Err()
+		}
+		r.Lock()
+	}
+
+	defer r.Unlock()
+	if len(r.available) > 0 {
+		// pull off back end as it is operationally quicker
+		last := len(r.available) - 1
+		entry := r.available[last]
+		entry.count++
+		if entry.count >= r.max {
+			r.available = r.available[:last]
+			r.full[entry.client] = entry
+		}
+		entry.age = time.Now()
+		return entry.client, nil
+	}
+
+	logConfig := log.ConstructZapConfig(log.JSON, r.logLevel, log.Fields{})
+	// increase capacity
+	client, err := clientv3.New(clientv3.Config{
+		Endpoints:   r.endpoints,
+		DialTimeout: r.timeout,
+		LogConfig:   &logConfig,
+	})
+	if err != nil {
+		return nil, err
+	}
+	entry := &rrEntry{
+		client: client,
+		count:  1,
+	}
+	r.all[entry.client] = entry
+
+	if r.max > 1 {
+		r.available = append(r.available, entry)
+	} else {
+		r.full[entry.client] = entry
+	}
+	r.size++
+	return client, nil
+}
+
+// Put returns the Etcd Client back to the pool
+func (r *roundRobin) Put(client *clientv3.Client) {
+	r.Lock()
+
+	entry := r.all[client]
+	entry.count--
+
+	if r.closing {
+		// Close client if count is 0
+		if entry.count == 0 {
+			if err := entry.client.Close(); err != nil {
+				logger.Warnw(context.Background(), "error-closing-client", log.Fields{"error": err})
+			}
+			delete(r.all, entry.client)
+		}
+		// Notify Close function that a client was returned to the pool
+		r.closingCh <- struct{}{}
+		r.Unlock()
+		return
+	}
+
+	// This entry is now available for use, so
+	// if in full map add it to available and
+	// remove from full
+	if _, ok := r.full[client]; ok {
+		r.available = append(r.available, entry)
+		delete(r.full, client)
+	}
+
+	front := r.waitList.Front()
+	if front != nil {
+		ch := r.waitList.Remove(front)
+		r.Unlock()
+		// need to unblock if someone is waiting
+		ch.(chan struct{}) <- struct{}{}
+		return
+	}
+	r.Unlock()
+}
+
+func (r *roundRobin) Close(ctx context.Context) {
+	r.Lock()
+	r.closing = true
+
+	// Notify anyone waiting for a client to stop waiting
+	close(r.stopCh)
+
+	// Clean-up unused clients
+	for i := 0; i < len(r.available); i++ {
+		// Count 0 means no one is using that client
+		if r.available[i].count == 0 {
+			if err := r.available[i].client.Close(); err != nil {
+				logger.Warnw(ctx, "failure-closing-client", log.Fields{"client": r.available[i].client, "error": err})
+			}
+			// Remove the client from the "all" map
+			delete(r.all, r.available[i].client)
+		}
+	}
+
+	// Figure out how many clients are in use
+	numberInUse := 0
+	for _, rrEntry := range r.all {
+		numberInUse += rrEntry.count
+	}
+	r.Unlock()
+
+	if numberInUse == 0 {
+		logger.Info(ctx, "no-connection-in-use")
+		return
+	}
+
+	logger.Infow(ctx, "waiting-for-clients-return", log.Fields{"count": numberInUse})
+
+	// Wait for notifications when a client is returned to the pool
+	for {
+		select {
+		case <-r.closingCh:
+			numberInUse--
+			if numberInUse == 0 {
+				logger.Info(ctx, "all-connections-closed")
+				return
+			}
+		case <-ctx.Done():
+			logger.Warnw(ctx, "context-done", log.Fields{"error": ctx.Err()})
+			return
+		}
+	}
+}
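
A hedged sketch of driving the round-robin allocator directly; the endpoint and pool sizes are illustrative, and the returned client is a raw clientv3.Client:

```go
package main

import (
	"context"
	"time"

	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
	"github.com/opencord/voltha-lib-go/v7/pkg/log"
)

func main() {
	ctx := context.Background()

	// Up to 10 etcd clients, each serving at most 5 concurrent users.
	pool, err := kvstore.NewRoundRobinEtcdClientAllocator(
		[]string{"127.0.0.1:2379"}, 5*time.Second, 10, 5, log.ErrorLevel)
	if err != nil {
		panic(err)
	}
	defer pool.Close(ctx)

	// Get blocks once every client is at max usage and the pool is at capacity;
	// every successful Get must be paired with a Put so waiters are unblocked.
	client, err := pool.Get(ctx)
	if err != nil {
		panic(err)
	}
	defer pool.Put(client)

	_, _ = client.Get(ctx, "service/voltha/demo") // raw clientv3 access
}
```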
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/kvutils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/kvutils.go
new file mode 100644
index 0000000..ca57542
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/kvutils.go
@@ -0,0 +1,78 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"context"
+	"fmt"
+	"math"
+	"math/rand"
+	"time"
+)
+
+const (
+	minRetryInterval  = 100
+	maxRetryInterval  = 5000
+	incrementalFactor = 1.2
+	jitter            = 0.2
+)
+
+// ToString converts an interface value to a string.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToString(value interface{}) (string, error) {
+	switch t := value.(type) {
+	case []byte:
+		return string(value.([]byte)), nil
+	case string:
+		return value.(string), nil
+	default:
+		return "", fmt.Errorf("unexpected-type-%T", t)
+	}
+}
+
+// ToByte converts an interface value to a []byte.  The interface should either be of
+// a string type or []byte.  Otherwise, an error is returned.
+func ToByte(value interface{}) ([]byte, error) {
+	switch t := value.(type) {
+	case []byte:
+		return value.([]byte), nil
+	case string:
+		return []byte(value.(string)), nil
+	default:
+		return nil, fmt.Errorf("unexpected-type-%T", t)
+	}
+}
+
+// backoff waits an amount of time that grows with the attempt value.  The wait time is in the range of
+// minRetryInterval to maxRetryInterval.
+func backoff(ctx context.Context, attempt int) error {
+	if attempt == 0 {
+		return nil
+	}
+	backoff := int(minRetryInterval + incrementalFactor*math.Exp(float64(attempt)))
+	backoff *= 1 + int(jitter*(rand.Float64()*2-1))
+	if backoff > maxRetryInterval {
+		backoff = maxRetryInterval
+	}
+	ticker := time.NewTicker(time.Duration(backoff) * time.Millisecond)
+	defer ticker.Stop()
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-ticker.C:
+	}
+	return nil
+}
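
Since backoff is unexported, here is a small standalone sketch that reproduces its formula (minus the ±20% jitter) to show how the retry delay grows with the attempt number before being capped at maxRetryInterval:

```go
package main

import (
	"fmt"
	"math"
)

func main() {
	const (
		minRetryInterval  = 100.0  // ms
		maxRetryInterval  = 5000.0 // ms
		incrementalFactor = 1.2
	)
	// Delay for attempt n is roughly min(minRetryInterval + incrementalFactor*e^n, maxRetryInterval).
	for attempt := 1; attempt <= 10; attempt++ {
		delay := minRetryInterval + incrementalFactor*math.Exp(float64(attempt))
		if delay > maxRetryInterval {
			delay = maxRetryInterval
		}
		fmt.Printf("attempt %2d -> ~%.0f ms\n", attempt, delay)
	}
}
```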
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go
new file mode 100644
index 0000000..decb0a4
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore/redisclient.go
@@ -0,0 +1,417 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package kvstore
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/go-redis/redis/v8"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+)
+
+type RedisClient struct {
+	redisAPI            *redis.Client
+	keyReservations     map[string]time.Duration
+	watchedChannels     sync.Map
+	writeLock           sync.Mutex
+	keyReservationsLock sync.RWMutex
+}
+
+func NewRedisClient(addr string, timeout time.Duration, useSentinel bool) (*RedisClient, error) {
+	var r *redis.Client
+	if !useSentinel {
+		r = redis.NewClient(&redis.Options{Addr: addr})
+	} else {
+		// Redis Master-Replicas with Sentinel, sentinel masterSet config
+		//  should be set to sebaRedis
+		r = redis.NewFailoverClient(&redis.FailoverOptions{
+			MasterName:    "sebaRedis",
+			SentinelAddrs: []string{addr},
+		})
+	}
+
+	reservations := make(map[string]time.Duration)
+	return &RedisClient{redisAPI: r, keyReservations: reservations}, nil
+}
+
+func (c *RedisClient) Get(ctx context.Context, key string) (*KVPair, error) {
+
+	val, err := c.redisAPI.Get(ctx, key).Result()
+	valBytes, _ := ToByte(val)
+	if err != nil {
+		return nil, nil
+	}
+	return NewKVPair(key, valBytes, "", 0, 0), nil
+}
+
+func (c *RedisClient) Put(ctx context.Context, key string, value interface{}) error {
+
+	// Validate that we can convert value to a string as the Redis API expects a string
+	var val string
+	var er error
+	if val, er = ToString(value); er != nil {
+		return fmt.Errorf("unexpected-type-%T", value)
+	}
+
+	// Check if there is already a lease for this key - if there is then use it, otherwise a PUT will make
+	// that KV key permanent instead of automatically removing it after a lease expiration
+	setErr := c.redisAPI.Set(ctx, key, val, 0)
+	err := setErr.Err()
+
+	if err != nil {
+		switch setErr.Err() {
+		case context.Canceled:
+			logger.Warnw(ctx, "context-cancelled", log.Fields{"error": err})
+		case context.DeadlineExceeded:
+			logger.Warnw(ctx, "context-deadline-exceeded", log.Fields{"error": err})
+		default:
+			logger.Warnw(ctx, "bad-endpoints", log.Fields{"error": err})
+		}
+		return err
+	}
+	return nil
+}
+
+func (c *RedisClient) scanAllKeysWithPrefix(ctx context.Context, key string) ([]string, error) {
+	var err error
+	allkeys := []string{}
+	cont := true
+	cursor := uint64(0)
+	matchPrefix := key + "*"
+
+	for cont {
+		// search in the first 10000 entries starting from the point indicated by the cursor
+		logger.Debugw(ctx, "redis-scan", log.Fields{"matchPrefix": matchPrefix, "cursor": cursor})
+		var keys []string
+		keys, cursor, err = c.redisAPI.Scan(context.Background(), cursor, matchPrefix, 10000).Result()
+		if err != nil {
+			return nil, err
+		}
+		if cursor == 0 {
+			// all data searched. break the loop
+			logger.Debugw(ctx, "redis-scan-ended", log.Fields{"matchPrefix": matchPrefix, "cursor": cursor})
+			cont = false
+		}
+		if len(keys) == 0 {
+			// no matched data found in this cycle. Continue to search
+			logger.Debugw(ctx, "redis-scan-no-data-found-continue", log.Fields{"matchPrefix": matchPrefix, "cursor": cursor})
+			continue
+		}
+		allkeys = append(allkeys, keys...)
+	}
+	return allkeys, nil
+}
+
+func (c *RedisClient) List(ctx context.Context, key string) (map[string]*KVPair, error) {
+	var err error
+	var keys []string
+	m := make(map[string]*KVPair)
+	var values []interface{}
+
+	if keys, err = c.scanAllKeysWithPrefix(ctx, key); err != nil {
+		return nil, err
+	}
+
+	if len(keys) != 0 {
+		values, err = c.redisAPI.MGet(ctx, keys...).Result()
+		if err != nil {
+			return nil, err
+		}
+	}
+	for i, key := range keys {
+		if valBytes, err := ToByte(values[i]); err == nil {
+			m[key] = NewKVPair(key, interface{}(valBytes), "", 0, 0)
+		}
+	}
+	return m, nil
+}
+
+func (c *RedisClient) Delete(ctx context.Context, key string) error {
+	// delete the key
+	if _, err := c.redisAPI.Del(ctx, key).Result(); err != nil {
+		logger.Errorw(ctx, "failed-to-delete-key", log.Fields{"key": key, "error": err})
+		return err
+	}
+	logger.Debugw(ctx, "key(s)-deleted", log.Fields{"key": key})
+	return nil
+}
+
+func (c *RedisClient) DeleteWithPrefix(ctx context.Context, prefixKey string) error {
+	var keys []string
+	var err error
+	if keys, err = c.scanAllKeysWithPrefix(ctx, prefixKey); err != nil {
+		return err
+	}
+	if len(keys) == 0 {
+		logger.Warnw(ctx, "nothing-to-delete-from-kv", log.Fields{"key": prefixKey})
+		return nil
+	}
+	//call delete for keys
+	entryCount := int64(0)
+	start := 0
+	pageSize := 5000
+	length := len(keys)
+	for start < length {
+		end := start + pageSize
+		if end >= length {
+			end = length
+		}
+		keysToDelete := keys[start:end]
+		count := int64(0)
+		if count, err = c.redisAPI.Del(ctx, keysToDelete...).Result(); err != nil {
+			logger.Errorw(ctx, "DeleteWithPrefix method failed", log.Fields{"prefixKey": prefixKey, "numOfMatchedKeys": len(keysToDelete), "err": err})
+			return err
+		}
+		entryCount += count
+		start = end
+	}
+	logger.Debugf(ctx, "%d entries matching with the key prefix %s have been deleted successfully", entryCount, prefixKey)
+	return nil
+}
+
+func (c *RedisClient) Reserve(ctx context.Context, key string, value interface{}, ttl time.Duration) (interface{}, error) {
+	var val string
+	var er error
+	if val, er = ToString(value); er != nil {
+		return nil, fmt.Errorf("unexpected-type-%T", value)
+	}
+
+	// SetNX -- Only set the key if it does not already exist.
+	if err := c.redisAPI.SetNX(ctx, key, val, ttl).Err(); err != nil {
+		return nil, err
+	}
+
+	// Check whether the set was successful by reading the key back
+	redisVal := c.redisAPI.Get(ctx, key).Val()
+	if redisVal == "" {
+		return nil, nil
+	}
+
+	if val == redisVal {
+		// set is successful, return new reservation value
+		c.keyReservationsLock.Lock()
+		c.keyReservations[key] = ttl
+		c.keyReservationsLock.Unlock()
+		bytes, _ := ToByte(val)
+		return bytes, nil
+	} else {
+		// set is not successful, return existing reservation value
+		bytes, _ := ToByte(redisVal)
+		return bytes, nil
+	}
+
+}
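
The Reserve/RenewReservation/ReleaseReservation trio above implements a simple TTL-based reservation on top of SetNX: the caller wins the reservation only if the value read back equals the value it tried to set. A hedged sketch of that contract follows; the key, instance id, address, and TTL are made-up illustrative values, and the import path is an assumption:

```
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
)

// tryReserve attempts to claim key for this instance and reports whether the
// reservation was won.
func tryReserve(ctx context.Context, c *kvstore.RedisClient, key, id string, ttl time.Duration) (bool, error) {
	got, err := c.Reserve(ctx, key, id, ttl)
	if err != nil {
		return false, err
	}
	if got == nil {
		// Reserve returns nil when the key could not be read back.
		return false, nil
	}
	current, err := kvstore.ToString(got)
	if err != nil {
		return false, err
	}
	// Reserve returns whatever value is stored under the key: ours if the
	// SetNX succeeded, the current owner's value otherwise.
	return current == id, nil
}

func main() {
	client, err := kvstore.NewRedisClient("127.0.0.1:6379", 5*time.Second, false)
	if err != nil {
		panic(err)
	}
	ctx := context.Background()
	won, err := tryReserve(ctx, client, "service/voltha/leader", "instance-1", 10*time.Second)
	fmt.Println("won reservation:", won, "err:", err)
	if won {
		// Keep the reservation alive, then release it when done.
		_ = client.RenewReservation(ctx, "service/voltha/leader")
		_ = client.ReleaseReservation(ctx, "service/voltha/leader")
	}
}
```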
+
+func (c *RedisClient) ReleaseReservation(ctx context.Context, key string) error {
+
+	redisVal := c.redisAPI.Get(ctx, key).Val()
+	if redisVal == "" {
+		return nil
+	}
+
+	// Re-write the reserved value without a TTL so that it no longer expires
+	if _, err := c.redisAPI.Set(ctx, key, redisVal, 0).Result(); err != nil {
+		return err
+	}
+	c.keyReservationsLock.Lock()
+	delete(c.keyReservations, key)
+	c.keyReservationsLock.Unlock()
+	return nil
+
+}
+
+func (c *RedisClient) ReleaseAllReservations(ctx context.Context) error {
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+	for key := range c.keyReservations {
+		err := c.ReleaseReservation(ctx, key)
+		if err != nil {
+			logger.Errorw(ctx, "cannot-release-reservation", log.Fields{"key": key, "error": err})
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *RedisClient) RenewReservation(ctx context.Context, key string) error {
+	c.writeLock.Lock()
+	defer c.writeLock.Unlock()
+
+	// Verify the key was reserved
+	ttl, ok := c.keyReservations[key]
+	if !ok {
+		return errors.New("key-not-reserved. Key not found")
+	}
+
+	redisVal := c.redisAPI.Get(ctx, key).Val()
+	if redisVal != "" {
+		if err := c.redisAPI.Set(ctx, key, redisVal, ttl).Err(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (c *RedisClient) listenForKeyChange(ctx context.Context, redisCh <-chan *redis.Message, ch chan<- *Event, cancel context.CancelFunc) {
+	logger.Debug(ctx, "start-listening-on-channel ...")
+	defer cancel()
+	defer close(ch)
+	for msg := range redisCh {
+		// The channel is of the form "__keyspace@<db>__:<key>"; keep everything after the first ':'
+		key := strings.SplitN(msg.Channel, ":", 2)[1]
+		msgType := getMessageType(msg.Payload)
+		var valBytes []byte
+		if msgType == PUT {
+			ev, _ := c.Get(ctx, key)
+			valBytes, _ = ToByte(ev.Value)
+		}
+		ch <- NewEvent(getMessageType(msg.Payload), []byte(key), valBytes, 0)
+	}
+	logger.Debug(ctx, "stop-listening-on-channel ...")
+}
+
+func getMessageType(msg string) int {
+	isPut := strings.HasSuffix(msg, "set")
+	isDel := strings.HasSuffix(msg, "del")
+	if isPut {
+		return PUT
+	} else if isDel {
+		return DELETE
+	} else {
+		return UNKNOWN
+	}
+}
+
+func (c *RedisClient) addChannelMap(key string, channelMap map[chan *Event]*redis.PubSub) []map[chan *Event]*redis.PubSub {
+
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]*redis.PubSub), channelMap)
+	} else {
+		channels = []map[chan *Event]*redis.PubSub{channelMap}
+	}
+	c.watchedChannels.Store(key, channels)
+
+	return channels.([]map[chan *Event]*redis.PubSub)
+}
+
+func (c *RedisClient) removeChannelMap(key string, pos int) []map[chan *Event]*redis.PubSub {
+	var channels interface{}
+	var exists bool
+
+	if channels, exists = c.watchedChannels.Load(key); exists {
+		channels = append(channels.([]map[chan *Event]*redis.PubSub)[:pos], channels.([]map[chan *Event]*redis.PubSub)[pos+1:]...)
+		c.watchedChannels.Store(key, channels)
+		return channels.([]map[chan *Event]*redis.PubSub)
+	}
+
+	return nil
+}
+
+func (c *RedisClient) getChannelMaps(key string) ([]map[chan *Event]*redis.PubSub, bool) {
+	var channels interface{}
+	var exists bool
+
+	channels, exists = c.watchedChannels.Load(key)
+
+	if channels == nil {
+		return nil, exists
+	}
+
+	return channels.([]map[chan *Event]*redis.PubSub), exists
+}
+
+func (c *RedisClient) Watch(ctx context.Context, key string, withPrefix bool) chan *Event {
+
+	ctx, cancel := context.WithCancel(ctx)
+
+	subscribePath := "__key*__:" + key
+	if withPrefix {
+		subscribePath += "*"
+	}
+	pubsub := c.redisAPI.PSubscribe(ctx, subscribePath)
+	redisCh := pubsub.Channel()
+
+	// Create new channel
+	ch := make(chan *Event, maxClientChannelBufferSize)
+
+	// Keep track of the created channels so they can be closed when required
+	channelMap := make(map[chan *Event]*redis.PubSub)
+	channelMap[ch] = pubsub
+
+	channelMaps := c.addChannelMap(key, channelMap)
+	logger.Debugw(ctx, "watched-channels", log.Fields{"len": len(channelMaps)})
+
+	// Launch a go routine to listen for updates
+	go c.listenForKeyChange(ctx, redisCh, ch, cancel)
+	return ch
+}
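
Watch relies on Redis keyspace notifications (the `__key*__:` subscription pattern and the `set`/`del` payloads interpreted by getMessageType), and those notifications are disabled by default on a Redis server. Below is a sketch of enabling them from a side client and consuming the event channel; setting `notify-keyspace-events` from the application, the address, and the watched prefix are illustrative assumptions rather than anything mandated by this code:

```
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/go-redis/redis/v8"
	"github.com/opencord/voltha-lib-go/v7/pkg/db/kvstore"
)

func main() {
	ctx := context.Background()

	// Keyspace notifications are off by default; "KEA" enables keyspace and
	// keyevent notifications for all command classes.  In production this is
	// normally configured in redis.conf rather than from the client.
	raw := redis.NewClient(&redis.Options{Addr: "127.0.0.1:6379"})
	if err := raw.ConfigSet(ctx, "notify-keyspace-events", "KEA").Err(); err != nil {
		panic(err)
	}

	client, err := kvstore.NewRedisClient("127.0.0.1:6379", 5*time.Second, false)
	if err != nil {
		panic(err)
	}

	// Watch everything under the prefix; the channel delivers PUT/DELETE events.
	events := client.Watch(ctx, "service/voltha/devices/", true)
	for ev := range events {
		fmt.Printf("kv event: %+v\n", ev)
	}
}
```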
+
+func (c *RedisClient) CloseWatch(ctx context.Context, key string, ch chan *Event) {
+	// Get the array of channels mapping
+	var watchedChannels []map[chan *Event]*redis.PubSub
+	var ok bool
+
+	if watchedChannels, ok = c.getChannelMaps(key); !ok {
+		logger.Warnw(ctx, "key-has-no-watched-channels", log.Fields{"key": key})
+		return
+	}
+	// Look for the channels
+	var pos = -1
+	for i, chMap := range watchedChannels {
+		if t, ok := chMap[ch]; ok {
+			logger.Debug(ctx, "channel-found")
+			// Close the Redis watcher before the client channel.  This should close the underlying subscription channel as well
+			if err := t.Close(); err != nil {
+				logger.Errorw(ctx, "watcher-cannot-be-closed", log.Fields{"key": key, "error": err})
+			}
+			pos = i
+			break
+		}
+	}
+
+	channelMaps, _ := c.getChannelMaps(key)
+	// Remove that entry if present
+	if pos >= 0 {
+		channelMaps = c.removeChannelMap(key, pos)
+	}
+	logger.Infow(ctx, "watcher-channel-exiting", log.Fields{"key": key, "channel": channelMaps})
+}
+func (c *RedisClient) AcquireLock(ctx context.Context, lockName string, timeout time.Duration) error {
+	return nil
+}
+
+func (c *RedisClient) ReleaseLock(lockName string) error {
+	return nil
+}
+
+func (c *RedisClient) IsConnectionUp(ctx context.Context) bool {
+	if _, err := c.redisAPI.Set(ctx, "connection-check", "1", 0).Result(); err != nil {
+		return false
+	}
+	return true
+
+}
+
+func (c *RedisClient) Close(ctx context.Context) {
+	if err := c.redisAPI.Close(); err != nil {
+		logger.Errorw(ctx, "error-closing-client", log.Fields{"error": err})
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/common.go
new file mode 100644
index 0000000..2f5ff92
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package flows
+
+import (
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Setup this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
new file mode 100644
index 0000000..46375fa
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/flows/flow_utils.go
@@ -0,0 +1,1658 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package flows
+
+import (
+	"bytes"
+	"context"
+	"crypto/md5"
+	"encoding/binary"
+	"fmt"
+	"hash"
+	"sort"
+	"sync"
+
+	"github.com/cevaris/ordered_map"
+	"github.com/golang/protobuf/proto"
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+	ofp "github.com/opencord/voltha-protos/v5/go/openflow_13"
+)
+
+var (
+	// Instructions shortcut
+	APPLY_ACTIONS  = ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS
+	WRITE_METADATA = ofp.OfpInstructionType_OFPIT_WRITE_METADATA
+	METER_ACTION   = ofp.OfpInstructionType_OFPIT_METER
+
+	//OFPAT_* shortcuts
+	OUTPUT       = ofp.OfpActionType_OFPAT_OUTPUT
+	COPY_TTL_OUT = ofp.OfpActionType_OFPAT_COPY_TTL_OUT
+	COPY_TTL_IN  = ofp.OfpActionType_OFPAT_COPY_TTL_IN
+	SET_MPLS_TTL = ofp.OfpActionType_OFPAT_SET_MPLS_TTL
+	DEC_MPLS_TTL = ofp.OfpActionType_OFPAT_DEC_MPLS_TTL
+	PUSH_VLAN    = ofp.OfpActionType_OFPAT_PUSH_VLAN
+	POP_VLAN     = ofp.OfpActionType_OFPAT_POP_VLAN
+	PUSH_MPLS    = ofp.OfpActionType_OFPAT_PUSH_MPLS
+	POP_MPLS     = ofp.OfpActionType_OFPAT_POP_MPLS
+	SET_QUEUE    = ofp.OfpActionType_OFPAT_SET_QUEUE
+	GROUP        = ofp.OfpActionType_OFPAT_GROUP
+	SET_NW_TTL   = ofp.OfpActionType_OFPAT_SET_NW_TTL
+	NW_TTL       = ofp.OfpActionType_OFPAT_DEC_NW_TTL
+	SET_FIELD    = ofp.OfpActionType_OFPAT_SET_FIELD
+	PUSH_PBB     = ofp.OfpActionType_OFPAT_PUSH_PBB
+	POP_PBB      = ofp.OfpActionType_OFPAT_POP_PBB
+	EXPERIMENTER = ofp.OfpActionType_OFPAT_EXPERIMENTER
+
+	//OFPXMT_OFB_* shortcuts (incomplete)
+	IN_PORT         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT
+	IN_PHY_PORT     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IN_PHY_PORT
+	METADATA        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_METADATA
+	ETH_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_DST
+	ETH_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_SRC
+	ETH_TYPE        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ETH_TYPE
+	VLAN_VID        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID
+	VLAN_PCP        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_VLAN_PCP
+	IP_DSCP         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_DSCP
+	IP_ECN          = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_ECN
+	IP_PROTO        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IP_PROTO
+	IPV4_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV4_SRC
+	IPV4_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV4_DST
+	TCP_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TCP_SRC
+	TCP_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TCP_DST
+	UDP_SRC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_UDP_SRC
+	UDP_DST         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_UDP_DST
+	SCTP_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_SCTP_SRC
+	SCTP_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_SCTP_DST
+	ICMPV4_TYPE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_TYPE
+	ICMPV4_CODE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_CODE
+	ARP_OP          = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_OP
+	ARP_SPA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_SPA
+	ARP_TPA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_TPA
+	ARP_SHA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_SHA
+	ARP_THA         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ARP_THA
+	IPV6_SRC        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_SRC
+	IPV6_DST        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_DST
+	IPV6_FLABEL     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_FLABEL
+	ICMPV6_TYPE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_TYPE
+	ICMPV6_CODE     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_CODE
+	IPV6_ND_TARGET  = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TARGET
+	OFB_IPV6_ND_SLL = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_SLL
+	IPV6_ND_TLL     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TLL
+	MPLS_LABEL      = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_LABEL
+	MPLS_TC         = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_TC
+	MPLS_BOS        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_MPLS_BOS
+	PBB_ISID        = ofp.OxmOfbFieldTypes_OFPXMT_OFB_PBB_ISID
+	TUNNEL_ID       = ofp.OxmOfbFieldTypes_OFPXMT_OFB_TUNNEL_ID
+	IPV6_EXTHDR     = ofp.OxmOfbFieldTypes_OFPXMT_OFB_IPV6_EXTHDR
+)
+
+//ofp_action_* shortcuts
+
+func Output(port uint32, maxLen ...ofp.OfpControllerMaxLen) *ofp.OfpAction {
+	maxLength := ofp.OfpControllerMaxLen_OFPCML_MAX
+	if len(maxLen) > 0 {
+		maxLength = maxLen[0]
+	}
+	return &ofp.OfpAction{Type: OUTPUT, Action: &ofp.OfpAction_Output{Output: &ofp.OfpActionOutput{Port: port, MaxLen: uint32(maxLength)}}}
+}
+
+func MplsTtl(ttl uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: SET_MPLS_TTL, Action: &ofp.OfpAction_MplsTtl{MplsTtl: &ofp.OfpActionMplsTtl{MplsTtl: ttl}}}
+}
+
+func PushVlan(ethType uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: PUSH_VLAN, Action: &ofp.OfpAction_Push{Push: &ofp.OfpActionPush{Ethertype: ethType}}}
+}
+
+func PopVlan() *ofp.OfpAction {
+	return &ofp.OfpAction{Type: POP_VLAN}
+}
+
+func PopMpls(ethType uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: POP_MPLS, Action: &ofp.OfpAction_PopMpls{PopMpls: &ofp.OfpActionPopMpls{Ethertype: ethType}}}
+}
+
+func Group(groupId uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: GROUP, Action: &ofp.OfpAction_Group{Group: &ofp.OfpActionGroup{GroupId: groupId}}}
+}
+
+func NwTtl(nwTtl uint32) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: NW_TTL, Action: &ofp.OfpAction_NwTtl{NwTtl: &ofp.OfpActionNwTtl{NwTtl: nwTtl}}}
+}
+
+func SetField(field *ofp.OfpOxmOfbField) *ofp.OfpAction {
+	actionSetField := &ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: &ofp.OfpOxmField_OfbField{OfbField: field}}
+	return &ofp.OfpAction{Type: SET_FIELD, Action: &ofp.OfpAction_SetField{SetField: &ofp.OfpActionSetField{Field: actionSetField}}}
+}
+
+func Experimenter(experimenter uint32, data []byte) *ofp.OfpAction {
+	return &ofp.OfpAction{Type: EXPERIMENTER, Action: &ofp.OfpAction_Experimenter{Experimenter: &ofp.OfpActionExperimenter{Experimenter: experimenter, Data: data}}}
+}
+
+//ofb_field generators (incomplete set)
+
+func InPort(inPort uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IN_PORT, Value: &ofp.OfpOxmOfbField_Port{Port: inPort}}
+}
+
+func InPhyPort(inPhyPort uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IN_PHY_PORT, Value: &ofp.OfpOxmOfbField_Port{Port: inPhyPort}}
+}
+
+func Metadata_ofp(tableMetadata uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: METADATA, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: tableMetadata}}
+}
+
+// should Metadata_ofp be used here?
+func EthDst(ethDst uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_DST, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: ethDst}}
+}
+
+// should Metadata_ofp be used here?
+func EthSrc(ethSrc uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_SRC, Value: &ofp.OfpOxmOfbField_TableMetadata{TableMetadata: ethSrc}}
+}
+
+func EthType(ethType uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ETH_TYPE, Value: &ofp.OfpOxmOfbField_EthType{EthType: ethType}}
+}
+
+func VlanVid(vlanVid uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: VLAN_VID, Value: &ofp.OfpOxmOfbField_VlanVid{VlanVid: vlanVid}}
+}
+
+func VlanPcp(vlanPcp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: VLAN_PCP, Value: &ofp.OfpOxmOfbField_VlanPcp{VlanPcp: vlanPcp}}
+}
+
+func IpDscp(ipDscp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_DSCP, Value: &ofp.OfpOxmOfbField_IpDscp{IpDscp: ipDscp}}
+}
+
+func IpEcn(ipEcn uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_ECN, Value: &ofp.OfpOxmOfbField_IpEcn{IpEcn: ipEcn}}
+}
+
+func IpProto(ipProto uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IP_PROTO, Value: &ofp.OfpOxmOfbField_IpProto{IpProto: ipProto}}
+}
+
+func Ipv4Src(ipv4Src uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV4_SRC, Value: &ofp.OfpOxmOfbField_Ipv4Src{Ipv4Src: ipv4Src}}
+}
+
+func Ipv4Dst(ipv4Dst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV4_DST, Value: &ofp.OfpOxmOfbField_Ipv4Dst{Ipv4Dst: ipv4Dst}}
+}
+
+func TcpSrc(tcpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TCP_SRC, Value: &ofp.OfpOxmOfbField_TcpSrc{TcpSrc: tcpSrc}}
+}
+
+func TcpDst(tcpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TCP_DST, Value: &ofp.OfpOxmOfbField_TcpDst{TcpDst: tcpDst}}
+}
+
+func UdpSrc(udpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: UDP_SRC, Value: &ofp.OfpOxmOfbField_UdpSrc{UdpSrc: udpSrc}}
+}
+
+func UdpDst(udpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: UDP_DST, Value: &ofp.OfpOxmOfbField_UdpDst{UdpDst: udpDst}}
+}
+
+func SctpSrc(sctpSrc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: SCTP_SRC, Value: &ofp.OfpOxmOfbField_SctpSrc{SctpSrc: sctpSrc}}
+}
+
+func SctpDst(sctpDst uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: SCTP_DST, Value: &ofp.OfpOxmOfbField_SctpDst{SctpDst: sctpDst}}
+}
+
+func Icmpv4Type(icmpv4Type uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV4_TYPE, Value: &ofp.OfpOxmOfbField_Icmpv4Type{Icmpv4Type: icmpv4Type}}
+}
+
+func Icmpv4Code(icmpv4Code uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV4_CODE, Value: &ofp.OfpOxmOfbField_Icmpv4Code{Icmpv4Code: icmpv4Code}}
+}
+
+func ArpOp(arpOp uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_OP, Value: &ofp.OfpOxmOfbField_ArpOp{ArpOp: arpOp}}
+}
+
+func ArpSpa(arpSpa uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_SPA, Value: &ofp.OfpOxmOfbField_ArpSpa{ArpSpa: arpSpa}}
+}
+
+func ArpTpa(arpTpa uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_TPA, Value: &ofp.OfpOxmOfbField_ArpTpa{ArpTpa: arpTpa}}
+}
+
+func ArpSha(arpSha []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_SHA, Value: &ofp.OfpOxmOfbField_ArpSha{ArpSha: arpSha}}
+}
+
+func ArpTha(arpTha []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ARP_THA, Value: &ofp.OfpOxmOfbField_ArpTha{ArpTha: arpTha}}
+}
+
+func Ipv6Src(ipv6Src []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_SRC, Value: &ofp.OfpOxmOfbField_Ipv6Src{Ipv6Src: ipv6Src}}
+}
+
+func Ipv6Dst(ipv6Dst []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_DST, Value: &ofp.OfpOxmOfbField_Ipv6Dst{Ipv6Dst: ipv6Dst}}
+}
+
+func Ipv6Flabel(ipv6Flabel uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_FLABEL, Value: &ofp.OfpOxmOfbField_Ipv6Flabel{Ipv6Flabel: ipv6Flabel}}
+}
+
+func Icmpv6Type(icmpv6Type uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV6_TYPE, Value: &ofp.OfpOxmOfbField_Icmpv6Type{Icmpv6Type: icmpv6Type}}
+}
+
+func Icmpv6Code(icmpv6Code uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: ICMPV6_CODE, Value: &ofp.OfpOxmOfbField_Icmpv6Code{Icmpv6Code: icmpv6Code}}
+}
+
+func Ipv6NdTarget(ipv6NdTarget []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_ND_TARGET, Value: &ofp.OfpOxmOfbField_Ipv6NdTarget{Ipv6NdTarget: ipv6NdTarget}}
+}
+
+func OfbIpv6NdSll(ofbIpv6NdSll []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: OFB_IPV6_ND_SLL, Value: &ofp.OfpOxmOfbField_Ipv6NdSsl{Ipv6NdSsl: ofbIpv6NdSll}}
+}
+
+func Ipv6NdTll(ipv6NdTll []byte) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_ND_TLL, Value: &ofp.OfpOxmOfbField_Ipv6NdTll{Ipv6NdTll: ipv6NdTll}}
+}
+
+func MplsLabel(mplsLabel uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_LABEL, Value: &ofp.OfpOxmOfbField_MplsLabel{MplsLabel: mplsLabel}}
+}
+
+func MplsTc(mplsTc uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_TC, Value: &ofp.OfpOxmOfbField_MplsTc{MplsTc: mplsTc}}
+}
+
+func MplsBos(mplsBos uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: MPLS_BOS, Value: &ofp.OfpOxmOfbField_MplsBos{MplsBos: mplsBos}}
+}
+
+func PbbIsid(pbbIsid uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: PBB_ISID, Value: &ofp.OfpOxmOfbField_PbbIsid{PbbIsid: pbbIsid}}
+}
+
+func TunnelId(tunnelId uint64) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: TUNNEL_ID, Value: &ofp.OfpOxmOfbField_TunnelId{TunnelId: tunnelId}}
+}
+
+func Ipv6Exthdr(ipv6Exthdr uint32) *ofp.OfpOxmOfbField {
+	return &ofp.OfpOxmOfbField{Type: IPV6_EXTHDR, Value: &ofp.OfpOxmOfbField_Ipv6Exthdr{Ipv6Exthdr: ipv6Exthdr}}
+}
+
+//frequently used extractors
+
+func excludeAction(action *ofp.OfpAction, exclude ...ofp.OfpActionType) bool {
+	for _, actionToExclude := range exclude {
+		if action.Type == actionToExclude {
+			return true
+		}
+	}
+	return false
+}
+
+func GetActions(flow *ofp.OfpFlowStats, exclude ...ofp.OfpActionType) []*ofp.OfpAction {
+	if flow == nil {
+		return nil
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			instActions := instruction.GetActions()
+			if instActions == nil {
+				return nil
+			}
+			if len(exclude) == 0 {
+				return instActions.Actions
+			} else {
+				filteredAction := make([]*ofp.OfpAction, 0)
+				for _, action := range instActions.Actions {
+					if !excludeAction(action, exclude...) {
+						filteredAction = append(filteredAction, action)
+					}
+				}
+				return filteredAction
+			}
+		}
+	}
+	return nil
+}
+
+func UpdateOutputPortByActionType(flow *ofp.OfpFlowStats, actionType uint32, toPort uint32) *ofp.OfpFlowStats {
+	if flow == nil {
+		return nil
+	}
+	nFlow := (proto.Clone(flow)).(*ofp.OfpFlowStats)
+	nFlow.Instructions = nil
+	nInsts := make([]*ofp.OfpInstruction, 0)
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == actionType {
+			instActions := instruction.GetActions()
+			if instActions == nil {
+				return nil
+			}
+			nActions := make([]*ofp.OfpAction, 0)
+			for _, action := range instActions.Actions {
+				if action.GetOutput() != nil {
+					nActions = append(nActions, Output(toPort))
+				} else {
+					nActions = append(nActions, action)
+				}
+			}
+			instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: nActions}}
+			nInsts = append(nInsts, &ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction})
+		} else {
+			nInsts = append(nInsts, instruction)
+		}
+	}
+	nFlow.Instructions = nInsts
+	return nFlow
+}
+
+func excludeOxmOfbField(field *ofp.OfpOxmOfbField, exclude ...ofp.OxmOfbFieldTypes) bool {
+	for _, fieldToExclude := range exclude {
+		if field.Type == fieldToExclude {
+			return true
+		}
+	}
+	return false
+}
+
+func GetOfbFields(flow *ofp.OfpFlowStats, exclude ...ofp.OxmOfbFieldTypes) []*ofp.OfpOxmOfbField {
+	if flow == nil || flow.Match == nil || flow.Match.Type != ofp.OfpMatchType_OFPMT_OXM {
+		return nil
+	}
+	ofbFields := make([]*ofp.OfpOxmOfbField, 0)
+	for _, field := range flow.Match.OxmFields {
+		if field.OxmClass == ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC {
+			ofbFields = append(ofbFields, field.GetOfbField())
+		}
+	}
+	if len(exclude) == 0 {
+		return ofbFields
+	} else {
+		filteredFields := make([]*ofp.OfpOxmOfbField, 0)
+		for _, ofbField := range ofbFields {
+			if !excludeOxmOfbField(ofbField, exclude...) {
+				filteredFields = append(filteredFields, ofbField)
+			}
+		}
+		return filteredFields
+	}
+}
+
+func GetPacketOutPort(packet *ofp.OfpPacketOut) uint32 {
+	if packet == nil {
+		return 0
+	}
+	for _, action := range packet.GetActions() {
+		if action.Type == OUTPUT {
+			return action.GetOutput().Port
+		}
+	}
+	return 0
+}
+
+func GetOutPort(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, action := range GetActions(flow) {
+		if action.Type == OUTPUT {
+			out := action.GetOutput()
+			if out == nil {
+				return 0
+			}
+			return out.GetPort()
+		}
+	}
+	return 0
+}
+
+func GetInPort(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == IN_PORT {
+			return field.GetPort()
+		}
+	}
+	return 0
+}
+
+func GetGotoTableId(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_GOTO_TABLE) {
+			gotoTable := instruction.GetGotoTable()
+			if gotoTable == nil {
+				return 0
+			}
+			return gotoTable.GetTableId()
+		}
+	}
+	return 0
+}
+
+func GetMeterId(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_METER) {
+			MeterInstruction := instruction.GetMeter()
+			if MeterInstruction == nil {
+				return 0
+			}
+			return MeterInstruction.GetMeterId()
+		}
+	}
+	return 0
+}
+
+func GetVlanVid(flow *ofp.OfpFlowStats) *uint32 {
+	if flow == nil {
+		return nil
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == VLAN_VID {
+			ret := field.GetVlanVid()
+			return &ret
+		}
+	}
+	// Don't return 0 if the field is missing as vlan id value 0 has a meaning and cannot be overloaded as "not found"
+	return nil
+}
+
+func GetSetActionField(ctx context.Context, flow *ofp.OfpFlowStats, ofbType ofp.OxmOfbFieldTypes) (uint32, bool) {
+	if flow == nil {
+		return 0, false
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(APPLY_ACTIONS) {
+			actions := instruction.GetActions()
+			for _, action := range actions.GetActions() {
+				if action.Type == SET_FIELD {
+					setField := action.GetSetField()
+					if setField.Field.GetOfbField().Type == ofbType {
+						switch ofbType {
+						case VLAN_PCP:
+							return setField.Field.GetOfbField().GetVlanPcp(), true
+						case VLAN_VID:
+							return setField.Field.GetOfbField().GetVlanVid(), true
+						default:
+							logger.Errorw(ctx, "unsupported-ofb-field-type", log.Fields{"ofbType": ofbType})
+							return 0, false
+						}
+					}
+				}
+			}
+			return 0, false
+		}
+	}
+	return 0, false
+}
+
+func GetTunnelId(flow *ofp.OfpFlowStats) uint64 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == TUNNEL_ID {
+			return field.GetTunnelId()
+		}
+	}
+	return 0
+}
+
+//GetMetaData - legacy get method (only want lower 32 bits)
+func GetMetaData(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == METADATA {
+			return uint32(field.GetTableMetadata() & 0xFFFFFFFF)
+		}
+	}
+	logger.Debug(ctx, "No-metadata-present")
+	return 0
+}
+
+func GetMetaData64Bit(ctx context.Context, flow *ofp.OfpFlowStats) uint64 {
+	if flow == nil {
+		return 0
+	}
+	for _, field := range GetOfbFields(flow) {
+		if field.Type == METADATA {
+			return field.GetTableMetadata()
+		}
+	}
+	logger.Debug(ctx, "No-metadata-present")
+	return 0
+}
+
+// GetMetadataFromWriteMetadataAction returns the metadata value carried by the write_metadata instruction, if present
+func GetMetadataFromWriteMetadataAction(ctx context.Context, flow *ofp.OfpFlowStats) uint64 {
+	if flow != nil {
+		for _, instruction := range flow.Instructions {
+			if instruction.Type == uint32(WRITE_METADATA) {
+				if writeMetadata := instruction.GetWriteMetadata(); writeMetadata != nil {
+					return writeMetadata.GetMetadata()
+				}
+			}
+		}
+	}
+	logger.Debugw(ctx, "No-write-metadata-present", log.Fields{"flow": flow})
+	return 0
+}
+
+func GetTechProfileIDFromWriteMetaData(ctx context.Context, metadata uint64) uint16 {
+	/*
+	   Write metadata instruction value (metadata) is 8 bytes:
+	   MS 2 bytes: C Tag
+	   Next 2 bytes: Technology Profile Id
+	   Next 4 bytes: Port number (uni or nni)
+
+	   This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var tpId uint16 = 0
+	logger.Debugw(ctx, "Write metadata value for Techprofile ID", log.Fields{"metadata": metadata})
+	if metadata != 0 {
+		tpId = uint16((metadata >> 32) & 0xFFFF)
+		logger.Debugw(ctx, "Found techprofile ID from write metadata action", log.Fields{"tpid": tpId})
+	}
+	return tpId
+}
+
+func GetEgressPortNumberFromWriteMetadata(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
+	/*
+			  Write metadata instruction value (metadata) is 8 bytes:
+		    	MS 2 bytes: C Tag
+		    	Next 2 bytes: Technology Profile Id
+		    	Next 4 bytes: Port number (uni or nni)
+		    	This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var uniPort uint32 = 0
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
+	logger.Debugw(ctx, "Metadata found for egress/uni port ", log.Fields{"metadata": md})
+	if md != 0 {
+		uniPort = uint32(md & 0xFFFFFFFF)
+		logger.Debugw(ctx, "Found EgressPort from write metadata action", log.Fields{"egress_port": uniPort})
+	}
+	return uniPort
+
+}
+
+func GetInnerTagFromMetaData(ctx context.Context, flow *ofp.OfpFlowStats) uint16 {
+	/*
+			  Write metadata instruction value (metadata) is 8 bytes:
+		    	MS 2 bytes: C Tag
+		    	Next 2 bytes: Technology Profile Id
+		    	Next 4 bytes: Port number (uni or nni)
+		    	This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var innerTag uint16 = 0
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
+	if md != 0 {
+		innerTag = uint16((md >> 48) & 0xFFFF)
+		logger.Debugw(ctx, "Found CVLAN from write metadata action", log.Fields{"c_vlan": innerTag})
+	}
+	return innerTag
+}
+
+func GetInnerTagFromWriteMetaData(ctx context.Context, metadata uint64) uint16 {
+	/*
+			  Write metadata instruction value (metadata) is 8 bytes:
+		    	MS 2 bytes: C Tag
+		    	Next 2 bytes: Technology Profile Id
+		    	Next 4 bytes: Port number (uni or nni)
+		    	This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var innerTag uint16 = 0
+	if metadata != 0 {
+		innerTag = uint16((metadata >> 48) & 0xFFFF)
+		logger.Debugw(ctx, "Found CVLAN from write metadata action", log.Fields{"c_vlan": innerTag})
+	}
+	return innerTag
+}
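
The three helpers above all decode the same 64-bit write-metadata layout: C-tag in bits 63..48, technology profile ID in bits 47..32, and the egress/UNI port in bits 31..0. A small worked example with a made-up value:

```
package main

import "fmt"

func main() {
	// Illustrative value only: C-tag 0x0064 (100), TP ID 0x0041 (65), UNI port 0x00000100 (256).
	metadata := uint64(0x0064)<<48 | uint64(0x0041)<<32 | uint64(0x00000100)

	cTag := uint16((metadata >> 48) & 0xFFFF) // same math as GetInnerTagFromWriteMetaData
	tpID := uint16((metadata >> 32) & 0xFFFF) // same math as GetTechProfileIDFromWriteMetaData
	port := uint32(metadata & 0xFFFFFFFF)     // same math as GetEgressPortNumberFromWriteMetadata

	fmt.Println(cTag, tpID, port) // prints: 100 65 256
}
```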
+
+//GetInnerTagFromMetaData retrieves the inner tag from the Metadata_ofp. The port number (UNI on ONU) is in the
+// lower 32-bits of Metadata_ofp and the inner_tag is in the upper 32-bits. This is set in the ONOS OltPipeline as
+//// a Metadata_ofp field
+/*func GetInnerTagFromMetaData(flow *ofp.OfpFlowStats) uint64 {
+	md := GetMetaData64Bit(flow)
+	if md == 0 {
+		return 0
+	}
+	if md <= 0xffffffff {
+		logger.Debugw(ctx, "onos-upgrade-suggested", logger.Fields{"Metadata_ofp": md, "message": "Legacy MetaData detected form OltPipeline"})
+		return md
+	}
+	return (md >> 32) & 0xffffffff
+}*/
+
+// Extract the child device port from a flow that contains the parent device peer port.  Typically the UNI port of an
+// ONU child device.  Per TST agreement this will be the lower 32 bits of tunnel id reserving upper 32 bits for later
+// use
+func GetChildPortFromTunnelId(flow *ofp.OfpFlowStats) uint32 {
+	tid := GetTunnelId(flow)
+	if tid == 0 {
+		return 0
+	}
+	// Per TST agreement we are keeping any child port id (uni port id) in the lower 32 bits
+	return uint32(tid & 0xffffffff)
+}
+
+func HasNextTable(flow *ofp.OfpFlowStats) bool {
+	if flow == nil {
+		return false
+	}
+	return GetGotoTableId(flow) != 0
+}
+
+func GetGroup(flow *ofp.OfpFlowStats) uint32 {
+	if flow == nil {
+		return 0
+	}
+	for _, action := range GetActions(flow) {
+		if action.Type == GROUP {
+			grp := action.GetGroup()
+			if grp == nil {
+				return 0
+			}
+			return grp.GetGroupId()
+		}
+	}
+	return 0
+}
+
+func HasGroup(flow *ofp.OfpFlowStats) bool {
+	return GetGroup(flow) != 0
+}
+
+// GetNextTableId returns the next table ID if the "table_id" is present in the map, otherwise return nil
+func GetNextTableId(kw OfpFlowModArgs) *uint32 {
+	if val, exist := kw["table_id"]; exist {
+		ret := uint32(val)
+		return &ret
+	}
+	return nil
+}
+
+// GetMeterIdFlowModArgs returns the meterId if the "meter_id" is present in the map, otherwise return 0
+func GetMeterIdFlowModArgs(kw OfpFlowModArgs) uint32 {
+	if val, exist := kw["meter_id"]; exist {
+		return uint32(val)
+	}
+	return 0
+}
+
+// Function returns the metadata if the "write_metadata" is present in the map, otherwise return nil
+func GetMetadataFlowModArgs(kw OfpFlowModArgs) uint64 {
+	if val, exist := kw["write_metadata"]; exist {
+		ret := uint64(val)
+		return ret
+	}
+	return 0
+}
+
+// HashFlowStats returns a unique 64-bit integer hash of 'table_id', 'priority', and 'match'
+// The OF spec states that:
+// A flow table entry is identified by its match fields and priority: the match fields
+// and priority taken together identify a unique flow entry in the flow table.
+func HashFlowStats(flow *ofp.OfpFlowStats) (uint64, error) {
+	// first we need to make sure the oxm fields are in a predictable order (the specific order doesn't matter)
+	sort.Slice(flow.Match.OxmFields, func(a, b int) bool {
+		fieldsA, fieldsB := flow.Match.OxmFields[a], flow.Match.OxmFields[b]
+		if fieldsA.OxmClass != fieldsB.OxmClass {
+			return fieldsA.OxmClass < fieldsB.OxmClass
+		}
+		switch fieldA := fieldsA.Field.(type) {
+		case *ofp.OfpOxmField_OfbField:
+			switch fieldB := fieldsB.Field.(type) {
+			case *ofp.OfpOxmField_ExperimenterField:
+				return true // ofp < experimenter
+			case *ofp.OfpOxmField_OfbField:
+				return fieldA.OfbField.Type < fieldB.OfbField.Type
+			}
+		case *ofp.OfpOxmField_ExperimenterField:
+			switch fieldB := fieldsB.Field.(type) {
+			case *ofp.OfpOxmField_OfbField:
+				return false // ofp < experimenter
+			case *ofp.OfpOxmField_ExperimenterField:
+				eFieldA, eFieldB := fieldA.ExperimenterField, fieldB.ExperimenterField
+				if eFieldA.Experimenter != eFieldB.Experimenter {
+					return eFieldA.Experimenter < eFieldB.Experimenter
+				}
+				return eFieldA.OxmHeader < eFieldB.OxmHeader
+			}
+		}
+		return false
+	})
+
+	md5Hash := md5.New() // note that write errors will never occur with md5 hashing
+	var tmp [12]byte
+
+	binary.BigEndian.PutUint32(tmp[0:4], flow.TableId)             // tableId
+	binary.BigEndian.PutUint32(tmp[4:8], flow.Priority)            // priority
+	binary.BigEndian.PutUint32(tmp[8:12], uint32(flow.Match.Type)) // match type
+	_, _ = md5Hash.Write(tmp[:12])
+
+	for _, field := range flow.Match.OxmFields { // for all match fields
+		binary.BigEndian.PutUint32(tmp[:4], uint32(field.OxmClass)) // match class
+		_, _ = md5Hash.Write(tmp[:4])
+
+		switch oxmField := field.Field.(type) {
+		case *ofp.OfpOxmField_ExperimenterField:
+			binary.BigEndian.PutUint32(tmp[0:4], oxmField.ExperimenterField.Experimenter)
+			binary.BigEndian.PutUint32(tmp[4:8], oxmField.ExperimenterField.OxmHeader)
+			_, _ = md5Hash.Write(tmp[:8])
+
+		case *ofp.OfpOxmField_OfbField:
+			if err := hashWriteOfbField(md5Hash, oxmField.OfbField); err != nil {
+				return 0, err
+			}
+
+		default:
+			return 0, fmt.Errorf("unknown OfpOxmField type: %T", field.Field)
+		}
+	}
+
+	ret := md5Hash.Sum(nil)
+	return binary.BigEndian.Uint64(ret[0:8]), nil
+}
+
+func hashWriteOfbField(md5Hash hash.Hash, field *ofp.OfpOxmOfbField) error {
+	var tmp [8]byte
+	binary.BigEndian.PutUint32(tmp[:4], uint32(field.Type)) // type
+	_, _ = md5Hash.Write(tmp[:4])
+
+	// value
+	valType, val32, val64, valSlice := uint8(0), uint32(0), uint64(0), []byte(nil)
+	switch val := field.Value.(type) {
+	case *ofp.OfpOxmOfbField_Port:
+		valType, val32 = 4, val.Port
+	case *ofp.OfpOxmOfbField_PhysicalPort:
+		valType, val32 = 4, val.PhysicalPort
+	case *ofp.OfpOxmOfbField_TableMetadata:
+		valType, val64 = 8, val.TableMetadata
+	case *ofp.OfpOxmOfbField_EthDst:
+		valType, valSlice = 1, val.EthDst
+	case *ofp.OfpOxmOfbField_EthSrc:
+		valType, valSlice = 1, val.EthSrc
+	case *ofp.OfpOxmOfbField_EthType:
+		valType, val32 = 4, val.EthType
+	case *ofp.OfpOxmOfbField_VlanVid:
+		valType, val32 = 4, val.VlanVid
+	case *ofp.OfpOxmOfbField_VlanPcp:
+		valType, val32 = 4, val.VlanPcp
+	case *ofp.OfpOxmOfbField_IpDscp:
+		valType, val32 = 4, val.IpDscp
+	case *ofp.OfpOxmOfbField_IpEcn:
+		valType, val32 = 4, val.IpEcn
+	case *ofp.OfpOxmOfbField_IpProto:
+		valType, val32 = 4, val.IpProto
+	case *ofp.OfpOxmOfbField_Ipv4Src:
+		valType, val32 = 4, val.Ipv4Src
+	case *ofp.OfpOxmOfbField_Ipv4Dst:
+		valType, val32 = 4, val.Ipv4Dst
+	case *ofp.OfpOxmOfbField_TcpSrc:
+		valType, val32 = 4, val.TcpSrc
+	case *ofp.OfpOxmOfbField_TcpDst:
+		valType, val32 = 4, val.TcpDst
+	case *ofp.OfpOxmOfbField_UdpSrc:
+		valType, val32 = 4, val.UdpSrc
+	case *ofp.OfpOxmOfbField_UdpDst:
+		valType, val32 = 4, val.UdpDst
+	case *ofp.OfpOxmOfbField_SctpSrc:
+		valType, val32 = 4, val.SctpSrc
+	case *ofp.OfpOxmOfbField_SctpDst:
+		valType, val32 = 4, val.SctpDst
+	case *ofp.OfpOxmOfbField_Icmpv4Type:
+		valType, val32 = 4, val.Icmpv4Type
+	case *ofp.OfpOxmOfbField_Icmpv4Code:
+		valType, val32 = 4, val.Icmpv4Code
+	case *ofp.OfpOxmOfbField_ArpOp:
+		valType, val32 = 4, val.ArpOp
+	case *ofp.OfpOxmOfbField_ArpSpa:
+		valType, val32 = 4, val.ArpSpa
+	case *ofp.OfpOxmOfbField_ArpTpa:
+		valType, val32 = 4, val.ArpTpa
+	case *ofp.OfpOxmOfbField_ArpSha:
+		valType, valSlice = 1, val.ArpSha
+	case *ofp.OfpOxmOfbField_ArpTha:
+		valType, valSlice = 1, val.ArpTha
+	case *ofp.OfpOxmOfbField_Ipv6Src:
+		valType, valSlice = 1, val.Ipv6Src
+	case *ofp.OfpOxmOfbField_Ipv6Dst:
+		valType, valSlice = 1, val.Ipv6Dst
+	case *ofp.OfpOxmOfbField_Ipv6Flabel:
+		valType, val32 = 4, val.Ipv6Flabel
+	case *ofp.OfpOxmOfbField_Icmpv6Type:
+		valType, val32 = 4, val.Icmpv6Type
+	case *ofp.OfpOxmOfbField_Icmpv6Code:
+		valType, val32 = 4, val.Icmpv6Code
+	case *ofp.OfpOxmOfbField_Ipv6NdTarget:
+		valType, valSlice = 1, val.Ipv6NdTarget
+	case *ofp.OfpOxmOfbField_Ipv6NdSsl:
+		valType, valSlice = 1, val.Ipv6NdSsl
+	case *ofp.OfpOxmOfbField_Ipv6NdTll:
+		valType, valSlice = 1, val.Ipv6NdTll
+	case *ofp.OfpOxmOfbField_MplsLabel:
+		valType, val32 = 4, val.MplsLabel
+	case *ofp.OfpOxmOfbField_MplsTc:
+		valType, val32 = 4, val.MplsTc
+	case *ofp.OfpOxmOfbField_MplsBos:
+		valType, val32 = 4, val.MplsBos
+	case *ofp.OfpOxmOfbField_PbbIsid:
+		valType, val32 = 4, val.PbbIsid
+	case *ofp.OfpOxmOfbField_TunnelId:
+		valType, val64 = 8, val.TunnelId
+	case *ofp.OfpOxmOfbField_Ipv6Exthdr:
+		valType, val32 = 4, val.Ipv6Exthdr
+	default:
+		return fmt.Errorf("unknown OfpOxmField value type: %T", val)
+	}
+	switch valType {
+	case 1: // slice
+		_, _ = md5Hash.Write(valSlice)
+	case 4: // uint32
+		binary.BigEndian.PutUint32(tmp[:4], val32)
+		_, _ = md5Hash.Write(tmp[:4])
+	case 8: // uint64
+		binary.BigEndian.PutUint64(tmp[:8], val64)
+		_, _ = md5Hash.Write(tmp[:8])
+	}
+
+	// mask
+	if !field.HasMask {
+		tmp[0] = 0x00
+		_, _ = md5Hash.Write(tmp[:1]) // match hasMask = false
+	} else {
+		tmp[0] = 0x01
+		_, _ = md5Hash.Write(tmp[:1]) // match hasMask = true
+
+		maskType, mask32, mask64, maskSlice := uint8(0), uint32(0), uint64(0), []byte(nil)
+		switch mask := field.Mask.(type) {
+		case *ofp.OfpOxmOfbField_TableMetadataMask:
+			maskType, mask64 = 8, mask.TableMetadataMask
+		case *ofp.OfpOxmOfbField_EthDstMask:
+			maskType, maskSlice = 1, mask.EthDstMask
+		case *ofp.OfpOxmOfbField_EthSrcMask:
+			maskType, maskSlice = 1, mask.EthSrcMask
+		case *ofp.OfpOxmOfbField_VlanVidMask:
+			maskType, mask32 = 4, mask.VlanVidMask
+		case *ofp.OfpOxmOfbField_Ipv4SrcMask:
+			maskType, mask32 = 4, mask.Ipv4SrcMask
+		case *ofp.OfpOxmOfbField_Ipv4DstMask:
+			maskType, mask32 = 4, mask.Ipv4DstMask
+		case *ofp.OfpOxmOfbField_ArpSpaMask:
+			maskType, mask32 = 4, mask.ArpSpaMask
+		case *ofp.OfpOxmOfbField_ArpTpaMask:
+			maskType, mask32 = 4, mask.ArpTpaMask
+		case *ofp.OfpOxmOfbField_Ipv6SrcMask:
+			maskType, maskSlice = 1, mask.Ipv6SrcMask
+		case *ofp.OfpOxmOfbField_Ipv6DstMask:
+			maskType, maskSlice = 1, mask.Ipv6DstMask
+		case *ofp.OfpOxmOfbField_Ipv6FlabelMask:
+			maskType, mask32 = 4, mask.Ipv6FlabelMask
+		case *ofp.OfpOxmOfbField_PbbIsidMask:
+			maskType, mask32 = 4, mask.PbbIsidMask
+		case *ofp.OfpOxmOfbField_TunnelIdMask:
+			maskType, mask64 = 8, mask.TunnelIdMask
+		case *ofp.OfpOxmOfbField_Ipv6ExthdrMask:
+			maskType, mask32 = 4, mask.Ipv6ExthdrMask
+		case nil:
+			return fmt.Errorf("hasMask set to true, but no mask present")
+		default:
+			return fmt.Errorf("unknown OfpOxmField mask type: %T", mask)
+		}
+		switch maskType {
+		case 1: // slice
+			_, _ = md5Hash.Write(maskSlice)
+		case 4: // uint32
+			binary.BigEndian.PutUint32(tmp[:4], mask32)
+			_, _ = md5Hash.Write(tmp[:4])
+		case 8: // uint64
+			binary.BigEndian.PutUint64(tmp[:8], mask64)
+			_, _ = md5Hash.Write(tmp[:8])
+		}
+	}
+	return nil
+}
+
+// FlowStatsEntryFromFlowModMessage maps an ofp_flow_mod message to an ofp_flow_stats message
+func FlowStatsEntryFromFlowModMessage(mod *ofp.OfpFlowMod) (*ofp.OfpFlowStats, error) {
+	flow := &ofp.OfpFlowStats{}
+	if mod == nil {
+		return flow, nil
+	}
+	flow.TableId = mod.TableId
+	flow.Priority = mod.Priority
+	flow.IdleTimeout = mod.IdleTimeout
+	flow.HardTimeout = mod.HardTimeout
+	flow.Flags = mod.Flags
+	flow.Cookie = mod.Cookie
+	flow.Match = mod.Match
+	flow.Instructions = mod.Instructions
+	var err error
+	if flow.Id, err = HashFlowStats(flow); err != nil {
+		return nil, err
+	}
+
+	return flow, nil
+}
+
+func GroupEntryFromGroupMod(mod *ofp.OfpGroupMod) *ofp.OfpGroupEntry {
+	group := &ofp.OfpGroupEntry{}
+	if mod == nil {
+		return group
+	}
+	group.Desc = &ofp.OfpGroupDesc{Type: mod.Type, GroupId: mod.GroupId, Buckets: mod.Buckets}
+	group.Stats = &ofp.OfpGroupStats{GroupId: mod.GroupId}
+	//TODO do we need to instantiate bucket bins?
+	return group
+}
+
+// MeterEntryFromMeterMod maps an ofp_meter_mod message to an ofp_meter_entry message
+func MeterEntryFromMeterMod(ctx context.Context, meterMod *ofp.OfpMeterMod) *ofp.OfpMeterEntry {
+	bandStats := make([]*ofp.OfpMeterBandStats, 0)
+	meter := &ofp.OfpMeterEntry{Config: &ofp.OfpMeterConfig{},
+		Stats: &ofp.OfpMeterStats{BandStats: bandStats}}
+	if meterMod == nil {
+		logger.Error(ctx, "Invalid meter mod command")
+		return meter
+	}
+	// config init
+	meter.Config.MeterId = meterMod.MeterId
+	meter.Config.Flags = meterMod.Flags
+	meter.Config.Bands = meterMod.Bands
+	// meter stats init
+	meter.Stats.MeterId = meterMod.MeterId
+	meter.Stats.FlowCount = 0
+	meter.Stats.PacketInCount = 0
+	meter.Stats.ByteInCount = 0
+	meter.Stats.DurationSec = 0
+	meter.Stats.DurationNsec = 0
+	// band stats init
+	for range meterMod.Bands {
+		band := &ofp.OfpMeterBandStats{}
+		band.PacketBandCount = 0
+		band.ByteBandCount = 0
+		bandStats = append(bandStats, band)
+	}
+	meter.Stats.BandStats = bandStats
+	logger.Debugw(ctx, "Allocated meter entry", log.Fields{"meter": *meter})
+	return meter
+
+}
+
+func GetMeterIdFromFlow(flow *ofp.OfpFlowStats) uint32 {
+	if flow != nil {
+		for _, instruction := range flow.Instructions {
+			if instruction.Type == uint32(METER_ACTION) {
+				if meterInst := instruction.GetMeter(); meterInst != nil {
+					return meterInst.GetMeterId()
+				}
+			}
+		}
+	}
+
+	return uint32(0)
+}
+
+func MkOxmFields(matchFields []ofp.OfpOxmField) []*ofp.OfpOxmField {
+	oxmFields := make([]*ofp.OfpOxmField, 0)
+	for _, matchField := range matchFields {
+		oxmField := ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: matchField.Field}
+		oxmFields = append(oxmFields, &oxmField)
+	}
+	return oxmFields
+}
+
+func MkInstructionsFromActions(actions []*ofp.OfpAction) []*ofp.OfpInstruction {
+	instructions := make([]*ofp.OfpInstruction, 0)
+	instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: actions}}
+	instruction := ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction}
+	instructions = append(instructions, &instruction)
+	return instructions
+}
+
+// MkSimpleFlowMod is a convenience function to generate an ofp_flow_mod message with an OXM BASIC match composed from the match_fields, and a
+// single APPLY_ACTIONS instruction with a list of ofp_action objects.
+func MkSimpleFlowMod(matchFields []*ofp.OfpOxmField, actions []*ofp.OfpAction, command *ofp.OfpFlowModCommand, kw OfpFlowModArgs) *ofp.OfpFlowMod {
+
+	// Process actions instructions
+	instructions := make([]*ofp.OfpInstruction, 0)
+	instructionAction := ofp.OfpInstruction_Actions{Actions: &ofp.OfpInstructionActions{Actions: actions}}
+	instruction := ofp.OfpInstruction{Type: uint32(APPLY_ACTIONS), Data: &instructionAction}
+	instructions = append(instructions, &instruction)
+
+	// Process next table
+	if tableId := GetNextTableId(kw); tableId != nil {
+		var instGotoTable ofp.OfpInstruction_GotoTable
+		instGotoTable.GotoTable = &ofp.OfpInstructionGotoTable{TableId: *tableId}
+		inst := ofp.OfpInstruction{Type: uint32(ofp.OfpInstructionType_OFPIT_GOTO_TABLE), Data: &instGotoTable}
+		instructions = append(instructions, &inst)
+	}
+	// Process meter action
+	if meterId := GetMeterIdFlowModArgs(kw); meterId != 0 {
+		var instMeter ofp.OfpInstruction_Meter
+		instMeter.Meter = &ofp.OfpInstructionMeter{MeterId: meterId}
+		inst := ofp.OfpInstruction{Type: uint32(METER_ACTION), Data: &instMeter}
+		instructions = append(instructions, &inst)
+	}
+	//process write_metadata action
+	if metadata := GetMetadataFlowModArgs(kw); metadata != 0 {
+		var instWriteMetadata ofp.OfpInstruction_WriteMetadata
+		instWriteMetadata.WriteMetadata = &ofp.OfpInstructionWriteMetadata{Metadata: metadata}
+		inst := ofp.OfpInstruction{Type: uint32(WRITE_METADATA), Data: &instWriteMetadata}
+		instructions = append(instructions, &inst)
+	}
+
+	// Process match fields
+	oxmFields := make([]*ofp.OfpOxmField, 0)
+	for _, matchField := range matchFields {
+		oxmField := ofp.OfpOxmField{OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC, Field: matchField.Field}
+		oxmFields = append(oxmFields, &oxmField)
+	}
+	var match ofp.OfpMatch
+	match.Type = ofp.OfpMatchType_OFPMT_OXM
+	match.OxmFields = oxmFields
+
+	// Create ofp_flow_message
+	msg := &ofp.OfpFlowMod{}
+	if command == nil {
+		msg.Command = ofp.OfpFlowModCommand_OFPFC_ADD
+	} else {
+		msg.Command = *command
+	}
+	msg.Instructions = instructions
+	msg.Match = &match
+
+	// Set the variadic argument values
+	msg = setVariadicModAttributes(msg, kw)
+
+	return msg
+}
+
+func MkMulticastGroupMod(groupId uint32, buckets []*ofp.OfpBucket, command *ofp.OfpGroupModCommand) *ofp.OfpGroupMod {
+	group := &ofp.OfpGroupMod{}
+	if command == nil {
+		group.Command = ofp.OfpGroupModCommand_OFPGC_ADD
+	} else {
+		group.Command = *command
+	}
+	group.Type = ofp.OfpGroupType_OFPGT_ALL
+	group.GroupId = groupId
+	group.Buckets = buckets
+	return group
+}
+
+// setVariadicModAttributes sets only the uint64 or uint32 fields of the ofp_flow_mod message
+func setVariadicModAttributes(mod *ofp.OfpFlowMod, args OfpFlowModArgs) *ofp.OfpFlowMod {
+	if args == nil {
+		return mod
+	}
+	for key, val := range args {
+		switch key {
+		case "cookie":
+			mod.Cookie = val
+		case "cookie_mask":
+			mod.CookieMask = val
+		case "table_id":
+			mod.TableId = uint32(val)
+		case "idle_timeout":
+			mod.IdleTimeout = uint32(val)
+		case "hard_timeout":
+			mod.HardTimeout = uint32(val)
+		case "priority":
+			mod.Priority = uint32(val)
+		case "buffer_id":
+			mod.BufferId = uint32(val)
+		case "out_port":
+			mod.OutPort = uint32(val)
+		case "out_group":
+			mod.OutGroup = uint32(val)
+		case "flags":
+			mod.Flags = uint32(val)
+		}
+	}
+	return mod
+}
+
+func MkPacketIn(port uint32, packet []byte) *ofp.OfpPacketIn {
+	packetIn := &ofp.OfpPacketIn{
+		Reason: ofp.OfpPacketInReason_OFPR_ACTION,
+		Match: &ofp.OfpMatch{
+			Type: ofp.OfpMatchType_OFPMT_OXM,
+			OxmFields: []*ofp.OfpOxmField{
+				{
+					OxmClass: ofp.OfpOxmClass_OFPXMC_OPENFLOW_BASIC,
+					Field: &ofp.OfpOxmField_OfbField{
+						OfbField: InPort(port)},
+				},
+			},
+		},
+		Data: packet,
+	}
+	return packetIn
+}
+
+// MkFlowStat is a helper method to build flows
+func MkFlowStat(fa *FlowArgs) (*ofp.OfpFlowStats, error) {
+	//Build the match-fields
+	matchFields := make([]*ofp.OfpOxmField, 0)
+	for _, val := range fa.MatchFields {
+		matchFields = append(matchFields, &ofp.OfpOxmField{Field: &ofp.OfpOxmField_OfbField{OfbField: val}})
+	}
+	return FlowStatsEntryFromFlowModMessage(MkSimpleFlowMod(matchFields, fa.Actions, fa.Command, fa.KV))
+}
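
MkFlowStat ties the pieces above together: the match-field and action builders feed MkSimpleFlowMod, and the resulting flow's Id is the HashFlowStats value. A sketch of building a typical controller-bound flow follows; the ports, VLANs, EtherType, and priority are illustrative values chosen for the example, not anything prescribed by this package:

```
package main

import (
	"fmt"

	fu "github.com/opencord/voltha-lib-go/v7/pkg/flows"
	ofp "github.com/opencord/voltha-protos/v5/go/openflow_13"
)

func main() {
	// Illustrative UNI port, VLAN and priority only.
	fa := &fu.FlowArgs{
		KV: fu.OfpFlowModArgs{"priority": 1000, "table_id": 0},
		MatchFields: []*ofp.OfpOxmOfbField{
			fu.InPort(16),
			fu.EthType(0x888e), // EAPOL
			fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 4091),
		},
		Actions: []*ofp.OfpAction{
			fu.SetField(fu.VlanVid(uint32(ofp.OfpVlanId_OFPVID_PRESENT) | 4091)),
			fu.Output(uint32(ofp.OfpPortNo_OFPP_CONTROLLER)),
		},
	}

	flow, err := fu.MkFlowStat(fa)
	if err != nil {
		panic(err)
	}
	// The flow Id is the 64-bit hash computed by HashFlowStats above.
	fmt.Printf("flow id: 0x%x\n", flow.Id)
}
```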
+
+func MkGroupStat(ga *GroupArgs) *ofp.OfpGroupEntry {
+	return GroupEntryFromGroupMod(MkMulticastGroupMod(ga.GroupId, ga.Buckets, ga.Command))
+}
+
+type OfpFlowModArgs map[string]uint64
+
+type FlowArgs struct {
+	MatchFields []*ofp.OfpOxmOfbField
+	Actions     []*ofp.OfpAction
+	Command     *ofp.OfpFlowModCommand
+	Priority    uint32
+	KV          OfpFlowModArgs
+}
+
+type GroupArgs struct {
+	GroupId uint32
+	Buckets []*ofp.OfpBucket
+	Command *ofp.OfpGroupModCommand
+}
+
+type FlowsAndGroups struct {
+	Flows  *ordered_map.OrderedMap
+	Groups *ordered_map.OrderedMap
+}
+
+func NewFlowsAndGroups() *FlowsAndGroups {
+	var fg FlowsAndGroups
+	fg.Flows = ordered_map.NewOrderedMap()
+	fg.Groups = ordered_map.NewOrderedMap()
+	return &fg
+}
+
+func (fg *FlowsAndGroups) Copy() *FlowsAndGroups {
+	copyFG := NewFlowsAndGroups()
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			copyFG.Flows.Set(kv.Key, proto.Clone(protoMsg))
+		}
+	}
+	iter = fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			copyFG.Groups.Set(kv.Key, proto.Clone(protoMsg))
+		}
+	}
+	return copyFG
+}
+
+func (fg *FlowsAndGroups) GetFlow(index int) *ofp.OfpFlowStats {
+	iter := fg.Flows.IterFunc()
+	pos := 0
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if pos == index {
+			if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+				return protoMsg
+			}
+			return nil
+		}
+		pos += 1
+	}
+	return nil
+}
+
+func (fg *FlowsAndGroups) ListFlows() []*ofp.OfpFlowStats {
+	flows := make([]*ofp.OfpFlowStats, 0)
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			flows = append(flows, protoMsg)
+		}
+	}
+	return flows
+}
+
+func (fg *FlowsAndGroups) ListGroups() []*ofp.OfpGroupEntry {
+	groups := make([]*ofp.OfpGroupEntry, 0)
+	iter := fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			groups = append(groups, protoMsg)
+		}
+	}
+	return groups
+}
+
+func (fg *FlowsAndGroups) String() string {
+	var buffer bytes.Buffer
+	iter := fg.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			buffer.WriteString("\nFlow:\n")
+			buffer.WriteString(proto.MarshalTextString(protoMsg))
+			buffer.WriteString("\n")
+		}
+	}
+	iter = fg.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			buffer.WriteString("\nGroup:\n")
+			buffer.WriteString(proto.MarshalTextString(protoMsg))
+			buffer.WriteString("\n")
+		}
+	}
+	return buffer.String()
+}
+
+func (fg *FlowsAndGroups) AddFlow(flow *ofp.OfpFlowStats) {
+	if flow == nil {
+		return
+	}
+
+	if fg.Flows == nil {
+		fg.Flows = ordered_map.NewOrderedMap()
+	}
+	if fg.Groups == nil {
+		fg.Groups = ordered_map.NewOrderedMap()
+	}
+	//Add flow only if absent
+	if _, exist := fg.Flows.Get(flow.Id); !exist {
+		fg.Flows.Set(flow.Id, flow)
+	}
+}
+
+func (fg *FlowsAndGroups) AddGroup(group *ofp.OfpGroupEntry) {
+	if group == nil {
+		return
+	}
+
+	if fg.Flows == nil {
+		fg.Flows = ordered_map.NewOrderedMap()
+	}
+	if fg.Groups == nil {
+		fg.Groups = ordered_map.NewOrderedMap()
+	}
+	//Add group only if absent
+	if _, exist := fg.Groups.Get(group.Desc.GroupId); !exist {
+		fg.Groups.Set(group.Desc.GroupId, group)
+	}
+}
+
+// AddFrom adds flows and groups from the argument into this structure, only if they do not already exist
+func (fg *FlowsAndGroups) AddFrom(from *FlowsAndGroups) {
+	iter := from.Flows.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpFlowStats); isMsg {
+			if _, exist := fg.Flows.Get(protoMsg.Id); !exist {
+				fg.Flows.Set(protoMsg.Id, protoMsg)
+			}
+		}
+	}
+	iter = from.Groups.IterFunc()
+	for kv, ok := iter(); ok; kv, ok = iter() {
+		if protoMsg, isMsg := kv.Value.(*ofp.OfpGroupEntry); isMsg {
+			if _, exist := fg.Groups.Get(protoMsg.Stats.GroupId); !exist {
+				fg.Groups.Set(protoMsg.Stats.GroupId, protoMsg)
+			}
+		}
+	}
+}
+
+type DeviceRules struct {
+	Rules     map[string]*FlowsAndGroups
+	rulesLock sync.RWMutex
+}
+
+func NewDeviceRules() *DeviceRules {
+	var dr DeviceRules
+	dr.Rules = make(map[string]*FlowsAndGroups)
+	return &dr
+}
+
+func (dr *DeviceRules) Copy() *DeviceRules {
+	copyDR := NewDeviceRules()
+	if dr != nil {
+		dr.rulesLock.RLock()
+		defer dr.rulesLock.RUnlock()
+		for key, val := range dr.Rules {
+			if val != nil {
+				copyDR.Rules[key] = val.Copy()
+			}
+		}
+	}
+	return copyDR
+}
+
+func (dr *DeviceRules) Keys() []string {
+	dr.rulesLock.RLock()
+	defer dr.rulesLock.RUnlock()
+	keys := make([]string, 0, len(dr.Rules))
+	for k := range dr.Rules {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+func (dr *DeviceRules) ClearFlows(deviceId string) {
+	dr.rulesLock.Lock()
+	defer dr.rulesLock.Unlock()
+	if _, exist := dr.Rules[deviceId]; exist {
+		dr.Rules[deviceId].Flows = ordered_map.NewOrderedMap()
+	}
+}
+
+func (dr *DeviceRules) FilterRules(deviceIds map[string]string) *DeviceRules {
+	filteredDR := NewDeviceRules()
+	dr.rulesLock.RLock()
+	defer dr.rulesLock.RUnlock()
+	for key, val := range dr.Rules {
+		if _, exist := deviceIds[key]; exist {
+			filteredDR.Rules[key] = val.Copy()
+		}
+	}
+	return filteredDR
+}
+
+func (dr *DeviceRules) RemoveRule(deviceId string) {
+	dr.rulesLock.Lock()
+	defer dr.rulesLock.Unlock()
+	delete(dr.Rules, deviceId)
+}
+
+func (dr *DeviceRules) AddFlow(deviceId string, flow *ofp.OfpFlowStats) {
+	dr.rulesLock.Lock()
+	defer dr.rulesLock.Unlock()
+	if _, exist := dr.Rules[deviceId]; !exist {
+		dr.Rules[deviceId] = NewFlowsAndGroups()
+	}
+	dr.Rules[deviceId].AddFlow(flow)
+}
+
+func (dr *DeviceRules) GetRules() map[string]*FlowsAndGroups {
+	dr.rulesLock.RLock()
+	defer dr.rulesLock.RUnlock()
+	return dr.Rules
+}
+
+func (dr *DeviceRules) String() string {
+	var buffer bytes.Buffer
+	dr.rulesLock.RLock()
+	defer dr.rulesLock.RUnlock()
+	for key, value := range dr.Rules {
+		buffer.WriteString("DeviceId:")
+		buffer.WriteString(key)
+		buffer.WriteString(value.String())
+		buffer.WriteString("\n\n")
+	}
+	return buffer.String()
+}
+
+func (dr *DeviceRules) AddFlowsAndGroup(deviceId string, fg *FlowsAndGroups) {
+	dr.rulesLock.Lock()
+	defer dr.rulesLock.Unlock()
+	dr.Rules[deviceId] = fg
+}
+
+// CreateEntryIfNotExist creates a new deviceId in the Map if it does not exist and assigns an
+// empty FlowsAndGroups to it.  Otherwise, it does nothing.
+func (dr *DeviceRules) CreateEntryIfNotExist(deviceId string) {
+	dr.rulesLock.Lock()
+	defer dr.rulesLock.Unlock()
+	if _, ok := dr.Rules[deviceId]; !ok {
+		dr.Rules[deviceId] = NewFlowsAndGroups()
+	}
+}
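+
+// A minimal usage sketch for DeviceRules (the device id and the flow variable are illustrative,
+// assuming a populated *ofp.OfpFlowStats named flow):
+//
+//	dr := NewDeviceRules()
+//	dr.CreateEntryIfNotExist("device-1")
+//	dr.AddFlow("device-1", flow)
+//	for deviceID, fg := range dr.GetRules() {
+//		fmt.Println(deviceID, fg.String())
+//	}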
+
+/*
+ *  Common flow routines
+ */
+
+//FindOverlappingFlows returns a list of overlapping flow(s), where mod is the flow request
+func FindOverlappingFlows(flows []*ofp.OfpFlowStats, mod *ofp.OfpFlowMod) []*ofp.OfpFlowStats {
+	return nil //TODO - complete implementation
+}
+
+// FindFlowById returns the index of the flow in the flows array if present. Otherwise, it returns -1
+func FindFlowById(flows []*ofp.OfpFlowStats, flow *ofp.OfpFlowStats) int {
+	for idx, f := range flows {
+		if flow.Id == f.Id {
+			return idx
+		}
+	}
+	return -1
+}
+
+// FindFlows returns the index of flow in the flows slice if present.  Otherwise, it returns -1
+func FindFlows(flows []*ofp.OfpFlowStats, flow *ofp.OfpFlowStats) int {
+	for idx, f := range flows {
+		if f.Id == flow.Id {
+			return idx
+		}
+	}
+	return -1
+}
+
+//FlowMatch returns true if two flows match on the following flow attributes:
+//TableId, Priority, Flags, Cookie, Match
+func FlowMatch(f1 *ofp.OfpFlowStats, f2 *ofp.OfpFlowStats) bool {
+	return f1 != nil && f2 != nil && f1.Id == f2.Id
+}
+
+//FlowMatchesMod returns true if the given flow is "covered" by the wildcard flow_mod, taking into consideration
+//both exact matches as well as mask-based match fields, if any. Otherwise it returns false
+func FlowMatchesMod(flow *ofp.OfpFlowStats, mod *ofp.OfpFlowMod) bool {
+	if flow == nil || mod == nil {
+		return false
+	}
+	//Check if flow.cookie is covered by mod.cookie and mod.cookie_mask
+	if (flow.Cookie & mod.CookieMask) != (mod.Cookie & mod.CookieMask) {
+		return false
+	}
+
+	//Check if flow.table_id is covered by flow_mod.table_id
+	if mod.TableId != uint32(ofp.OfpTable_OFPTT_ALL) && flow.TableId != mod.TableId {
+		return false
+	}
+
+	//Check out_port
+	if (mod.OutPort&0x7fffffff) != uint32(ofp.OfpPortNo_OFPP_ANY) && !FlowHasOutPort(flow, mod.OutPort) {
+		return false
+	}
+
+	//	Check out_group
+	if (mod.OutGroup&0x7fffffff) != uint32(ofp.OfpGroup_OFPG_ANY) && !FlowHasOutGroup(flow, mod.OutGroup) {
+		return false
+	}
+
+	//Priority is ignored
+
+	//Check match condition
+	//If the flow_mod match field is empty, that is a special case and indicates the flow entry matches
+	if (mod.Match == nil) || (mod.Match.OxmFields == nil) || (len(mod.Match.OxmFields) == 0) {
+		//If we got this far and the match is empty in the flow spec, then the flow matches
+		return true
+	} // TODO : implement the flow match analysis
+	return false
+
+}
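+
+// A worked example of the cookie-mask check above (values are illustrative): with mod.Cookie = 0x10 and
+// mod.CookieMask = 0xF0, the mod covers any flow whose cookie has 0x1 in its high nibble, e.g.
+// flow.Cookie = 0x12 is covered (0x12 & 0xF0 == 0x10 == 0x10 & 0xF0), while flow.Cookie = 0x22 is not
+// (0x22 & 0xF0 == 0x20).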
+
+//FlowHasOutPort returns true if the flow has an output action with the given out_port
+func FlowHasOutPort(flow *ofp.OfpFlowStats, outPort uint32) bool {
+	if flow == nil {
+		return false
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			if instruction.GetActions() == nil {
+				return false
+			}
+			for _, action := range instruction.GetActions().Actions {
+				if action.Type == ofp.OfpActionType_OFPAT_OUTPUT {
+					if (action.GetOutput() != nil) && (action.GetOutput().Port == outPort) {
+						return true
+					}
+				}
+
+			}
+		}
+	}
+	return false
+}
+
+//FlowHasOutGroup returns true if the flow has a group action with the given out_group
+func FlowHasOutGroup(flow *ofp.OfpFlowStats, groupID uint32) bool {
+	if flow == nil {
+		return false
+	}
+	for _, instruction := range flow.Instructions {
+		if instruction.Type == uint32(ofp.OfpInstructionType_OFPIT_APPLY_ACTIONS) {
+			if instruction.GetActions() == nil {
+				return false
+			}
+			for _, action := range instruction.GetActions().Actions {
+				if action.Type == ofp.OfpActionType_OFPAT_GROUP {
+					if (action.GetGroup() != nil) && (action.GetGroup().GroupId == groupID) {
+						return true
+					}
+				}
+
+			}
+		}
+	}
+	return false
+}
+
+//FindGroup returns index of group if found, else returns -1
+func FindGroup(groups []*ofp.OfpGroupEntry, groupId uint32) int {
+	for idx, group := range groups {
+		if group.Desc.GroupId == groupId {
+			return idx
+		}
+	}
+	return -1
+}
+
+func FlowsDeleteByGroupId(flows []*ofp.OfpFlowStats, groupId uint32) (bool, []*ofp.OfpFlowStats) {
+	toKeep := make([]*ofp.OfpFlowStats, 0)
+
+	for _, f := range flows {
+		if !FlowHasOutGroup(f, groupId) {
+			toKeep = append(toKeep, f)
+		}
+	}
+	return len(toKeep) < len(flows), toKeep
+}
+
+func ToOfpOxmField(from []*ofp.OfpOxmOfbField) []*ofp.OfpOxmField {
+	matchFields := make([]*ofp.OfpOxmField, 0)
+	for _, val := range from {
+		matchFields = append(matchFields, &ofp.OfpOxmField{Field: &ofp.OfpOxmField_OfbField{OfbField: val}})
+	}
+	return matchFields
+}
+
+//IsMulticastIp returns true if the IP address starts with the bit sequence 1110 (i.e. it is in 224.0.0.0/4);
+//false otherwise.
+func IsMulticastIp(ip uint32) bool {
+	return ip>>28 == 14
+}
+
+//ConvertToMulticastMacInt returns the equivalent multicast MAC address (as a uint64) of the given multicast IP address
+func ConvertToMulticastMacInt(ip uint32) uint64 {
+	// keep the last 23 bits of the ip address (mask 0x7FFFFF)
+	theLast23BitsOfIp := ip & 8388607
+	// OR with 0x01005E000000 (the 01:00:5e multicast MAC prefix) to build the mcast mac address
+	return 1101088686080 | uint64(theLast23BitsOfIp)
+}
+
+//ConvertToMulticastMacBytes returns the equivalent multicast MAC address (as a 6-byte slice) of the given multicast IP address
+func ConvertToMulticastMacBytes(ip uint32) []byte {
+	mac := ConvertToMulticastMacInt(ip)
+	var b bytes.Buffer
+	// catalyze is a 48-bit mask selecting the most significant octet (0xFF0000000000)
+	catalyze := uint64(280375465082880)
+	// extract each octet of the mac address, from most to least significant
+	for i := 0; i < 6; i++ {
+		if i != 0 {
+			catalyze >>= 8
+		}
+		octet := mac & catalyze
+		octetDecimal := octet >> uint8(40-i*8)
+		b.WriteByte(byte(octetDecimal))
+	}
+	return b.Bytes()
+}
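+
+// A worked example of the IP-to-MAC mapping above (the address is illustrative): for the multicast
+// address 224.1.2.3 (0xE0010203), IsMulticastIp returns true (0xE0010203 >> 28 == 14), the low 23 bits
+// are 0x010203, ConvertToMulticastMacInt yields 0x01005E010203, and ConvertToMulticastMacBytes renders
+// that value as the bytes 01:00:5e:01:02:03.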
+
+func GetMeterIdFromWriteMetadata(ctx context.Context, flow *ofp.OfpFlowStats) uint32 {
+	/*
+	   Write metadata instruction value (metadata) is 8 bytes:
+	     MS 2 bytes: C Tag
+	     Next 2 bytes: Technology Profile Id
+	     Next 4 bytes: Port number (uni or nni) or MeterId
+	   This is set in the ONOS OltPipeline as a write metadata instruction
+	*/
+	var meterID uint32 = 0
+	md := GetMetadataFromWriteMetadataAction(ctx, flow)
+	logger.Debugw(ctx, "found-metadata-for-egress/uni-port", log.Fields{"metadata": md})
+	if md != 0 {
+		meterID = uint32(md & 0xFFFFFFFF)
+		logger.Debugw(ctx, "found-meterID-in-write-metadata-action", log.Fields{"meterID": meterID})
+	}
+	return meterID
+}
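+
+// A worked example of the layout above (the value is illustrative): for a write-metadata value of
+// 0x0001004000000400, the C Tag is 0x0001, the Technology Profile Id is 0x0040, and the low 4 bytes are
+// 0x00000400, so GetMeterIdFromWriteMetadata returns 1024.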
+
+func SetMeterIdToFlow(flow *ofp.OfpFlowStats, meterId uint32) {
+	if flow != nil {
+		for _, instruction := range flow.Instructions {
+			if instruction.Type == uint32(METER_ACTION) {
+				if meterInst := instruction.GetMeter(); meterInst != nil {
+					meterInst.MeterId = meterId
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/common.go
new file mode 100644
index 0000000..b0ce81b
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/common.go
@@ -0,0 +1,27 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package log
+
+var logger CLogger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = RegisterPackage(JSON, ErrorLevel, Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
new file mode 100644
index 0000000..7b1a123
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/log.go
@@ -0,0 +1,662 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// Package log provides a structured Logger interface implemented using zap logger. It provides the following capabilities:
+// 1. Package level logging - a go package can register itself (AddPackage) and have a logger created for that package.
+// 2. Dynamic log level change - for all registered packages (SetAllLogLevel)
+// 3. Dynamic log level change - for a given package (SetPackageLogLevel)
+// 4. Provides a default logger for unregistered packages (however, avoid using it)
+// 5. Allows key-value pairs to be added to a logger (UpdateLogger) or to all loggers (UpdateAllLoggers) at run time
+// 6. Add to the log output the location where the log was invoked (filename.functionname.linenumber)
+//
+// Using package-level logging (recommended approach).  In the examples below, log refers to this log package.
+//
+// 1. In the appropriate package, add the following in the init section of the package (usually in a common.go file)
+//    The log level can be changed and any number of default fields can be added as well. The log level specifies
+//    the lowest log level that will appear in the output, while the fields are automatically added to all log printouts.
+//    However, because voltha components re-initialize the log level of each registered package to the default initial
+//    log level passed as a CLI argument, the log level passed in the RegisterPackage call effectively has no effect.
+//
+//    var logger log.CLogger
+//    func init() {
+//              logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{"key1": "value1"})
+//    }
+//
+// 2. In the calling package, use any of the publicly available functions of local package-level logger instance created
+//    in previous step.  Here is an example to write an Info log with additional fields:
+//
+//    logger.Infow("An example", mylog.Fields{"myStringOutput": "output", "myIntOutput": 2})
+//
+// 3. To dynamically change the log level, you can use
+//          a) SetLogLevel from inside your package or
+//          b) SetPackageLogLevel from anywhere or
+//          c) SetAllLogLevel from anywhere.
+//
+//    The dynamic log level configuration feature also uses the SetPackageLogLevel method, driven by triggers received
+//    when configured log levels change.
+
+package log
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"path"
+	"runtime"
+	"strings"
+
+	zp "go.uber.org/zap"
+	zc "go.uber.org/zap/zapcore"
+)
+
+type LogLevel int8
+
+const (
+	// DebugLevel logs a message at debug level
+	DebugLevel = LogLevel(iota)
+	// InfoLevel logs a message at info level
+	InfoLevel
+	// WarnLevel logs a message at warning level
+	WarnLevel
+	// ErrorLevel logs a message at error level
+	ErrorLevel
+	// FatalLevel logs a message, then calls os.Exit(1).
+	FatalLevel
+)
+
+// CONSOLE formats the log for the console, mostly used during development
+const CONSOLE = "console"
+
+// JSON formats the log using the json format, mostly intended for consumption by an automated logging system
+const JSON = "json"
+
+// CLogger is a context-aware logger that represents an abstract logging interface.  Any logging implementation used
+// will need to abide by this interface
+type CLogger interface {
+	Debug(context.Context, ...interface{})
+	Debugln(context.Context, ...interface{})
+	Debugf(context.Context, string, ...interface{})
+	Debugw(context.Context, string, Fields)
+
+	Info(context.Context, ...interface{})
+	Infoln(context.Context, ...interface{})
+	Infof(context.Context, string, ...interface{})
+	Infow(context.Context, string, Fields)
+
+	Warn(context.Context, ...interface{})
+	Warnln(context.Context, ...interface{})
+	Warnf(context.Context, string, ...interface{})
+	Warnw(context.Context, string, Fields)
+
+	Error(context.Context, ...interface{})
+	Errorln(context.Context, ...interface{})
+	Errorf(context.Context, string, ...interface{})
+	Errorw(context.Context, string, Fields)
+
+	Fatal(context.Context, ...interface{})
+	Fatalln(context.Context, ...interface{})
+	Fatalf(context.Context, string, ...interface{})
+	Fatalw(context.Context, string, Fields)
+
+	With(Fields) CLogger
+
+	// The following are added to be able to use this logger as a gRPC LoggerV2 if needed
+	//
+	Warning(context.Context, ...interface{})
+	Warningln(context.Context, ...interface{})
+	Warningf(context.Context, string, ...interface{})
+
+	// V reports whether verbosity level l is at least the requested verbose level.
+	V(l LogLevel) bool
+
+	//Returns the log level of this specific logger
+	GetLogLevel() LogLevel
+}
+
+// Fields is used as key-value pairs for structured logging
+type Fields map[string]interface{}
+
+var defaultLogger *clogger
+var cfg zp.Config
+
+var loggers map[string]*clogger
+var cfgs map[string]zp.Config
+
+type clogger struct {
+	log         *zp.SugaredLogger
+	parent      *zp.Logger
+	packageName string
+}
+
+func logLevelToAtomicLevel(l LogLevel) zp.AtomicLevel {
+	switch l {
+	case DebugLevel:
+		return zp.NewAtomicLevelAt(zc.DebugLevel)
+	case InfoLevel:
+		return zp.NewAtomicLevelAt(zc.InfoLevel)
+	case WarnLevel:
+		return zp.NewAtomicLevelAt(zc.WarnLevel)
+	case ErrorLevel:
+		return zp.NewAtomicLevelAt(zc.ErrorLevel)
+	case FatalLevel:
+		return zp.NewAtomicLevelAt(zc.FatalLevel)
+	}
+	return zp.NewAtomicLevelAt(zc.ErrorLevel)
+}
+
+func logLevelToLevel(l LogLevel) zc.Level {
+	switch l {
+	case DebugLevel:
+		return zc.DebugLevel
+	case InfoLevel:
+		return zc.InfoLevel
+	case WarnLevel:
+		return zc.WarnLevel
+	case ErrorLevel:
+		return zc.ErrorLevel
+	case FatalLevel:
+		return zc.FatalLevel
+	}
+	return zc.ErrorLevel
+}
+
+func levelToLogLevel(l zc.Level) LogLevel {
+	switch l {
+	case zc.DebugLevel:
+		return DebugLevel
+	case zc.InfoLevel:
+		return InfoLevel
+	case zc.WarnLevel:
+		return WarnLevel
+	case zc.ErrorLevel:
+		return ErrorLevel
+	case zc.FatalLevel:
+		return FatalLevel
+	}
+	return ErrorLevel
+}
+
+func StringToLogLevel(l string) (LogLevel, error) {
+	switch strings.ToUpper(l) {
+	case "DEBUG":
+		return DebugLevel, nil
+	case "INFO":
+		return InfoLevel, nil
+	case "WARN":
+		return WarnLevel, nil
+	case "ERROR":
+		return ErrorLevel, nil
+	case "FATAL":
+		return FatalLevel, nil
+	}
+	return 0, errors.New("Given LogLevel is invalid : " + l)
+}
+
+func LogLevelToString(l LogLevel) (string, error) {
+	switch l {
+	case DebugLevel:
+		return "DEBUG", nil
+	case InfoLevel:
+		return "INFO", nil
+	case WarnLevel:
+		return "WARN", nil
+	case ErrorLevel:
+		return "ERROR", nil
+	case FatalLevel:
+		return "FATAL", nil
+	}
+	return "", fmt.Errorf("Given LogLevel is invalid %d", l)
+}
+
+func getDefaultConfig(outputType string, level LogLevel, defaultFields Fields) zp.Config {
+	return zp.Config{
+		Level:            logLevelToAtomicLevel(level),
+		Encoding:         outputType,
+		Development:      true,
+		OutputPaths:      []string{"stdout"},
+		ErrorOutputPaths: []string{"stderr"},
+		InitialFields:    defaultFields,
+		EncoderConfig: zc.EncoderConfig{
+			LevelKey:       "level",
+			MessageKey:     "msg",
+			TimeKey:        "ts",
+			CallerKey:      "caller",
+			StacktraceKey:  "stacktrace",
+			LineEnding:     zc.DefaultLineEnding,
+			EncodeLevel:    zc.LowercaseLevelEncoder,
+			EncodeTime:     zc.ISO8601TimeEncoder,
+			EncodeDuration: zc.SecondsDurationEncoder,
+			EncodeCaller:   zc.ShortCallerEncoder,
+		},
+	}
+}
+
+func ConstructZapConfig(outputType string, level LogLevel, fields Fields) zp.Config {
+	return getDefaultConfig(outputType, level, fields)
+}
+
+// SetDefaultLogger needs to be invoked before the logger API can be used.  This function
+// initializes the default logger (zap's SugaredLogger)
+func SetDefaultLogger(outputType string, level LogLevel, defaultFields Fields) (CLogger, error) {
+	// Build a custom config using zap
+	cfg = getDefaultConfig(outputType, level, defaultFields)
+
+	l, err := cfg.Build(zp.AddCallerSkip(1))
+	if err != nil {
+		return nil, err
+	}
+
+	defaultLogger = &clogger{
+		log:    l.Sugar(),
+		parent: l,
+	}
+
+	return defaultLogger, nil
+}
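+
+// A minimal usage sketch from a calling package (field names and values are illustrative):
+//
+//	logger, err := log.SetDefaultLogger(log.JSON, log.InfoLevel, log.Fields{"instanceId": "my-component"})
+//	if err != nil {
+//		panic(err)
+//	}
+//	logger.Infow(context.Background(), "component-started", log.Fields{"port": 8080})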
+
+// RegisterPackage registers a package to the log map.  Each package gets its own logger, which allows
+// its config (loglevel) to be changed dynamically without interacting with the other packages.
+// outputType is JSON, level is the lowest level log to output with this logger and defaultFields is a map of
+// key-value pairs to always add to the output.
+// Note: RegisterPackage also returns a reference to the actual logger.  If a calling package uses this reference directly
+// instead of using the publicly available functions in this log package, then a number of functionalities will not
+// be available to it, notably log tracing with filename.functionname.linenumber annotation.
+//
+// pkgNames parameter should be used for testing only as this function detects the caller's package.
+func RegisterPackage(outputType string, level LogLevel, defaultFields Fields, pkgNames ...string) (CLogger, error) {
+	if cfgs == nil {
+		cfgs = make(map[string]zp.Config)
+	}
+	if loggers == nil {
+		loggers = make(map[string]*clogger)
+	}
+
+	var pkgName string
+	for _, name := range pkgNames {
+		pkgName = name
+		break
+	}
+	if pkgName == "" {
+		pkgName, _, _, _ = getCallerInfo()
+	}
+
+	if _, exist := loggers[pkgName]; exist {
+		return loggers[pkgName], nil
+	}
+
+	cfgs[pkgName] = getDefaultConfig(outputType, level, defaultFields)
+
+	l, err := cfgs[pkgName].Build(zp.AddCallerSkip(1))
+	if err != nil {
+		return nil, err
+	}
+
+	loggers[pkgName] = &clogger{
+		log:         l.Sugar(),
+		parent:      l,
+		packageName: pkgName,
+	}
+	return loggers[pkgName], nil
+}
+
+//UpdateAllLoggers creates new loggers for all registered packages with the defaultFields.
+func UpdateAllLoggers(defaultFields Fields) error {
+	for pkgName, cfg := range cfgs {
+		for k, v := range defaultFields {
+			if cfg.InitialFields == nil {
+				cfg.InitialFields = make(map[string]interface{})
+			}
+			cfg.InitialFields[k] = v
+		}
+		l, err := cfg.Build(zp.AddCallerSkip(1))
+		if err != nil {
+			return err
+		}
+
+		// Update the existing zap logger instance
+		loggers[pkgName].log = l.Sugar()
+		loggers[pkgName].parent = l
+	}
+	return nil
+}
+
+// GetPackageNames returns a list of all packages that have individually-configured loggers
+func GetPackageNames() []string {
+	i := 0
+	keys := make([]string, len(loggers))
+	for k := range loggers {
+		keys[i] = k
+		i++
+	}
+	return keys
+}
+
+// UpdateLogger updates the logger associated with a caller's package with supplied defaultFields
+func UpdateLogger(defaultFields Fields) error {
+	pkgName, _, _, _ := getCallerInfo()
+	if _, exist := loggers[pkgName]; !exist {
+		return fmt.Errorf("package-%s-not-registered", pkgName)
+	}
+
+	// Build a new logger
+	if _, exist := cfgs[pkgName]; !exist {
+		return fmt.Errorf("config-%s-not-registered", pkgName)
+	}
+
+	cfg := cfgs[pkgName]
+	for k, v := range defaultFields {
+		if cfg.InitialFields == nil {
+			cfg.InitialFields = make(map[string]interface{})
+		}
+		cfg.InitialFields[k] = v
+	}
+	l, err := cfg.Build(zp.AddCallerSkip(1))
+	if err != nil {
+		return err
+	}
+
+	// Update the existing zap logger instance
+	loggers[pkgName].log = l.Sugar()
+	loggers[pkgName].parent = l
+
+	return nil
+}
+
+func setLevel(cfg zp.Config, level LogLevel) {
+	switch level {
+	case DebugLevel:
+		cfg.Level.SetLevel(zc.DebugLevel)
+	case InfoLevel:
+		cfg.Level.SetLevel(zc.InfoLevel)
+	case WarnLevel:
+		cfg.Level.SetLevel(zc.WarnLevel)
+	case ErrorLevel:
+		cfg.Level.SetLevel(zc.ErrorLevel)
+	case FatalLevel:
+		cfg.Level.SetLevel(zc.FatalLevel)
+	default:
+		cfg.Level.SetLevel(zc.ErrorLevel)
+	}
+}
+
+//SetPackageLogLevel dynamically sets the log level of a given package to level.  This is typically invoked at an
+// application level during debugging
+func SetPackageLogLevel(packageName string, level LogLevel) {
+	// Get proper config
+	if cfg, ok := cfgs[packageName]; ok {
+		setLevel(cfg, level)
+	}
+}
+
+//SetAllLogLevel sets the log level of all registered packages to level
+func SetAllLogLevel(level LogLevel) {
+	// Get proper config
+	for _, cfg := range cfgs {
+		setLevel(cfg, level)
+	}
+}
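+
+// A usage sketch of the dynamic log level helpers above, from a calling package (the package name is illustrative):
+//
+//	log.SetAllLogLevel(log.DebugLevel)
+//	log.SetPackageLogLevel("github.com/opencord/voltha-lib-go/v7/pkg/probe", log.WarnLevel)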
+
+//GetPackageLogLevel returns the current log level of a package.
+func GetPackageLogLevel(packageName ...string) (LogLevel, error) {
+	var name string
+	if len(packageName) == 1 {
+		name = packageName[0]
+	} else {
+		name, _, _, _ = getCallerInfo()
+	}
+	if cfg, ok := cfgs[name]; ok {
+		return levelToLogLevel(cfg.Level.Level()), nil
+	}
+	return 0, fmt.Errorf("unknown-package-%s", name)
+}
+
+//GetDefaultLogLevel gets the log level used for packages that don't have specific loggers
+func GetDefaultLogLevel() LogLevel {
+	return levelToLogLevel(cfg.Level.Level())
+}
+
+//SetLogLevel sets the log level for the logger corresponding to the caller's package
+func SetLogLevel(level LogLevel) error {
+	pkgName, _, _, _ := getCallerInfo()
+	if _, exist := cfgs[pkgName]; !exist {
+		return fmt.Errorf("unregistered-package-%s", pkgName)
+	}
+	cfg := cfgs[pkgName]
+	setLevel(cfg, level)
+	return nil
+}
+
+//SetDefaultLogLevel sets the log level used for packages that don't have specific loggers
+func SetDefaultLogLevel(level LogLevel) {
+	setLevel(cfg, level)
+}
+
+// CleanUp flushes any buffered log entries. Applications should take care to call
+// CleanUp before exiting.
+func CleanUp() error {
+	for _, logger := range loggers {
+		if logger != nil {
+			if logger.parent != nil {
+				if err := logger.parent.Sync(); err != nil {
+					return err
+				}
+			}
+		}
+	}
+	if defaultLogger != nil {
+		if defaultLogger.parent != nil {
+			if err := defaultLogger.parent.Sync(); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+func getCallerInfo() (string, string, string, int) {
+	// Since the caller of a log function is one stack frame above the log.go frames (i.e. higher up the stack),
+	// first look for the last log.go frame and then grab the caller info one level higher.
+	maxLevel := 3
+	skiplevel := 3 // Level with the most empirical success to see the last log.go stack frame.
+	pc := make([]uintptr, maxLevel)
+	n := runtime.Callers(skiplevel, pc)
+	packageName := ""
+	funcName := ""
+	fileName := ""
+	var line int
+	if n == 0 {
+		return packageName, fileName, funcName, line
+	}
+	frames := runtime.CallersFrames(pc[:n])
+	var frame runtime.Frame
+	var foundFrame runtime.Frame
+	more := true
+	for more {
+		frame, more = frames.Next()
+		_, fileName = path.Split(frame.File)
+		if fileName != "log.go" {
+			foundFrame = frame // First frame after log.go in the frame stack
+			break
+		}
+	}
+	parts := strings.Split(foundFrame.Function, ".")
+	pl := len(parts)
+	if pl >= 2 {
+		funcName = parts[pl-1]
+		if parts[pl-2][0] == '(' {
+			packageName = strings.Join(parts[0:pl-2], ".")
+		} else {
+			packageName = strings.Join(parts[0:pl-1], ".")
+		}
+	}
+
+	if strings.HasSuffix(packageName, ".init") {
+		packageName = strings.TrimSuffix(packageName, ".init")
+	}
+
+	if strings.HasSuffix(fileName, ".go") {
+		fileName = strings.TrimSuffix(fileName, ".go")
+	}
+
+	return packageName, fileName, funcName, foundFrame.Line
+}
+
+// With returns a logger initialized with the key-value pairs
+func (l clogger) With(keysAndValues Fields) CLogger {
+	return clogger{log: l.log.With(serializeMap(keysAndValues)...), parent: l.parent}
+}
+
+// Debug logs a message at level Debug on the standard logger.
+func (l clogger) Debug(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debug(args...)
+}
+
+// Debugln logs a message at level Debug on the standard logger, followed by a line feed (added by default in any case).
+func (l clogger) Debugln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debug(args...)
+}
+
+// Debugf logs a formatted message at level Debug on the standard logger.
+func (l clogger) Debugf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debugf(format, args...)
+}
+
+// Debugw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Debugw(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(DebugLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Debugw(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Info logs a message at level Info on the standard logger.
+func (l clogger) Info(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Info(args...)
+}
+
+// Infoln logs a message at level Info on the standard logger, followed by a line feed (added by default in any case).
+func (l clogger) Infoln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Info(args...)
+	//msg := fmt.Sprintln(args...)
+	//l.sourced().Info(msg[:len(msg)-1])
+}
+
+// Infof logs a message at level Info on the standard logger.
+func (l clogger) Infof(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Infof(format, args...)
+}
+
+// Infow logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Infow(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(InfoLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Infow(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Warn logs a message at level Warn on the standard logger.
+func (l clogger) Warn(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warnln logs a message at level Warn on the standard logger, followed by a line feed (added by default in any case).
+func (l clogger) Warnln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warnf logs a message at level Warn on the standard logger.
+func (l clogger) Warnf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnf(format, args...)
+}
+
+// Warnw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Warnw(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(WarnLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnw(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Error logs a message at level Error on the standard logger.
+func (l clogger) Error(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Error(args...)
+}
+
+// Errorln logs a message at level Error on the standard logger, followed by a line feed (added by default in any case).
+func (l clogger) Errorln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Error(args...)
+}
+
+// Errorf logs a message at level Error on the standard logger.
+func (l clogger) Errorf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Errorf(format, args...)
+}
+
+// Errorw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Errorw(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(ErrorLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Errorw(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Fatal logs a message at level Fatal on the standard logger.
+func (l clogger) Fatal(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatal(args...)
+}
+
+// Fatalln logs a message at level Fatal on the standard logger, followed by a line feed (added by default in any case).
+func (l clogger) Fatalln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatal(args...)
+}
+
+// Fatalf logs a message at level Fatal on the standard logger.
+func (l clogger) Fatalf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatalf(format, args...)
+}
+
+// Fatalw logs a message with some additional context. The variadic key-value
+// pairs are treated as they are in With.
+func (l clogger) Fatalw(ctx context.Context, msg string, keysAndValues Fields) {
+	if l.V(FatalLevel) {
+		l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Fatalw(msg, serializeMap(keysAndValues)...)
+	}
+}
+
+// Warning logs a message at level Warn on the standard logger.
+func (l clogger) Warning(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warningln logs a message at level Warn on the standard logger, followed by a line feed (added by default in any case).
+func (l clogger) Warningln(ctx context.Context, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warn(args...)
+}
+
+// Warningf logs a message at level Warn on the standard logger.
+func (l clogger) Warningf(ctx context.Context, format string, args ...interface{}) {
+	l.log.With(GetGlobalLFM().ExtractContextAttributes(ctx)...).Warnf(format, args...)
+}
+
+// V reports whether verbosity level l is at least the requested verbose level.
+func (l clogger) V(level LogLevel) bool {
+	return l.parent.Core().Enabled(logLevelToLevel(level))
+}
+
+// GetLogLevel returns the current level of the logger
+func (l clogger) GetLogLevel() LogLevel {
+	return levelToLogLevel(cfgs[l.packageName].Level.Level())
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
new file mode 100644
index 0000000..82c3d7d
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/log/utils.go
@@ -0,0 +1,468 @@
+/*
+ * Copyright 2018-present Open Networking Foundation
+
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+
+ * http://www.apache.org/licenses/LICENSE-2.0
+
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+// File contains utility functions to support Open Tracing in conjunction with
+// Enhanced Logging based on context propagation
+
+package log
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"github.com/opentracing/opentracing-go"
+	jtracing "github.com/uber/jaeger-client-go"
+	jcfg "github.com/uber/jaeger-client-go/config"
+	"io"
+	"os"
+	"strings"
+	"sync"
+)
+
+const (
+	RootSpanNameKey = "op-name"
+)
+
+// LogFeaturesManager holds the global settings governing the Log Correlation and Tracing features. It should only
+// be updated through the exposed public methods
+type LogFeaturesManager struct {
+	isTracePublishingEnabled bool
+	isLogCorrelationEnabled  bool
+	componentName            string // Name of component extracted from ENV variable
+	activeTraceAgentAddress  string
+	lock                     sync.Mutex
+}
+
+var globalLFM *LogFeaturesManager = &LogFeaturesManager{}
+
+func GetGlobalLFM() *LogFeaturesManager {
+	return globalLFM
+}
+
+// ActiveTracerProxy is a wrapper that delegates to the currently active Tracer instance. The middleware library used for
+// generating Spans for gRPC API calls does not support dynamically setting the active Tracer in the way the
+// SetGlobalTracer method of the OpenTracing API does
+type ActiveTracerProxy struct {
+}
+
+func (atw ActiveTracerProxy) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span {
+	return opentracing.GlobalTracer().StartSpan(operationName, opts...)
+}
+
+func (atw ActiveTracerProxy) Inject(sm opentracing.SpanContext, format interface{}, carrier interface{}) error {
+	return opentracing.GlobalTracer().Inject(sm, format, carrier)
+}
+
+func (atw ActiveTracerProxy) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
+	return opentracing.GlobalTracer().Extract(format, carrier)
+}
+
+// traceLogger is a Jaeger-compliant Logger instance that redirects Jaeger logs to the default logger
+type traceLogger struct {
+	logger *clogger
+}
+
+func (tl traceLogger) Error(msg string) {
+	tl.logger.Error(context.Background(), msg)
+}
+
+func (tl traceLogger) Infof(msg string, args ...interface{}) {
+	// Tracing logs should be performed only at Debug Verbosity
+	tl.logger.Debugf(context.Background(), msg, args...)
+}
+
+// Wrapper to handle correct Closer call at the time of Process Termination
+type traceCloser struct {
+}
+
+func (c traceCloser) Close() error {
+	currentActiveTracer := opentracing.GlobalTracer()
+	if currentActiveTracer != nil {
+		if jTracer, ok := currentActiveTracer.(*jtracing.Tracer); ok {
+			jTracer.Close()
+		}
+	}
+
+	return nil
+}
+
+// InitTracingAndLogCorrelation initializes a Jaeger-based tracing client, based on the initial status of Trace Publishing and Log Correlation
+func (lfm *LogFeaturesManager) InitTracingAndLogCorrelation(tracePublishEnabled bool, traceAgentAddress string, logCorrelationEnabled bool) (io.Closer, error) {
+	lfm.componentName = os.Getenv("COMPONENT_NAME")
+	if lfm.componentName == "" {
+		return nil, errors.New("Unable to retrieve PoD Component Name from Runtime env")
+	}
+
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	// Use NoopTracer when both Tracing Publishing and Log Correlation are disabled
+	if !tracePublishEnabled && !logCorrelationEnabled {
+		logger.Info(context.Background(), "Skipping Global Tracer initialization as both Trace publish and Log correlation are configured as disabled")
+		lfm.isTracePublishingEnabled = false
+		lfm.isLogCorrelationEnabled = false
+		opentracing.SetGlobalTracer(opentracing.NoopTracer{})
+		return traceCloser{}, nil
+	}
+
+	tracer, _, err := lfm.constructJaegerTracer(tracePublishEnabled, traceAgentAddress, true)
+	if err != nil {
+		return nil, err
+	}
+
+	// Initialize variables representing Active Status
+	opentracing.SetGlobalTracer(tracer)
+	lfm.isTracePublishingEnabled = tracePublishEnabled
+	lfm.activeTraceAgentAddress = traceAgentAddress
+	lfm.isLogCorrelationEnabled = logCorrelationEnabled
+	return traceCloser{}, nil
+}
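+
+// A usage sketch (the agent address is illustrative; COMPONENT_NAME must be set in the environment):
+// typically invoked once at component start-up, with the returned closer handed to TerminateTracing on shutdown.
+//
+//	closer, err := log.GetGlobalLFM().InitTracingAndLogCorrelation(true, "jaeger-agent:6831", true)
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer log.TerminateTracing(closer)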
+
+// replaceActiveTracer replaces the active tracer and gracefully closes the previous one
+func (lfm *LogFeaturesManager) replaceActiveTracer(tracer opentracing.Tracer) {
+	currentActiveTracer := opentracing.GlobalTracer()
+	opentracing.SetGlobalTracer(tracer)
+
+	if currentActiveTracer != nil {
+		if jTracer, ok := currentActiveTracer.(*jtracing.Tracer); ok {
+			jTracer.Close()
+		}
+	}
+}
+
+func (lfm *LogFeaturesManager) GetLogCorrelationStatus() bool {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	return lfm.isLogCorrelationEnabled
+}
+
+func (lfm *LogFeaturesManager) SetLogCorrelationStatus(isEnabled bool) {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	if isEnabled == lfm.isLogCorrelationEnabled {
+		logger.Debugf(context.Background(), "Ignoring Log Correlation Set operation with value %t; current Status same as desired", isEnabled)
+		return
+	}
+
+	if isEnabled {
+		// Construct new Tracer instance if Log Correlation has been enabled and current active tracer is a NoopTracer instance.
+		// Continue using the earlier tracer instance in case of any error
+		if _, ok := opentracing.GlobalTracer().(opentracing.NoopTracer); ok {
+			tracer, _, err := lfm.constructJaegerTracer(lfm.isTracePublishingEnabled, lfm.activeTraceAgentAddress, false)
+			if err != nil {
+				logger.Warnf(context.Background(), "Log Correlation Enable operation failed with error: %s", err.Error())
+				return
+			}
+
+			lfm.replaceActiveTracer(tracer)
+		}
+
+		lfm.isLogCorrelationEnabled = true
+		logger.Info(context.Background(), "Log Correlation has been enabled")
+
+	} else {
+		// Switch to NoopTracer when Log Correlation has been disabled and Tracing Publish is already disabled
+		if _, ok := opentracing.GlobalTracer().(opentracing.NoopTracer); !ok && !lfm.isTracePublishingEnabled {
+			lfm.replaceActiveTracer(opentracing.NoopTracer{})
+		}
+
+		lfm.isLogCorrelationEnabled = false
+		logger.Info(context.Background(), "Log Correlation has been disabled")
+	}
+}
+
+func (lfm *LogFeaturesManager) GetTracePublishingStatus() bool {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	return lfm.isTracePublishingEnabled
+}
+
+func (lfm *LogFeaturesManager) SetTracePublishingStatus(isEnabled bool) {
+	lfm.lock.Lock()
+	defer lfm.lock.Unlock()
+
+	if isEnabled == lfm.isTracePublishingEnabled {
+		logger.Debugf(context.Background(), "Ignoring Trace Publishing Set operation with value %t; current Status same as desired", isEnabled)
+		return
+	}
+
+	if isEnabled {
+		// Construct new Tracer instance if Tracing Publish has been enabled (even if a Jaeger instance is already active)
+		// This is needed to ensure that a fresh lookup of Jaeger Agent address is performed again while performing
+		// Disable-Enable of Tracing
+		tracer, _, err := lfm.constructJaegerTracer(isEnabled, lfm.activeTraceAgentAddress, false)
+		if err != nil {
+			logger.Warnf(context.Background(), "Trace Publishing Enable operation failed with error: %s", err.Error())
+			return
+		}
+		lfm.replaceActiveTracer(tracer)
+
+		lfm.isTracePublishingEnabled = true
+		logger.Info(context.Background(), "Tracing Publishing has been enabled")
+	} else {
+		// Switch to NoopTracer when Tracing Publish has been disabled and Log Correlation is already disabled
+		if !lfm.isLogCorrelationEnabled {
+			lfm.replaceActiveTracer(opentracing.NoopTracer{})
+		} else {
+			// Else construct a new Jaeger Instance with publishing disabled
+			tracer, _, err := lfm.constructJaegerTracer(isEnabled, lfm.activeTraceAgentAddress, false)
+			if err != nil {
+				logger.Warnf(context.Background(), "Trace Publishing Disable operation failed with error: %s", err.Error())
+				return
+			}
+			lfm.replaceActiveTracer(tracer)
+		}
+
+		lfm.isTracePublishingEnabled = false
+		logger.Info(context.Background(), "Tracing Publishing has been disabled")
+	}
+}
+
+// constructJaegerTracer constructs a new Jaeger Tracer instance based on the given Trace Agent address and publish status.
+// The last attribute indicates whether to use the loopback IP for creating the Jaeger client when the DNS lookup
+// of the supplied Trace Agent address has failed. It is fine to fall back during the initialization step, but
+// not later (when enabling/disabling the status dynamically)
+func (lfm *LogFeaturesManager) constructJaegerTracer(tracePublishEnabled bool, traceAgentAddress string, fallbackToLoopbackAllowed bool) (opentracing.Tracer, io.Closer, error) {
+	cfg := jcfg.Configuration{ServiceName: lfm.componentName}
+
+	var err error
+	var jReporterConfig jcfg.ReporterConfig
+	var jReporterCfgOption jtracing.Reporter
+
+	logger.Info(context.Background(), "Constructing new Jaeger Tracer instance")
+
+	// Attempt Trace Agent Address first; will fallback to Loopback IP if it fails
+	jReporterConfig = jcfg.ReporterConfig{LocalAgentHostPort: traceAgentAddress, LogSpans: true}
+	jReporterCfgOption, err = jReporterConfig.NewReporter(lfm.componentName, jtracing.NewNullMetrics(), traceLogger{logger: logger.(*clogger)})
+
+	if err != nil {
+		if !fallbackToLoopbackAllowed {
+			return nil, nil, errors.New("Reporter Creation for given Trace Agent address " + traceAgentAddress + " failed with error : " + err.Error())
+		}
+
+		logger.Infow(context.Background(), "Unable to create Reporter with given Trace Agent address",
+			Fields{"error": err, "address": traceAgentAddress})
+		// The Reporter initialization may fail due to Invalid Agent address or non-existent Agent (DNS lookup failure).
+		// It is essential for Tracer Instance to still start for correct Span propagation needed for log correlation.
+		// Thus, fall back to using the loopback IP for Reporter initialization before throwing back any error
+		tracePublishEnabled = false
+
+		jReporterConfig.LocalAgentHostPort = "127.0.0.1:6831"
+		jReporterCfgOption, err = jReporterConfig.NewReporter(lfm.componentName, jtracing.NewNullMetrics(), traceLogger{logger: logger.(*clogger)})
+		if err != nil {
+			return nil, nil, errors.New("Failed to initialize Jaeger Tracing due to Reporter creation error : " + err.Error())
+		}
+	}
+
+	// To start with, we are using Constant Sampling type
+	samplerParam := 0 // 0: Do not publish span, 1: Publish
+	if tracePublishEnabled {
+		samplerParam = 1
+	}
+	jSamplerConfig := jcfg.SamplerConfig{Type: "const", Param: float64(samplerParam)}
+	jSamplerCfgOption, err := jSamplerConfig.NewSampler(lfm.componentName, jtracing.NewNullMetrics())
+	if err != nil {
+		return nil, nil, errors.New("Unable to create Sampler : " + err.Error())
+	}
+
+	return cfg.NewTracer(jcfg.Reporter(jReporterCfgOption), jcfg.Sampler(jSamplerCfgOption))
+}
+
+func TerminateTracing(c io.Closer) {
+	err := c.Close()
+	if err != nil {
+		logger.Error(context.Background(), "error-while-closing-jaeger-tracer", Fields{"err": err})
+	}
+}
+
+// Extracts details of Execution Context as log fields from the Tracing Span injected into the
+// context instance. Following log fields are extracted:
+// 1. Operation Name : key as 'op-name' and value as Span operation name
+// 2. Operation Id : key as 'op-id' and value as 64 bit Span Id in hex digits string
+//
+// Additionally, any tags present in Span are also extracted to use as log fields e.g. device-id.
+//
+// If no Span is found associated with context, blank slice is returned without any log fields
+func (lfm *LogFeaturesManager) ExtractContextAttributes(ctx context.Context) []interface{} {
+	if !lfm.isLogCorrelationEnabled {
+		return make([]interface{}, 0)
+	}
+
+	attrMap := make(map[string]interface{})
+
+	if ctx != nil {
+		if span := opentracing.SpanFromContext(ctx); span != nil {
+			if jspan, ok := span.(*jtracing.Span); ok {
+				// Add Log fields for operation identified by Root Level Span (Trace)
+				opId := fmt.Sprintf("%016x", jspan.SpanContext().TraceID().Low) // Using Sprintf to avoid removal of leading 0s
+				opName := jspan.BaggageItem(RootSpanNameKey)
+
+				taskId := fmt.Sprintf("%016x", uint64(jspan.SpanContext().SpanID())) // Using Sprintf to avoid removal of leading 0s
+				taskName := jspan.OperationName()
+
+				if opName == "" {
+					span.SetBaggageItem(RootSpanNameKey, taskName)
+					opName = taskName
+				}
+
+				attrMap["op-id"] = opId
+				attrMap["op-name"] = opName
+
+				// Add Log fields for task identified by Current Span, if it is different
+				// than operation
+				if taskId != opId {
+					attrMap["task-id"] = taskId
+					attrMap["task-name"] = taskName
+				}
+
+				for k, v := range jspan.Tags() {
+					// Ignore the special tags added by Jaeger, middleware (sampler.type, span.*) present in the span
+					if strings.HasPrefix(k, "sampler.") || strings.HasPrefix(k, "span.") || k == "component" {
+						continue
+					}
+
+					attrMap[k] = v
+				}
+
+				processBaggageItems := func(k, v string) bool {
+					if k != "rpc-span-name" {
+						attrMap[k] = v
+					}
+					return true
+				}
+
+				jspan.SpanContext().ForeachBaggageItem(processBaggageItems)
+			}
+		}
+	}
+
+	return serializeMap(attrMap)
+}
+
+// EnrichSpan injects additional log fields into the Span, e.g. device-id
+func EnrichSpan(ctx context.Context, keyAndValues ...Fields) {
+	span := opentracing.SpanFromContext(ctx)
+	if span != nil {
+		if jspan, ok := span.(*jtracing.Span); ok {
+			// Inject as a BaggageItem when the Span is the Root Span so that it propagates
+			// across the components along with Root Span (called as Trace)
+			// Else, inject as a Tag so that it is attached to the Child Task
+			isRootSpan := false
+			if jspan.SpanContext().TraceID().String() == jspan.SpanContext().SpanID().String() {
+				isRootSpan = true
+			}
+
+			for _, field := range keyAndValues {
+				for k, v := range field {
+					if isRootSpan {
+						span.SetBaggageItem(k, v.(string))
+					} else {
+						span.SetTag(k, v)
+					}
+				}
+			}
+		}
+	}
+}
+
+// MarkSpanError injects an error into the Span in the event of an operation failure
+func MarkSpanError(ctx context.Context, err error) {
+	span := opentracing.SpanFromContext(ctx)
+	if span != nil {
+		span.SetTag("error", true)
+		span.SetTag("err", err)
+	}
+}
+
+// CreateChildSpan creates a Child Span from the Parent Span embedded in the passed context. It should be used before
+// starting a new major operation in Synchronous or Asynchronous mode (go routine), such as the following:
+// 1. Start of all implemented External API methods, unless using an interceptor for auto-injection of the Span (server side impl)
+// 2. Just before calling a third-party lib which invokes an external API (etcd, kafka)
+// 3. At the start of a go routine responsible for performing a major task of significant duration
+// 4. Any method which is suspected to be time consuming...
+func CreateChildSpan(ctx context.Context, taskName string, keyAndValues ...Fields) (opentracing.Span, context.Context) {
+	if !GetGlobalLFM().GetLogCorrelationStatus() && !GetGlobalLFM().GetTracePublishingStatus() {
+		return opentracing.NoopTracer{}.StartSpan(taskName), ctx
+	}
+
+	parentSpan := opentracing.SpanFromContext(ctx)
+	childSpan, newCtx := opentracing.StartSpanFromContext(ctx, taskName)
+
+	if parentSpan == nil || parentSpan.BaggageItem(RootSpanNameKey) == "" {
+		childSpan.SetBaggageItem(RootSpanNameKey, taskName)
+	}
+
+	EnrichSpan(newCtx, keyAndValues...)
+	return childSpan, newCtx
+}
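+
+// A usage sketch (the task name and fields are illustrative); the returned span should be finished by the caller:
+//
+//	span, ctx := log.CreateChildSpan(ctx, "process-flow", log.Fields{"device-id": deviceID})
+//	defer span.Finish()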
+
+// CreateAsyncSpan creates an Async Child Span with a Follows-From relationship from the Parent Span embedded in the passed context.
+// It should be used only in scenarios where
+// a) there is a discontinuation in execution, and thus the result of the Child span does not affect the Parent flow at all, and
+// b) the execution of the Child Span is guaranteed to start after the completion of the Parent Span.
+// In case of any confusion, use the CreateChildSpan method.
+// Some situations where this method would be suitable include a Kafka async RPC call, propagation of an event across
+// a channel, etc.
+func CreateAsyncSpan(ctx context.Context, taskName string, keyAndValues ...Fields) (opentracing.Span, context.Context) {
+	if !GetGlobalLFM().GetLogCorrelationStatus() && !GetGlobalLFM().GetTracePublishingStatus() {
+		return opentracing.NoopTracer{}.StartSpan(taskName), ctx
+	}
+
+	var asyncSpan opentracing.Span
+	var newCtx context.Context
+
+	parentSpan := opentracing.SpanFromContext(ctx)
+
+	// We should always be creating an Async span from a valid parent span. If not, create a Child span instead
+	if parentSpan == nil {
+		logger.Warn(context.Background(), "Async span must be created with a Valid parent span only")
+		asyncSpan, newCtx = opentracing.StartSpanFromContext(ctx, taskName)
+	} else {
+		// Use Background context as the base for Follows-from case; else new span is getting both Child and FollowsFrom relationship
+		asyncSpan, newCtx = opentracing.StartSpanFromContext(context.Background(), taskName, opentracing.FollowsFrom(parentSpan.Context()))
+	}
+
+	if parentSpan == nil || parentSpan.BaggageItem(RootSpanNameKey) == "" {
+		asyncSpan.SetBaggageItem(RootSpanNameKey, taskName)
+	}
+
+	EnrichSpan(newCtx, keyAndValues...)
+	return asyncSpan, newCtx
+}
+
+// Extracts the span from Source context and injects into the supplied Target context.
+// This should be used in situations wherein we are calling a time-sensitive operation (etcd update) and hence
+// had a context.Background() used earlier to avoid any cancellation/timeout of operation by passed context.
+// This will allow propagation of span with a different base context (and not the original context)
+func WithSpanFromContext(targetCtx, sourceCtx context.Context) context.Context {
+	span := opentracing.SpanFromContext(sourceCtx)
+	return opentracing.ContextWithSpan(targetCtx, span)
+}
+
+// Utility method to convert log Fields into array of interfaces expected by zap logger methods
+func serializeMap(fields Fields) []interface{} {
+	data := make([]interface{}, len(fields)*2)
+	i := 0
+	for k, v := range fields {
+		data[i] = k
+		data[i+1] = v
+		i = i + 2
+	}
+	return data
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/common.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/common.go
new file mode 100644
index 0000000..6508fd4
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/common.go
@@ -0,0 +1,31 @@
+/*
+ * Copyright 2020-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package probe
+
+import (
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+)
+
+var logger log.CLogger
+
+func init() {
+	// Set up this package so that its log level can be modified at run time
+	var err error
+	logger, err = log.RegisterPackage(log.JSON, log.ErrorLevel, log.Fields{})
+	if err != nil {
+		panic(err)
+	}
+}
diff --git a/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
new file mode 100644
index 0000000..7ba1a57
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-lib-go/v7/pkg/probe/probe.go
@@ -0,0 +1,308 @@
+/*
+ * Copyright 2019-present Open Networking Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package probe
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"sync"
+
+	"github.com/opencord/voltha-lib-go/v7/pkg/log"
+)
+
+// ProbeContextKeyType is the type of the context key used to fetch the Probe instance from a context
+type ProbeContextKeyType string
+
+// ServiceStatus typed values for service status
+type ServiceStatus int
+
+const (
+	// ServiceStatusUnknown initial state of services
+	ServiceStatusUnknown ServiceStatus = iota
+
+	// ServiceStatusPreparing to optionally be used for prep, such as connecting
+	ServiceStatusPreparing
+
+	// ServiceStatusPrepared to optionally be used when prep is complete, but before run
+	ServiceStatusPrepared
+
+	// ServiceStatusRunning service is functional
+	ServiceStatusRunning
+
+	// ServiceStatusStopped service has stopped, but not because of error
+	ServiceStatusStopped
+
+	// ServiceStatusFailed service has stopped because of an error
+	ServiceStatusFailed
+
+	// ServiceStatusNotReady service has started but is unable to accept requests
+	ServiceStatusNotReady
+)
+
+const (
+	// ProbeContextKey value of context key to fetch probe
+	ProbeContextKey = ProbeContextKeyType("status-update-probe")
+)
+
+// String convert ServiceStatus values to strings
+func (s ServiceStatus) String() string {
+	switch s {
+	default:
+		fallthrough
+	case ServiceStatusUnknown:
+		return "Unknown"
+	case ServiceStatusPreparing:
+		return "Preparing"
+	case ServiceStatusPrepared:
+		return "Prepared"
+	case ServiceStatusRunning:
+		return "Running"
+	case ServiceStatusStopped:
+		return "Stopped"
+	case ServiceStatusFailed:
+		return "Failed"
+	case ServiceStatusNotReady:
+		return "NotReady"
+	}
+}
+
+// ServiceStatusUpdate status update event
+type ServiceStatusUpdate struct {
+	Name   string
+	Status ServiceStatus
+}
+
+// Probe is the receiver on which probe capabilities are implemented
+type Probe struct {
+	readyFunc  func(map[string]ServiceStatus) bool
+	healthFunc func(map[string]ServiceStatus) bool
+
+	mutex     sync.RWMutex
+	status    map[string]ServiceStatus
+	isReady   bool
+	isHealthy bool
+}
+
+// WithReadyFunc override the default ready calculation function
+func (p *Probe) WithReadyFunc(readyFunc func(map[string]ServiceStatus) bool) *Probe {
+	p.readyFunc = readyFunc
+	return p
+}
+
+// WithHealthFunc override the default health calculation function
+func (p *Probe) WithHealthFunc(healthFunc func(map[string]ServiceStatus) bool) *Probe {
+	p.healthFunc = healthFunc
+	return p
+}
+
+// RegisterService registers one or more service names with the probe; status will be tracked against each service name
+func (p *Probe) RegisterService(ctx context.Context, names ...string) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+	if p.status == nil {
+		p.status = make(map[string]ServiceStatus)
+	}
+	for _, name := range names {
+		if _, ok := p.status[name]; !ok {
+			p.status[name] = ServiceStatusUnknown
+			logger.Debugw(ctx, "probe-service-registered", log.Fields{"service-name": name})
+		}
+	}
+
+	if p.readyFunc != nil {
+		p.isReady = p.readyFunc(p.status)
+	} else {
+		p.isReady = defaultReadyFunc(p.status)
+	}
+
+	if p.healthFunc != nil {
+		p.isHealthy = p.healthFunc(p.status)
+	} else {
+		p.isHealthy = defaultHealthFunc(p.status)
+	}
+}
+
+// UpdateStatus utility function to send a service update to the probe
+func (p *Probe) UpdateStatus(ctx context.Context, name string, status ServiceStatus) {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+	if p.status == nil {
+		p.status = make(map[string]ServiceStatus)
+	}
+
+	// if status hasn't changed, avoid doing useless work
+	existingStatus, ok := p.status[name]
+	if ok && (existingStatus == status) {
+		return
+	}
+
+	p.status[name] = status
+	if p.readyFunc != nil {
+		p.isReady = p.readyFunc(p.status)
+	} else {
+		p.isReady = defaultReadyFunc(p.status)
+	}
+
+	if p.healthFunc != nil {
+		p.isHealthy = p.healthFunc(p.status)
+	} else {
+		p.isHealthy = defaultHealthFunc(p.status)
+	}
+	logger.Debugw(ctx, "probe-service-status-updated",
+		log.Fields{
+			"service-name": name,
+			"status":       status.String(),
+			"ready":        p.isReady,
+			"health":       p.isHealthy,
+		})
+}
+
+func (p *Probe) GetStatus(name string) ServiceStatus {
+	p.mutex.Lock()
+	defer p.mutex.Unlock()
+
+	if p.status == nil {
+		p.status = make(map[string]ServiceStatus)
+	}
+
+	currentStatus, ok := p.status[name]
+	if ok {
+		return currentStatus
+	}
+
+	return ServiceStatusUnknown
+}
+
+func GetProbeFromContext(ctx context.Context) *Probe {
+	if ctx != nil {
+		if value := ctx.Value(ProbeContextKey); value != nil {
+			if p, ok := value.(*Probe); ok {
+				return p
+			}
+		}
+	}
+	return nil
+}
+
+// UpdateStatusFromContext is a convenience function that pulls the Probe reference from the
+// Context, if it exists, and then calls UpdateStatus on that Probe reference. If the Context
+// is nil, or if a Probe reference is not associated with the ProbeContextKey, then nothing
+// happens
+func UpdateStatusFromContext(ctx context.Context, name string, status ServiceStatus) {
+	p := GetProbeFromContext(ctx)
+	if p != nil {
+		p.UpdateStatus(ctx, name, status)
+	}
+}
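A short sketch of how these two helpers combine, written as if it sat in the same package (ProbeContextKey is defined earlier in this file; the "message-bus" service name is illustrative):

```go
// reportViaContext attaches a Probe to a Context so that deeper layers can
// report status without holding a direct reference to the Probe.
func reportViaContext(p *Probe) {
	ctx := context.WithValue(context.Background(), ProbeContextKey, p)

	// Possibly several calls deep, only the Context is needed.
	UpdateStatusFromContext(ctx, "message-bus", ServiceStatusRunning)
}
```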
+
+// pulled out to a separate function to make unit testing easier
+func (p *Probe) readzFunc(w http.ResponseWriter, req *http.Request) {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	if p.isReady {
+		w.WriteHeader(http.StatusOK)
+	} else {
+		w.WriteHeader(http.StatusTeapot)
+	}
+}
+func (p *Probe) healthzFunc(w http.ResponseWriter, req *http.Request) {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	if p.isHealthy {
+		w.WriteHeader(http.StatusOK)
+	} else {
+		w.WriteHeader(http.StatusTeapot)
+	}
+}
+func (p *Probe) detailzFunc(w http.ResponseWriter, req *http.Request) {
+	ctx := context.Background()
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	w.Header().Set("Content-Type", "application/json")
+	if _, err := w.Write([]byte("{")); err != nil {
+		logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	comma := ""
+	for c, s := range p.status {
+		if _, err := w.Write([]byte(fmt.Sprintf("%s\"%s\": \"%s\"", comma, c, s.String()))); err != nil {
+			logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+			w.WriteHeader(http.StatusInternalServerError)
+			return
+		}
+		comma = ", "
+	}
+	if _, err := w.Write([]byte("}")); err != nil {
+		logger.Errorw(ctx, "write-response", log.Fields{"error": err})
+		w.WriteHeader(http.StatusInternalServerError)
+		return
+	}
+	w.WriteHeader(http.StatusOK)
+}
+
+// ListenAndServe exposes three HTTP endpoints on the given address: /readz, /healthz, and /detailz. It returns only on error
+func (p *Probe) ListenAndServe(ctx context.Context, address string) {
+	mux := http.NewServeMux()
+
+	// Returns the result of the readyFunc calculation
+	mux.HandleFunc("/readz", p.readzFunc)
+
+	// Returns the result of the healthFunc calculation
+	mux.HandleFunc("/healthz", p.healthzFunc)
+
+	// Returns the details of the services and their status as JSON
+	mux.HandleFunc("/detailz", p.detailzFunc)
+	s := &http.Server{
+		Addr:    address,
+		Handler: mux,
+	}
+	logger.Fatal(ctx, s.ListenAndServe())
+}
+
+// IsReady returns the probe's current readiness state
+func (p *Probe) IsReady() bool {
+	p.mutex.RLock()
+	defer p.mutex.RUnlock()
+	return p.isReady
+}
+
+// defaultReadyFunc reports ready only when all registered services are running
+func defaultReadyFunc(services map[string]ServiceStatus) bool {
+	if len(services) == 0 {
+		return false
+	}
+	for _, status := range services {
+		if status != ServiceStatusRunning {
+			return false
+		}
+	}
+	return true
+}
+
+// defaultHealthFunc reports healthy as long as no service is stopped or failed.
+// Services start as unknown, so they are considered healthy.
+func defaultHealthFunc(services map[string]ServiceStatus) bool {
+	if len(services) == 0 {
+		return false
+	}
+	for _, status := range services {
+		if status == ServiceStatusStopped || status == ServiceStatusFailed {
+			return false
+		}
+	}
+	return true
+}
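Putting the pieces of this file together, a component would normally register its services once, start the HTTP listener in its own goroutine, and then report status as subsystems come up. A minimal sketch, assuming it lives in the same package (service names and the listen address are illustrative):

```go
// runWithProbe shows the expected lifecycle: register, serve, then update.
func runWithProbe() {
	ctx := context.Background()

	p := &Probe{}
	p.RegisterService(ctx, "kv-store", "message-bus")

	// ListenAndServe returns only on error, so it runs in its own goroutine.
	go p.ListenAndServe(ctx, ":8080")

	// The ready and health flags are recomputed on every update, so /readz
	// starts returning 200 once every registered service reports Running.
	p.UpdateStatus(ctx, "kv-store", ServiceStatusRunning)
	p.UpdateStatus(ctx, "message-bus", ServiceStatusRunning)
}
```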
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/common/common.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/common/common.pb.go
new file mode 100644
index 0000000..87d3dca
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/common/common.pb.go
@@ -0,0 +1,537 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/common.proto
+
+package common
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type TestModeKeys int32
+
+const (
+	TestModeKeys_api_test TestModeKeys = 0
+)
+
+var TestModeKeys_name = map[int32]string{
+	0: "api_test",
+}
+
+var TestModeKeys_value = map[string]int32{
+	"api_test": 0,
+}
+
+func (x TestModeKeys) String() string {
+	return proto.EnumName(TestModeKeys_name, int32(x))
+}
+
+func (TestModeKeys) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{0}
+}
+
+// Administrative State
+type AdminState_Types int32
+
+const (
+	// The administrative state of the device is unknown
+	AdminState_UNKNOWN AdminState_Types = 0
+	// The device is pre-provisioned into Voltha, but not contacted by it
+	AdminState_PREPROVISIONED AdminState_Types = 1
+	// The device is enabled for activation and operation
+	AdminState_ENABLED AdminState_Types = 2
+	// The device is disabled and shall not perform its intended forwarding
+	// functions other than being available for re-activation.
+	AdminState_DISABLED AdminState_Types = 3
+	// The device is in the state of image download
+	AdminState_DOWNLOADING_IMAGE AdminState_Types = 4
+)
+
+var AdminState_Types_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "PREPROVISIONED",
+	2: "ENABLED",
+	3: "DISABLED",
+	4: "DOWNLOADING_IMAGE",
+}
+
+var AdminState_Types_value = map[string]int32{
+	"UNKNOWN":           0,
+	"PREPROVISIONED":    1,
+	"ENABLED":           2,
+	"DISABLED":          3,
+	"DOWNLOADING_IMAGE": 4,
+}
+
+func (x AdminState_Types) String() string {
+	return proto.EnumName(AdminState_Types_name, int32(x))
+}
+
+func (AdminState_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{3, 0}
+}
+
+// Operational Status
+type OperStatus_Types int32
+
+const (
+	// The status of the device is unknown at this point
+	OperStatus_UNKNOWN OperStatus_Types = 0
+	// The device has been discovered, but not yet activated
+	OperStatus_DISCOVERED OperStatus_Types = 1
+	// The device is being activated (booted, rebooted, upgraded, etc.)
+	OperStatus_ACTIVATING OperStatus_Types = 2
+	// Service impacting tests are being conducted
+	OperStatus_TESTING OperStatus_Types = 3
+	// The device is up and active
+	OperStatus_ACTIVE OperStatus_Types = 4
+	// The device has failed and cannot fulfill its intended role
+	OperStatus_FAILED OperStatus_Types = 5
+	// The device is reconciling
+	OperStatus_RECONCILING OperStatus_Types = 6
+	// The device is in reconciling failed
+	OperStatus_RECONCILING_FAILED OperStatus_Types = 7
+)
+
+var OperStatus_Types_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "DISCOVERED",
+	2: "ACTIVATING",
+	3: "TESTING",
+	4: "ACTIVE",
+	5: "FAILED",
+	6: "RECONCILING",
+	7: "RECONCILING_FAILED",
+}
+
+var OperStatus_Types_value = map[string]int32{
+	"UNKNOWN":            0,
+	"DISCOVERED":         1,
+	"ACTIVATING":         2,
+	"TESTING":            3,
+	"ACTIVE":             4,
+	"FAILED":             5,
+	"RECONCILING":        6,
+	"RECONCILING_FAILED": 7,
+}
+
+func (x OperStatus_Types) String() string {
+	return proto.EnumName(OperStatus_Types_name, int32(x))
+}
+
+func (OperStatus_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{4, 0}
+}
+
+// Connectivity Status
+type ConnectStatus_Types int32
+
+const (
+	// The device connectivity status is unknown
+	ConnectStatus_UNKNOWN ConnectStatus_Types = 0
+	// The device cannot be reached by Voltha
+	ConnectStatus_UNREACHABLE ConnectStatus_Types = 1
+	// There is live communication between device and Voltha
+	ConnectStatus_REACHABLE ConnectStatus_Types = 2
+)
+
+var ConnectStatus_Types_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "UNREACHABLE",
+	2: "REACHABLE",
+}
+
+var ConnectStatus_Types_value = map[string]int32{
+	"UNKNOWN":     0,
+	"UNREACHABLE": 1,
+	"REACHABLE":   2,
+}
+
+func (x ConnectStatus_Types) String() string {
+	return proto.EnumName(ConnectStatus_Types_name, int32(x))
+}
+
+func (ConnectStatus_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{5, 0}
+}
+
+type OperationResp_OperationReturnCode int32
+
+const (
+	OperationResp_OPERATION_SUCCESS     OperationResp_OperationReturnCode = 0
+	OperationResp_OPERATION_FAILURE     OperationResp_OperationReturnCode = 1
+	OperationResp_OPERATION_UNSUPPORTED OperationResp_OperationReturnCode = 2
+	OperationResp_OPERATION_IN_PROGRESS OperationResp_OperationReturnCode = 3
+)
+
+var OperationResp_OperationReturnCode_name = map[int32]string{
+	0: "OPERATION_SUCCESS",
+	1: "OPERATION_FAILURE",
+	2: "OPERATION_UNSUPPORTED",
+	3: "OPERATION_IN_PROGRESS",
+}
+
+var OperationResp_OperationReturnCode_value = map[string]int32{
+	"OPERATION_SUCCESS":     0,
+	"OPERATION_FAILURE":     1,
+	"OPERATION_UNSUPPORTED": 2,
+	"OPERATION_IN_PROGRESS": 3,
+}
+
+func (x OperationResp_OperationReturnCode) String() string {
+	return proto.EnumName(OperationResp_OperationReturnCode_name, int32(x))
+}
+
+func (OperationResp_OperationReturnCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{6, 0}
+}
+
+// Convey a resource identifier
+type ID struct {
+	Id                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ID) Reset()         { *m = ID{} }
+func (m *ID) String() string { return proto.CompactTextString(m) }
+func (*ID) ProtoMessage()    {}
+func (*ID) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{0}
+}
+
+func (m *ID) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ID.Unmarshal(m, b)
+}
+func (m *ID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ID.Marshal(b, m, deterministic)
+}
+func (m *ID) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ID.Merge(m, src)
+}
+func (m *ID) XXX_Size() int {
+	return xxx_messageInfo_ID.Size(m)
+}
+func (m *ID) XXX_DiscardUnknown() {
+	xxx_messageInfo_ID.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ID proto.InternalMessageInfo
+
+func (m *ID) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+// Represents a list of IDs
+type IDs struct {
+	Items                []*ID    `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *IDs) Reset()         { *m = IDs{} }
+func (m *IDs) String() string { return proto.CompactTextString(m) }
+func (*IDs) ProtoMessage()    {}
+func (*IDs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{1}
+}
+
+func (m *IDs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_IDs.Unmarshal(m, b)
+}
+func (m *IDs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_IDs.Marshal(b, m, deterministic)
+}
+func (m *IDs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_IDs.Merge(m, src)
+}
+func (m *IDs) XXX_Size() int {
+	return xxx_messageInfo_IDs.Size(m)
+}
+func (m *IDs) XXX_DiscardUnknown() {
+	xxx_messageInfo_IDs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_IDs proto.InternalMessageInfo
+
+func (m *IDs) GetItems() []*ID {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type Connection struct {
+	// endpoint is the endpoint sending the request
+	Endpoint string `protobuf:"bytes,1,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
+	// contextInfo represents additional contextual information
+	ContextInfo string `protobuf:"bytes,2,opt,name=contextInfo,proto3" json:"contextInfo,omitempty"`
+	// keep_alive_interval is used to indicate to the remote endpoint how often it
+	// will get a keep alive notification
+	KeepAliveInterval    int64    `protobuf:"varint,3,opt,name=keep_alive_interval,json=keepAliveInterval,proto3" json:"keep_alive_interval,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Connection) Reset()         { *m = Connection{} }
+func (m *Connection) String() string { return proto.CompactTextString(m) }
+func (*Connection) ProtoMessage()    {}
+func (*Connection) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{2}
+}
+
+func (m *Connection) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Connection.Unmarshal(m, b)
+}
+func (m *Connection) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Connection.Marshal(b, m, deterministic)
+}
+func (m *Connection) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Connection.Merge(m, src)
+}
+func (m *Connection) XXX_Size() int {
+	return xxx_messageInfo_Connection.Size(m)
+}
+func (m *Connection) XXX_DiscardUnknown() {
+	xxx_messageInfo_Connection.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Connection proto.InternalMessageInfo
+
+func (m *Connection) GetEndpoint() string {
+	if m != nil {
+		return m.Endpoint
+	}
+	return ""
+}
+
+func (m *Connection) GetContextInfo() string {
+	if m != nil {
+		return m.ContextInfo
+	}
+	return ""
+}
+
+func (m *Connection) GetKeepAliveInterval() int64 {
+	if m != nil {
+		return m.KeepAliveInterval
+	}
+	return 0
+}
+
+type AdminState struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AdminState) Reset()         { *m = AdminState{} }
+func (m *AdminState) String() string { return proto.CompactTextString(m) }
+func (*AdminState) ProtoMessage()    {}
+func (*AdminState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{3}
+}
+
+func (m *AdminState) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AdminState.Unmarshal(m, b)
+}
+func (m *AdminState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AdminState.Marshal(b, m, deterministic)
+}
+func (m *AdminState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AdminState.Merge(m, src)
+}
+func (m *AdminState) XXX_Size() int {
+	return xxx_messageInfo_AdminState.Size(m)
+}
+func (m *AdminState) XXX_DiscardUnknown() {
+	xxx_messageInfo_AdminState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdminState proto.InternalMessageInfo
+
+type OperStatus struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OperStatus) Reset()         { *m = OperStatus{} }
+func (m *OperStatus) String() string { return proto.CompactTextString(m) }
+func (*OperStatus) ProtoMessage()    {}
+func (*OperStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{4}
+}
+
+func (m *OperStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OperStatus.Unmarshal(m, b)
+}
+func (m *OperStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OperStatus.Marshal(b, m, deterministic)
+}
+func (m *OperStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OperStatus.Merge(m, src)
+}
+func (m *OperStatus) XXX_Size() int {
+	return xxx_messageInfo_OperStatus.Size(m)
+}
+func (m *OperStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_OperStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OperStatus proto.InternalMessageInfo
+
+type ConnectStatus struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ConnectStatus) Reset()         { *m = ConnectStatus{} }
+func (m *ConnectStatus) String() string { return proto.CompactTextString(m) }
+func (*ConnectStatus) ProtoMessage()    {}
+func (*ConnectStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{5}
+}
+
+func (m *ConnectStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ConnectStatus.Unmarshal(m, b)
+}
+func (m *ConnectStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ConnectStatus.Marshal(b, m, deterministic)
+}
+func (m *ConnectStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConnectStatus.Merge(m, src)
+}
+func (m *ConnectStatus) XXX_Size() int {
+	return xxx_messageInfo_ConnectStatus.Size(m)
+}
+func (m *ConnectStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConnectStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConnectStatus proto.InternalMessageInfo
+
+type OperationResp struct {
+	// Return code
+	Code OperationResp_OperationReturnCode `protobuf:"varint,1,opt,name=code,proto3,enum=common.OperationResp_OperationReturnCode" json:"code,omitempty"`
+	// Additional Info
+	AdditionalInfo       string   `protobuf:"bytes,2,opt,name=additional_info,json=additionalInfo,proto3" json:"additional_info,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OperationResp) Reset()         { *m = OperationResp{} }
+func (m *OperationResp) String() string { return proto.CompactTextString(m) }
+func (*OperationResp) ProtoMessage()    {}
+func (*OperationResp) Descriptor() ([]byte, []int) {
+	return fileDescriptor_c2e3fd231961e826, []int{6}
+}
+
+func (m *OperationResp) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OperationResp.Unmarshal(m, b)
+}
+func (m *OperationResp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OperationResp.Marshal(b, m, deterministic)
+}
+func (m *OperationResp) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OperationResp.Merge(m, src)
+}
+func (m *OperationResp) XXX_Size() int {
+	return xxx_messageInfo_OperationResp.Size(m)
+}
+func (m *OperationResp) XXX_DiscardUnknown() {
+	xxx_messageInfo_OperationResp.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OperationResp proto.InternalMessageInfo
+
+func (m *OperationResp) GetCode() OperationResp_OperationReturnCode {
+	if m != nil {
+		return m.Code
+	}
+	return OperationResp_OPERATION_SUCCESS
+}
+
+func (m *OperationResp) GetAdditionalInfo() string {
+	if m != nil {
+		return m.AdditionalInfo
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterEnum("common.TestModeKeys", TestModeKeys_name, TestModeKeys_value)
+	proto.RegisterEnum("common.AdminState_Types", AdminState_Types_name, AdminState_Types_value)
+	proto.RegisterEnum("common.OperStatus_Types", OperStatus_Types_name, OperStatus_Types_value)
+	proto.RegisterEnum("common.ConnectStatus_Types", ConnectStatus_Types_name, ConnectStatus_Types_value)
+	proto.RegisterEnum("common.OperationResp_OperationReturnCode", OperationResp_OperationReturnCode_name, OperationResp_OperationReturnCode_value)
+	proto.RegisterType((*ID)(nil), "common.ID")
+	proto.RegisterType((*IDs)(nil), "common.IDs")
+	proto.RegisterType((*Connection)(nil), "common.Connection")
+	proto.RegisterType((*AdminState)(nil), "common.AdminState")
+	proto.RegisterType((*OperStatus)(nil), "common.OperStatus")
+	proto.RegisterType((*ConnectStatus)(nil), "common.ConnectStatus")
+	proto.RegisterType((*OperationResp)(nil), "common.OperationResp")
+}
+
+func init() { proto.RegisterFile("voltha_protos/common.proto", fileDescriptor_c2e3fd231961e826) }
+
+var fileDescriptor_c2e3fd231961e826 = []byte{
+	// 570 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x53, 0x5d, 0x4f, 0xdb, 0x30,
+	0x14, 0x25, 0x09, 0x14, 0xb8, 0x85, 0x12, 0xcc, 0x98, 0x3a, 0xb4, 0x87, 0x2a, 0x2f, 0xb0, 0x49,
+	0x6b, 0x25, 0xb6, 0x3d, 0xee, 0x21, 0x24, 0x5e, 0x67, 0x01, 0x4e, 0xe5, 0xa4, 0x20, 0xf1, 0x12,
+	0x85, 0xc6, 0x03, 0x6b, 0xad, 0x1d, 0x25, 0xa6, 0x1a, 0x7b, 0xdc, 0x3f, 0xd8, 0x5f, 0xdd, 0x2f,
+	0x98, 0x9c, 0x86, 0x15, 0xa4, 0xbe, 0xe5, 0x9c, 0x73, 0x73, 0xbf, 0x8e, 0x2f, 0x1c, 0xcd, 0xd5,
+	0x54, 0xdf, 0x67, 0x69, 0x51, 0x2a, 0xad, 0xaa, 0xc1, 0x44, 0xcd, 0x66, 0x4a, 0xf6, 0x6b, 0x84,
+	0x5a, 0x0b, 0xe4, 0xbd, 0x02, 0x9b, 0x84, 0xa8, 0x03, 0xb6, 0xc8, 0xbb, 0x56, 0xcf, 0x3a, 0xd9,
+	0x66, 0xb6, 0xc8, 0xbd, 0x63, 0x70, 0x48, 0x58, 0xa1, 0x1e, 0x6c, 0x08, 0xcd, 0x67, 0x55, 0xd7,
+	0xea, 0x39, 0x27, 0xed, 0x53, 0xe8, 0x37, 0x29, 0x48, 0xc8, 0x16, 0x82, 0xf7, 0x0b, 0x20, 0x50,
+	0x52, 0xf2, 0x89, 0x16, 0x4a, 0xa2, 0x23, 0xd8, 0xe2, 0x32, 0x2f, 0x94, 0x90, 0xba, 0x49, 0xf6,
+	0x1f, 0xa3, 0x1e, 0xb4, 0x27, 0x4a, 0x6a, 0xfe, 0x53, 0x13, 0xf9, 0x5d, 0x75, 0xed, 0x5a, 0x7e,
+	0x4e, 0xa1, 0x3e, 0x1c, 0xfc, 0xe0, 0xbc, 0x48, 0xb3, 0xa9, 0x98, 0xf3, 0x54, 0x48, 0xcd, 0xcb,
+	0x79, 0x36, 0xed, 0x3a, 0x3d, 0xeb, 0xc4, 0x61, 0xfb, 0x46, 0xf2, 0x8d, 0x42, 0x1a, 0xc1, 0xbb,
+	0x07, 0xf0, 0xf3, 0x99, 0x90, 0xb1, 0xce, 0x34, 0xf7, 0x6e, 0x60, 0x23, 0x79, 0x2c, 0x78, 0x85,
+	0xda, 0xb0, 0x39, 0xa6, 0xe7, 0x34, 0xba, 0xa6, 0xee, 0x1a, 0x42, 0xd0, 0x19, 0x31, 0x3c, 0x62,
+	0xd1, 0x15, 0x89, 0x49, 0x44, 0x71, 0xe8, 0x5a, 0x26, 0x00, 0x53, 0xff, 0xec, 0x02, 0x87, 0xae,
+	0x8d, 0x76, 0x60, 0x2b, 0x24, 0xf1, 0x02, 0x39, 0xe8, 0x10, 0xf6, 0xc3, 0xe8, 0x9a, 0x5e, 0x44,
+	0x7e, 0x48, 0xe8, 0x30, 0x25, 0x97, 0xfe, 0x10, 0xbb, 0xeb, 0xde, 0x1f, 0x0b, 0x20, 0x2a, 0x78,
+	0x69, 0x2a, 0x3d, 0x54, 0xde, 0x6f, 0x6b, 0x65, 0xad, 0x0e, 0x40, 0x48, 0xe2, 0x20, 0xba, 0xc2,
+	0xac, 0xae, 0xd3, 0x01, 0xf0, 0x83, 0x84, 0x5c, 0xf9, 0x09, 0xa1, 0x43, 0xd7, 0x36, 0xc1, 0x09,
+	0x8e, 0x6b, 0xe0, 0x20, 0x80, 0x56, 0x2d, 0x62, 0x77, 0xdd, 0x7c, 0x7f, 0xf5, 0x89, 0xe9, 0x60,
+	0x03, 0xed, 0x41, 0x9b, 0xe1, 0x20, 0xa2, 0x01, 0xb9, 0x30, 0x81, 0x2d, 0xf4, 0x1a, 0xd0, 0x33,
+	0x22, 0x6d, 0x02, 0x37, 0x3d, 0x0c, 0xbb, 0xcd, 0xe6, 0x9b, 0xae, 0x3e, 0xad, 0x6c, 0x6a, 0x0f,
+	0xda, 0x63, 0xca, 0xb0, 0x1f, 0x7c, 0x33, 0x33, 0xba, 0x16, 0xda, 0x85, 0xed, 0x25, 0xb4, 0xbd,
+	0xbf, 0x16, 0xec, 0x9a, 0xd1, 0x32, 0x63, 0x20, 0xe3, 0x55, 0x81, 0xbe, 0xc0, 0xfa, 0x44, 0xe5,
+	0xbc, 0x36, 0xb0, 0x73, 0xfa, 0xee, 0xc9, 0xf3, 0x17, 0x41, 0xcf, 0x91, 0x7e, 0x28, 0x65, 0xa0,
+	0x72, 0xce, 0xea, 0xdf, 0xd0, 0x31, 0xec, 0x65, 0x79, 0x2e, 0x8c, 0x96, 0x4d, 0x53, 0xb1, 0xf4,
+	0xba, 0xb3, 0xa4, 0x8d, 0xdd, 0xde, 0x23, 0x1c, 0xac, 0xc8, 0x62, 0x2c, 0x88, 0x46, 0x98, 0xf9,
+	0x09, 0x89, 0x68, 0x1a, 0x8f, 0x83, 0x00, 0xc7, 0xb1, 0xbb, 0xf6, 0x92, 0x36, 0x4b, 0x18, 0x33,
+	0x33, 0xcd, 0x1b, 0x38, 0x5c, 0xd2, 0x63, 0x1a, 0x8f, 0x47, 0xa3, 0x88, 0x25, 0xb5, 0xb3, 0x2f,
+	0x24, 0x42, 0xd3, 0x11, 0x8b, 0x86, 0xcc, 0x24, 0x73, 0xde, 0xbf, 0x85, 0x9d, 0x84, 0x57, 0xfa,
+	0x52, 0xe5, 0xfc, 0x9c, 0x3f, 0x56, 0xe6, 0x11, 0x64, 0x85, 0x48, 0x35, 0xaf, 0xb4, 0xbb, 0x76,
+	0x86, 0xe1, 0x40, 0x95, 0x77, 0x7d, 0x55, 0x70, 0x39, 0x51, 0x65, 0xde, 0x5f, 0x5c, 0xd1, 0x4d,
+	0xff, 0x4e, 0xe8, 0xfb, 0x87, 0x5b, 0xb3, 0x8f, 0xc1, 0x93, 0x36, 0x58, 0x68, 0x1f, 0x9a, 0x0b,
+	0x9b, 0x7f, 0x1e, 0xdc, 0xa9, 0xe6, 0xce, 0x6e, 0x5b, 0x35, 0xf9, 0xf1, 0x5f, 0x00, 0x00, 0x00,
+	0xff, 0xff, 0x62, 0x34, 0x9d, 0xbf, 0x86, 0x03, 0x00, 0x00,
+}
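The generated types above are ordinary Go structs and enums. A hedged example of consuming them from application code (a hypothetical caller; the import path is taken from the vendored location of this file, and the values are illustrative):

```go
package main

import (
	"fmt"

	common "github.com/opencord/voltha-protos/v5/go/common"
)

func main() {
	// Build an OperationResp and inspect its return code.
	resp := &common.OperationResp{
		Code:           common.OperationResp_OPERATION_IN_PROGRESS,
		AdditionalInfo: "device reboot scheduled",
	}
	if resp.GetCode() != common.OperationResp_OPERATION_SUCCESS {
		fmt.Printf("operation not complete: %s (%s)\n",
			resp.GetCode().String(), resp.GetAdditionalInfo())
	}
}
```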
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/ext/config/ext_config.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/ext/config/ext_config.pb.go
new file mode 100644
index 0000000..2db2ed1
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/ext/config/ext_config.pb.go
@@ -0,0 +1,523 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/ext_config.proto
+
+package config
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type OnuItuPonAlarm_AlarmID int32
+
+const (
+	OnuItuPonAlarm_RDI_ERRORS OnuItuPonAlarm_AlarmID = 0
+)
+
+var OnuItuPonAlarm_AlarmID_name = map[int32]string{
+	0: "RDI_ERRORS",
+}
+
+var OnuItuPonAlarm_AlarmID_value = map[string]int32{
+	"RDI_ERRORS": 0,
+}
+
+func (x OnuItuPonAlarm_AlarmID) String() string {
+	return proto.EnumName(OnuItuPonAlarm_AlarmID_name, int32(x))
+}
+
+func (OnuItuPonAlarm_AlarmID) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_fb43b44b7fa3aba9, []int{1, 0}
+}
+
+type OnuItuPonAlarm_AlarmReportingCondition int32
+
+const (
+	OnuItuPonAlarm_RATE_THRESHOLD  OnuItuPonAlarm_AlarmReportingCondition = 0
+	OnuItuPonAlarm_RATE_RANGE      OnuItuPonAlarm_AlarmReportingCondition = 1
+	OnuItuPonAlarm_VALUE_THRESHOLD OnuItuPonAlarm_AlarmReportingCondition = 2
+)
+
+var OnuItuPonAlarm_AlarmReportingCondition_name = map[int32]string{
+	0: "RATE_THRESHOLD",
+	1: "RATE_RANGE",
+	2: "VALUE_THRESHOLD",
+}
+
+var OnuItuPonAlarm_AlarmReportingCondition_value = map[string]int32{
+	"RATE_THRESHOLD":  0,
+	"RATE_RANGE":      1,
+	"VALUE_THRESHOLD": 2,
+}
+
+func (x OnuItuPonAlarm_AlarmReportingCondition) String() string {
+	return proto.EnumName(OnuItuPonAlarm_AlarmReportingCondition_name, int32(x))
+}
+
+func (OnuItuPonAlarm_AlarmReportingCondition) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_fb43b44b7fa3aba9, []int{1, 1}
+}
+
+type AlarmConfig struct {
+	// Types that are valid to be assigned to Config:
+	//	*AlarmConfig_OnuItuPonAlarmConfig
+	Config               isAlarmConfig_Config `protobuf_oneof:"config"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *AlarmConfig) Reset()         { *m = AlarmConfig{} }
+func (m *AlarmConfig) String() string { return proto.CompactTextString(m) }
+func (*AlarmConfig) ProtoMessage()    {}
+func (*AlarmConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_fb43b44b7fa3aba9, []int{0}
+}
+
+func (m *AlarmConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmConfig.Unmarshal(m, b)
+}
+func (m *AlarmConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmConfig.Marshal(b, m, deterministic)
+}
+func (m *AlarmConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmConfig.Merge(m, src)
+}
+func (m *AlarmConfig) XXX_Size() int {
+	return xxx_messageInfo_AlarmConfig.Size(m)
+}
+func (m *AlarmConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmConfig proto.InternalMessageInfo
+
+type isAlarmConfig_Config interface {
+	isAlarmConfig_Config()
+}
+
+type AlarmConfig_OnuItuPonAlarmConfig struct {
+	OnuItuPonAlarmConfig *OnuItuPonAlarm `protobuf:"bytes,1,opt,name=onu_itu_pon_alarm_config,json=onuItuPonAlarmConfig,proto3,oneof"`
+}
+
+func (*AlarmConfig_OnuItuPonAlarmConfig) isAlarmConfig_Config() {}
+
+func (m *AlarmConfig) GetConfig() isAlarmConfig_Config {
+	if m != nil {
+		return m.Config
+	}
+	return nil
+}
+
+func (m *AlarmConfig) GetOnuItuPonAlarmConfig() *OnuItuPonAlarm {
+	if x, ok := m.GetConfig().(*AlarmConfig_OnuItuPonAlarmConfig); ok {
+		return x.OnuItuPonAlarmConfig
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*AlarmConfig) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*AlarmConfig_OnuItuPonAlarmConfig)(nil),
+	}
+}
+
+type OnuItuPonAlarm struct {
+	PonNi                   uint32                                 `protobuf:"fixed32,1,opt,name=pon_ni,json=ponNi,proto3" json:"pon_ni,omitempty"`
+	OnuId                   uint32                                 `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	AlarmId                 OnuItuPonAlarm_AlarmID                 `protobuf:"varint,3,opt,name=alarm_id,json=alarmId,proto3,enum=config.OnuItuPonAlarm_AlarmID" json:"alarm_id,omitempty"`
+	AlarmReportingCondition OnuItuPonAlarm_AlarmReportingCondition `protobuf:"varint,4,opt,name=alarm_reporting_condition,json=alarmReportingCondition,proto3,enum=config.OnuItuPonAlarm_AlarmReportingCondition" json:"alarm_reporting_condition,omitempty"`
+	// Types that are valid to be assigned to Config:
+	//	*OnuItuPonAlarm_RateThresholdConfig_
+	//	*OnuItuPonAlarm_RateRangeConfig_
+	//	*OnuItuPonAlarm_ValueThresholdConfig_
+	Config               isOnuItuPonAlarm_Config `protobuf_oneof:"config"`
+	XXX_NoUnkeyedLiteral struct{}                `json:"-"`
+	XXX_unrecognized     []byte                  `json:"-"`
+	XXX_sizecache        int32                   `json:"-"`
+}
+
+func (m *OnuItuPonAlarm) Reset()         { *m = OnuItuPonAlarm{} }
+func (m *OnuItuPonAlarm) String() string { return proto.CompactTextString(m) }
+func (*OnuItuPonAlarm) ProtoMessage()    {}
+func (*OnuItuPonAlarm) Descriptor() ([]byte, []int) {
+	return fileDescriptor_fb43b44b7fa3aba9, []int{1}
+}
+
+func (m *OnuItuPonAlarm) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuItuPonAlarm.Unmarshal(m, b)
+}
+func (m *OnuItuPonAlarm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuItuPonAlarm.Marshal(b, m, deterministic)
+}
+func (m *OnuItuPonAlarm) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuItuPonAlarm.Merge(m, src)
+}
+func (m *OnuItuPonAlarm) XXX_Size() int {
+	return xxx_messageInfo_OnuItuPonAlarm.Size(m)
+}
+func (m *OnuItuPonAlarm) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuItuPonAlarm.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuItuPonAlarm proto.InternalMessageInfo
+
+func (m *OnuItuPonAlarm) GetPonNi() uint32 {
+	if m != nil {
+		return m.PonNi
+	}
+	return 0
+}
+
+func (m *OnuItuPonAlarm) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *OnuItuPonAlarm) GetAlarmId() OnuItuPonAlarm_AlarmID {
+	if m != nil {
+		return m.AlarmId
+	}
+	return OnuItuPonAlarm_RDI_ERRORS
+}
+
+func (m *OnuItuPonAlarm) GetAlarmReportingCondition() OnuItuPonAlarm_AlarmReportingCondition {
+	if m != nil {
+		return m.AlarmReportingCondition
+	}
+	return OnuItuPonAlarm_RATE_THRESHOLD
+}
+
+type isOnuItuPonAlarm_Config interface {
+	isOnuItuPonAlarm_Config()
+}
+
+type OnuItuPonAlarm_RateThresholdConfig_ struct {
+	RateThresholdConfig *OnuItuPonAlarm_RateThresholdConfig `protobuf:"bytes,5,opt,name=rate_threshold_config,json=rateThresholdConfig,proto3,oneof"`
+}
+
+type OnuItuPonAlarm_RateRangeConfig_ struct {
+	RateRangeConfig *OnuItuPonAlarm_RateRangeConfig `protobuf:"bytes,6,opt,name=rate_range_config,json=rateRangeConfig,proto3,oneof"`
+}
+
+type OnuItuPonAlarm_ValueThresholdConfig_ struct {
+	ValueThresholdConfig *OnuItuPonAlarm_ValueThresholdConfig `protobuf:"bytes,7,opt,name=value_threshold_config,json=valueThresholdConfig,proto3,oneof"`
+}
+
+func (*OnuItuPonAlarm_RateThresholdConfig_) isOnuItuPonAlarm_Config() {}
+
+func (*OnuItuPonAlarm_RateRangeConfig_) isOnuItuPonAlarm_Config() {}
+
+func (*OnuItuPonAlarm_ValueThresholdConfig_) isOnuItuPonAlarm_Config() {}
+
+func (m *OnuItuPonAlarm) GetConfig() isOnuItuPonAlarm_Config {
+	if m != nil {
+		return m.Config
+	}
+	return nil
+}
+
+func (m *OnuItuPonAlarm) GetRateThresholdConfig() *OnuItuPonAlarm_RateThresholdConfig {
+	if x, ok := m.GetConfig().(*OnuItuPonAlarm_RateThresholdConfig_); ok {
+		return x.RateThresholdConfig
+	}
+	return nil
+}
+
+func (m *OnuItuPonAlarm) GetRateRangeConfig() *OnuItuPonAlarm_RateRangeConfig {
+	if x, ok := m.GetConfig().(*OnuItuPonAlarm_RateRangeConfig_); ok {
+		return x.RateRangeConfig
+	}
+	return nil
+}
+
+func (m *OnuItuPonAlarm) GetValueThresholdConfig() *OnuItuPonAlarm_ValueThresholdConfig {
+	if x, ok := m.GetConfig().(*OnuItuPonAlarm_ValueThresholdConfig_); ok {
+		return x.ValueThresholdConfig
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OnuItuPonAlarm) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OnuItuPonAlarm_RateThresholdConfig_)(nil),
+		(*OnuItuPonAlarm_RateRangeConfig_)(nil),
+		(*OnuItuPonAlarm_ValueThresholdConfig_)(nil),
+	}
+}
+
+type OnuItuPonAlarm_SoakTime struct {
+	ActiveSoakTime       uint32   `protobuf:"fixed32,1,opt,name=active_soak_time,json=activeSoakTime,proto3" json:"active_soak_time,omitempty"`
+	ClearSoakTime        uint32   `protobuf:"fixed32,2,opt,name=clear_soak_time,json=clearSoakTime,proto3" json:"clear_soak_time,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuItuPonAlarm_SoakTime) Reset()         { *m = OnuItuPonAlarm_SoakTime{} }
+func (m *OnuItuPonAlarm_SoakTime) String() string { return proto.CompactTextString(m) }
+func (*OnuItuPonAlarm_SoakTime) ProtoMessage()    {}
+func (*OnuItuPonAlarm_SoakTime) Descriptor() ([]byte, []int) {
+	return fileDescriptor_fb43b44b7fa3aba9, []int{1, 0}
+}
+
+func (m *OnuItuPonAlarm_SoakTime) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuItuPonAlarm_SoakTime.Unmarshal(m, b)
+}
+func (m *OnuItuPonAlarm_SoakTime) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuItuPonAlarm_SoakTime.Marshal(b, m, deterministic)
+}
+func (m *OnuItuPonAlarm_SoakTime) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuItuPonAlarm_SoakTime.Merge(m, src)
+}
+func (m *OnuItuPonAlarm_SoakTime) XXX_Size() int {
+	return xxx_messageInfo_OnuItuPonAlarm_SoakTime.Size(m)
+}
+func (m *OnuItuPonAlarm_SoakTime) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuItuPonAlarm_SoakTime.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuItuPonAlarm_SoakTime proto.InternalMessageInfo
+
+func (m *OnuItuPonAlarm_SoakTime) GetActiveSoakTime() uint32 {
+	if m != nil {
+		return m.ActiveSoakTime
+	}
+	return 0
+}
+
+func (m *OnuItuPonAlarm_SoakTime) GetClearSoakTime() uint32 {
+	if m != nil {
+		return m.ClearSoakTime
+	}
+	return 0
+}
+
+type OnuItuPonAlarm_RateThresholdConfig struct {
+	RateThresholdRising  uint64                   `protobuf:"fixed64,1,opt,name=rate_threshold_rising,json=rateThresholdRising,proto3" json:"rate_threshold_rising,omitempty"`
+	RateThresholdFalling uint64                   `protobuf:"fixed64,2,opt,name=rate_threshold_falling,json=rateThresholdFalling,proto3" json:"rate_threshold_falling,omitempty"`
+	SoakTime             *OnuItuPonAlarm_SoakTime `protobuf:"bytes,3,opt,name=soak_time,json=soakTime,proto3" json:"soak_time,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *OnuItuPonAlarm_RateThresholdConfig) Reset()         { *m = OnuItuPonAlarm_RateThresholdConfig{} }
+func (m *OnuItuPonAlarm_RateThresholdConfig) String() string { return proto.CompactTextString(m) }
+func (*OnuItuPonAlarm_RateThresholdConfig) ProtoMessage()    {}
+func (*OnuItuPonAlarm_RateThresholdConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_fb43b44b7fa3aba9, []int{1, 1}
+}
+
+func (m *OnuItuPonAlarm_RateThresholdConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuItuPonAlarm_RateThresholdConfig.Unmarshal(m, b)
+}
+func (m *OnuItuPonAlarm_RateThresholdConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuItuPonAlarm_RateThresholdConfig.Marshal(b, m, deterministic)
+}
+func (m *OnuItuPonAlarm_RateThresholdConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuItuPonAlarm_RateThresholdConfig.Merge(m, src)
+}
+func (m *OnuItuPonAlarm_RateThresholdConfig) XXX_Size() int {
+	return xxx_messageInfo_OnuItuPonAlarm_RateThresholdConfig.Size(m)
+}
+func (m *OnuItuPonAlarm_RateThresholdConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuItuPonAlarm_RateThresholdConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuItuPonAlarm_RateThresholdConfig proto.InternalMessageInfo
+
+func (m *OnuItuPonAlarm_RateThresholdConfig) GetRateThresholdRising() uint64 {
+	if m != nil {
+		return m.RateThresholdRising
+	}
+	return 0
+}
+
+func (m *OnuItuPonAlarm_RateThresholdConfig) GetRateThresholdFalling() uint64 {
+	if m != nil {
+		return m.RateThresholdFalling
+	}
+	return 0
+}
+
+func (m *OnuItuPonAlarm_RateThresholdConfig) GetSoakTime() *OnuItuPonAlarm_SoakTime {
+	if m != nil {
+		return m.SoakTime
+	}
+	return nil
+}
+
+type OnuItuPonAlarm_RateRangeConfig struct {
+	RateRangeLower       uint64                   `protobuf:"fixed64,1,opt,name=rate_range_lower,json=rateRangeLower,proto3" json:"rate_range_lower,omitempty"`
+	RateRangeUpper       uint64                   `protobuf:"fixed64,2,opt,name=rate_range_upper,json=rateRangeUpper,proto3" json:"rate_range_upper,omitempty"`
+	SoakTime             *OnuItuPonAlarm_SoakTime `protobuf:"bytes,3,opt,name=soak_time,json=soakTime,proto3" json:"soak_time,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *OnuItuPonAlarm_RateRangeConfig) Reset()         { *m = OnuItuPonAlarm_RateRangeConfig{} }
+func (m *OnuItuPonAlarm_RateRangeConfig) String() string { return proto.CompactTextString(m) }
+func (*OnuItuPonAlarm_RateRangeConfig) ProtoMessage()    {}
+func (*OnuItuPonAlarm_RateRangeConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_fb43b44b7fa3aba9, []int{1, 2}
+}
+
+func (m *OnuItuPonAlarm_RateRangeConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuItuPonAlarm_RateRangeConfig.Unmarshal(m, b)
+}
+func (m *OnuItuPonAlarm_RateRangeConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuItuPonAlarm_RateRangeConfig.Marshal(b, m, deterministic)
+}
+func (m *OnuItuPonAlarm_RateRangeConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuItuPonAlarm_RateRangeConfig.Merge(m, src)
+}
+func (m *OnuItuPonAlarm_RateRangeConfig) XXX_Size() int {
+	return xxx_messageInfo_OnuItuPonAlarm_RateRangeConfig.Size(m)
+}
+func (m *OnuItuPonAlarm_RateRangeConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuItuPonAlarm_RateRangeConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuItuPonAlarm_RateRangeConfig proto.InternalMessageInfo
+
+func (m *OnuItuPonAlarm_RateRangeConfig) GetRateRangeLower() uint64 {
+	if m != nil {
+		return m.RateRangeLower
+	}
+	return 0
+}
+
+func (m *OnuItuPonAlarm_RateRangeConfig) GetRateRangeUpper() uint64 {
+	if m != nil {
+		return m.RateRangeUpper
+	}
+	return 0
+}
+
+func (m *OnuItuPonAlarm_RateRangeConfig) GetSoakTime() *OnuItuPonAlarm_SoakTime {
+	if m != nil {
+		return m.SoakTime
+	}
+	return nil
+}
+
+type OnuItuPonAlarm_ValueThresholdConfig struct {
+	ThresholdLimit       uint64                   `protobuf:"fixed64,1,opt,name=threshold_limit,json=thresholdLimit,proto3" json:"threshold_limit,omitempty"`
+	SoakTime             *OnuItuPonAlarm_SoakTime `protobuf:"bytes,2,opt,name=soak_time,json=soakTime,proto3" json:"soak_time,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *OnuItuPonAlarm_ValueThresholdConfig) Reset()         { *m = OnuItuPonAlarm_ValueThresholdConfig{} }
+func (m *OnuItuPonAlarm_ValueThresholdConfig) String() string { return proto.CompactTextString(m) }
+func (*OnuItuPonAlarm_ValueThresholdConfig) ProtoMessage()    {}
+func (*OnuItuPonAlarm_ValueThresholdConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_fb43b44b7fa3aba9, []int{1, 3}
+}
+
+func (m *OnuItuPonAlarm_ValueThresholdConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuItuPonAlarm_ValueThresholdConfig.Unmarshal(m, b)
+}
+func (m *OnuItuPonAlarm_ValueThresholdConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuItuPonAlarm_ValueThresholdConfig.Marshal(b, m, deterministic)
+}
+func (m *OnuItuPonAlarm_ValueThresholdConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuItuPonAlarm_ValueThresholdConfig.Merge(m, src)
+}
+func (m *OnuItuPonAlarm_ValueThresholdConfig) XXX_Size() int {
+	return xxx_messageInfo_OnuItuPonAlarm_ValueThresholdConfig.Size(m)
+}
+func (m *OnuItuPonAlarm_ValueThresholdConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuItuPonAlarm_ValueThresholdConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuItuPonAlarm_ValueThresholdConfig proto.InternalMessageInfo
+
+func (m *OnuItuPonAlarm_ValueThresholdConfig) GetThresholdLimit() uint64 {
+	if m != nil {
+		return m.ThresholdLimit
+	}
+	return 0
+}
+
+func (m *OnuItuPonAlarm_ValueThresholdConfig) GetSoakTime() *OnuItuPonAlarm_SoakTime {
+	if m != nil {
+		return m.SoakTime
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterEnum("config.OnuItuPonAlarm_AlarmID", OnuItuPonAlarm_AlarmID_name, OnuItuPonAlarm_AlarmID_value)
+	proto.RegisterEnum("config.OnuItuPonAlarm_AlarmReportingCondition", OnuItuPonAlarm_AlarmReportingCondition_name, OnuItuPonAlarm_AlarmReportingCondition_value)
+	proto.RegisterType((*AlarmConfig)(nil), "config.AlarmConfig")
+	proto.RegisterType((*OnuItuPonAlarm)(nil), "config.OnuItuPonAlarm")
+	proto.RegisterType((*OnuItuPonAlarm_SoakTime)(nil), "config.OnuItuPonAlarm.SoakTime")
+	proto.RegisterType((*OnuItuPonAlarm_RateThresholdConfig)(nil), "config.OnuItuPonAlarm.RateThresholdConfig")
+	proto.RegisterType((*OnuItuPonAlarm_RateRangeConfig)(nil), "config.OnuItuPonAlarm.RateRangeConfig")
+	proto.RegisterType((*OnuItuPonAlarm_ValueThresholdConfig)(nil), "config.OnuItuPonAlarm.ValueThresholdConfig")
+}
+
+func init() { proto.RegisterFile("voltha_protos/ext_config.proto", fileDescriptor_fb43b44b7fa3aba9) }
+
+var fileDescriptor_fb43b44b7fa3aba9 = []byte{
+	// 610 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x54, 0xd1, 0x6e, 0x12, 0x41,
+	0x14, 0x05, 0x6a, 0x81, 0xde, 0x46, 0xc0, 0x81, 0xb6, 0xb4, 0x0f, 0xb5, 0xe9, 0x43, 0x6d, 0x34,
+	0x2e, 0x09, 0xea, 0x83, 0x89, 0x2f, 0xb4, 0x45, 0x21, 0x21, 0x6d, 0x33, 0xa5, 0x7d, 0x30, 0x26,
+	0xeb, 0x94, 0x9d, 0x2e, 0x63, 0x97, 0x99, 0xcd, 0x30, 0x8b, 0xbe, 0xf8, 0x35, 0x7e, 0x87, 0x7e,
+	0x9b, 0x99, 0x99, 0x5d, 0x28, 0xb0, 0x34, 0x31, 0xbe, 0x90, 0xec, 0xb9, 0xf7, 0x9c, 0x7b, 0x38,
+	0x77, 0x66, 0x60, 0x7f, 0x22, 0x02, 0x35, 0x24, 0x6e, 0x28, 0x85, 0x12, 0xe3, 0x06, 0xfd, 0xa1,
+	0xdc, 0x81, 0xe0, 0x77, 0xcc, 0x77, 0x0c, 0x82, 0xf2, 0xf6, 0xeb, 0x90, 0xc1, 0x66, 0x2b, 0x20,
+	0x72, 0x74, 0x6a, 0x3e, 0xd1, 0x25, 0xd4, 0x05, 0x8f, 0x5c, 0xa6, 0x22, 0x37, 0x14, 0xdc, 0x25,
+	0xba, 0x14, 0x13, 0xeb, 0xd9, 0x83, 0xec, 0xf1, 0x66, 0x73, 0xdb, 0x89, 0x75, 0x2e, 0x78, 0xd4,
+	0x55, 0xd1, 0xa5, 0xe0, 0x86, 0xdf, 0xc9, 0xe0, 0x9a, 0x98, 0x43, 0xac, 0xe2, 0x49, 0x11, 0x92,
+	0x51, 0x7f, 0x36, 0xa0, 0x34, 0x4f, 0x42, 0x5b, 0x90, 0xd7, 0x63, 0x38, 0x33, 0xe2, 0x05, 0xbc,
+	0x1e, 0x0a, 0x7e, 0xce, 0x34, 0x6c, 0x5c, 0x78, 0xf5, 0x9c, 0x85, 0xb5, 0xb2, 0x87, 0xde, 0x43,
+	0xd1, 0x1a, 0x62, 0x5e, 0x7d, 0xed, 0x20, 0x7b, 0x5c, 0x6a, 0xee, 0xa7, 0x9b, 0x71, 0xcc, 0x6f,
+	0xf7, 0x0c, 0x17, 0x4c, 0x7f, 0xd7, 0x43, 0xdf, 0x60, 0xd7, 0x52, 0x25, 0x0d, 0x85, 0x54, 0x8c,
+	0xfb, 0xfa, 0x5f, 0x79, 0x4c, 0x31, 0xc1, 0xeb, 0x4f, 0x8c, 0x96, 0xf3, 0x98, 0x16, 0x4e, 0x68,
+	0xa7, 0x09, 0x0b, 0xef, 0x90, 0xf4, 0x02, 0xfa, 0x0a, 0x5b, 0x92, 0x28, 0xea, 0xaa, 0xa1, 0xa4,
+	0xe3, 0xa1, 0x08, 0xbc, 0x24, 0xc0, 0x75, 0x13, 0xe0, 0xcb, 0x15, 0x73, 0x30, 0x51, 0xb4, 0x9f,
+	0x50, 0x6c, 0x78, 0x9d, 0x0c, 0xae, 0xca, 0x65, 0x18, 0xf5, 0xe1, 0x99, 0x99, 0x20, 0x09, 0xf7,
+	0x69, 0xa2, 0x9e, 0x37, 0xea, 0x47, 0x8f, 0xa8, 0x63, 0xdd, 0x3e, 0x55, 0x2e, 0xcb, 0x79, 0x08,
+	0x0d, 0x60, 0x7b, 0x42, 0x82, 0x28, 0xc5, 0x78, 0xc1, 0x48, 0xbf, 0x5a, 0x21, 0x7d, 0xa3, 0x49,
+	0xcb, 0xce, 0x6b, 0x93, 0x14, 0x7c, 0xef, 0x0b, 0x14, 0xaf, 0x04, 0xb9, 0xef, 0xb3, 0x11, 0x45,
+	0xc7, 0x50, 0x21, 0x03, 0xc5, 0x26, 0xd4, 0x1d, 0x0b, 0x72, 0xef, 0x2a, 0x36, 0xa2, 0xf1, 0x39,
+	0x28, 0x59, 0x7c, 0xda, 0x79, 0x04, 0xe5, 0x41, 0x40, 0x89, 0x7c, 0xd0, 0x68, 0x4f, 0xc6, 0x53,
+	0x03, 0x27, 0x7d, 0x7b, 0xbf, 0xb3, 0x50, 0x4d, 0xc9, 0x11, 0x35, 0x97, 0x56, 0x22, 0xd9, 0x98,
+	0x71, 0x7b, 0xa6, 0xf3, 0x0b, 0x21, 0x63, 0x53, 0x42, 0x6f, 0x61, 0x7b, 0x81, 0x73, 0x47, 0x82,
+	0x40, 0x93, 0x72, 0x86, 0x54, 0x9b, 0x23, 0x7d, 0xb4, 0x35, 0xf4, 0x01, 0x36, 0x66, 0x1e, 0xd7,
+	0x4c, 0x6e, 0xcf, 0x57, 0xe4, 0x96, 0xb8, 0xc6, 0xc5, 0x71, 0xe2, 0xff, 0x57, 0x16, 0xca, 0x0b,
+	0x9b, 0xd2, 0x29, 0x3d, 0x58, 0x76, 0x20, 0xbe, 0x53, 0x19, 0xdb, 0x2e, 0x4d, 0x37, 0xd8, 0xd3,
+	0xe8, 0x42, 0x67, 0x14, 0x86, 0x54, 0xc6, 0x5e, 0x67, 0x9d, 0xd7, 0x1a, 0xfd, 0x4f, 0x97, 0x3f,
+	0xa1, 0x96, 0xb6, 0x73, 0xf4, 0x02, 0xca, 0xb3, 0xb0, 0x02, 0x36, 0x62, 0x2a, 0x31, 0x3a, 0x85,
+	0x7b, 0x1a, 0x9d, 0x1f, 0x9f, 0xfb, 0xc7, 0xf1, 0x87, 0xbb, 0x50, 0x88, 0xef, 0x37, 0x2a, 0x01,
+	0xe0, 0xb3, 0xae, 0xdb, 0xc6, 0xf8, 0x02, 0x5f, 0x55, 0x32, 0x87, 0x18, 0x76, 0x56, 0x5c, 0x57,
+	0x84, 0xa0, 0x84, 0x5b, 0xfd, 0xb6, 0xdb, 0xef, 0xe0, 0xf6, 0x55, 0xe7, 0xa2, 0x77, 0x56, 0xc9,
+	0x18, 0xba, 0xc6, 0x70, 0xeb, 0xfc, 0x53, 0xbb, 0x92, 0x45, 0x55, 0x28, 0xdf, 0xb4, 0x7a, 0xd7,
+	0x0f, 0x9b, 0x72, 0xb3, 0x07, 0xec, 0xa4, 0x0b, 0x55, 0x21, 0x7d, 0x47, 0x84, 0x94, 0x0f, 0x84,
+	0xf4, 0x1c, 0xfb, 0xc4, 0x7e, 0x6e, 0xfa, 0x4c, 0x0d, 0xa3, 0x5b, 0x67, 0x20, 0x46, 0x8d, 0xa4,
+	0xd6, 0xb0, 0xb5, 0xd7, 0xf1, 0xf3, 0x3b, 0x79, 0xd7, 0xf0, 0x85, 0x7e, 0x84, 0x1b, 0x56, 0xea,
+	0x36, 0x6f, 0x0a, 0x6f, 0xfe, 0x06, 0x00, 0x00, 0xff, 0xff, 0x36, 0x9d, 0x64, 0x93, 0xa7, 0x05,
+	0x00, 0x00,
+}
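The oneof wrappers generated above are the least obvious part of this file to use. A sketch of building an OnuItuPonAlarm with a rate-threshold condition and wrapping it in an AlarmConfig (a hypothetical caller; the import path is taken from the vendored location and all numeric values are illustrative):

```go
package main

import (
	"fmt"

	config "github.com/opencord/voltha-protos/v5/go/ext/config"
)

func main() {
	alarm := &config.OnuItuPonAlarm{
		PonNi:                   0,
		OnuId:                   1,
		AlarmId:                 config.OnuItuPonAlarm_RDI_ERRORS,
		AlarmReportingCondition: config.OnuItuPonAlarm_RATE_THRESHOLD,
		// The oneof field is set through its generated wrapper type.
		Config: &config.OnuItuPonAlarm_RateThresholdConfig_{
			RateThresholdConfig: &config.OnuItuPonAlarm_RateThresholdConfig{
				RateThresholdRising:  100,
				RateThresholdFalling: 10,
				SoakTime: &config.OnuItuPonAlarm_SoakTime{
					ActiveSoakTime: 10,
					ClearSoakTime:  10,
				},
			},
		},
	}

	cfg := &config.AlarmConfig{
		Config: &config.AlarmConfig_OnuItuPonAlarmConfig{OnuItuPonAlarmConfig: alarm},
	}
	fmt.Println(cfg.GetOnuItuPonAlarmConfig().GetAlarmId())
}
```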
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/extension/extensions.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/extension/extensions.pb.go
new file mode 100644
index 0000000..e6105b6
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/extension/extensions.pb.go
@@ -0,0 +1,3609 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/extensions.proto
+
+package extension
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	config "github.com/opencord/voltha-protos/v5/go/ext/config"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type ValueType_Type int32
+
+const (
+	ValueType_EMPTY    ValueType_Type = 0
+	ValueType_DISTANCE ValueType_Type = 1
+)
+
+var ValueType_Type_name = map[int32]string{
+	0: "EMPTY",
+	1: "DISTANCE",
+}
+
+var ValueType_Type_value = map[string]int32{
+	"EMPTY":    0,
+	"DISTANCE": 1,
+}
+
+func (x ValueType_Type) String() string {
+	return proto.EnumName(ValueType_Type_name, int32(x))
+}
+
+func (ValueType_Type) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{1, 0}
+}
+
+type GetOnuUniInfoResponse_ConfigurationInd int32
+
+const (
+	GetOnuUniInfoResponse_UNKOWN               GetOnuUniInfoResponse_ConfigurationInd = 0
+	GetOnuUniInfoResponse_TEN_BASE_T_FDX       GetOnuUniInfoResponse_ConfigurationInd = 1
+	GetOnuUniInfoResponse_HUNDRED_BASE_T_FDX   GetOnuUniInfoResponse_ConfigurationInd = 2
+	GetOnuUniInfoResponse_GIGABIT_ETHERNET_FDX GetOnuUniInfoResponse_ConfigurationInd = 3
+	GetOnuUniInfoResponse_TEN_G_ETHERNET_FDX   GetOnuUniInfoResponse_ConfigurationInd = 4
+	GetOnuUniInfoResponse_TEN_BASE_T_HDX       GetOnuUniInfoResponse_ConfigurationInd = 5
+	GetOnuUniInfoResponse_HUNDRED_BASE_T_HDX   GetOnuUniInfoResponse_ConfigurationInd = 6
+	GetOnuUniInfoResponse_GIGABIT_ETHERNET_HDX GetOnuUniInfoResponse_ConfigurationInd = 7
+)
+
+var GetOnuUniInfoResponse_ConfigurationInd_name = map[int32]string{
+	0: "UNKOWN",
+	1: "TEN_BASE_T_FDX",
+	2: "HUNDRED_BASE_T_FDX",
+	3: "GIGABIT_ETHERNET_FDX",
+	4: "TEN_G_ETHERNET_FDX",
+	5: "TEN_BASE_T_HDX",
+	6: "HUNDRED_BASE_T_HDX",
+	7: "GIGABIT_ETHERNET_HDX",
+}
+
+var GetOnuUniInfoResponse_ConfigurationInd_value = map[string]int32{
+	"UNKOWN":               0,
+	"TEN_BASE_T_FDX":       1,
+	"HUNDRED_BASE_T_FDX":   2,
+	"GIGABIT_ETHERNET_FDX": 3,
+	"TEN_G_ETHERNET_FDX":   4,
+	"TEN_BASE_T_HDX":       5,
+	"HUNDRED_BASE_T_HDX":   6,
+	"GIGABIT_ETHERNET_HDX": 7,
+}
+
+func (x GetOnuUniInfoResponse_ConfigurationInd) String() string {
+	return proto.EnumName(GetOnuUniInfoResponse_ConfigurationInd_name, int32(x))
+}
+
+func (GetOnuUniInfoResponse_ConfigurationInd) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{7, 0}
+}
+
+type GetOnuUniInfoResponse_AdministrativeState int32
+
+const (
+	GetOnuUniInfoResponse_ADMSTATE_UNDEFINED GetOnuUniInfoResponse_AdministrativeState = 0
+	GetOnuUniInfoResponse_LOCKED             GetOnuUniInfoResponse_AdministrativeState = 1
+	GetOnuUniInfoResponse_UNLOCKED           GetOnuUniInfoResponse_AdministrativeState = 2
+)
+
+var GetOnuUniInfoResponse_AdministrativeState_name = map[int32]string{
+	0: "ADMSTATE_UNDEFINED",
+	1: "LOCKED",
+	2: "UNLOCKED",
+}
+
+var GetOnuUniInfoResponse_AdministrativeState_value = map[string]int32{
+	"ADMSTATE_UNDEFINED": 0,
+	"LOCKED":             1,
+	"UNLOCKED":           2,
+}
+
+func (x GetOnuUniInfoResponse_AdministrativeState) String() string {
+	return proto.EnumName(GetOnuUniInfoResponse_AdministrativeState_name, int32(x))
+}
+
+func (GetOnuUniInfoResponse_AdministrativeState) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{7, 1}
+}
+
+type GetOnuUniInfoResponse_OperationalState int32
+
+const (
+	GetOnuUniInfoResponse_OPERSTATE_UNDEFINED GetOnuUniInfoResponse_OperationalState = 0
+	GetOnuUniInfoResponse_ENABLED             GetOnuUniInfoResponse_OperationalState = 1
+	GetOnuUniInfoResponse_DISABLED            GetOnuUniInfoResponse_OperationalState = 2
+)
+
+var GetOnuUniInfoResponse_OperationalState_name = map[int32]string{
+	0: "OPERSTATE_UNDEFINED",
+	1: "ENABLED",
+	2: "DISABLED",
+}
+
+var GetOnuUniInfoResponse_OperationalState_value = map[string]int32{
+	"OPERSTATE_UNDEFINED": 0,
+	"ENABLED":             1,
+	"DISABLED":            2,
+}
+
+func (x GetOnuUniInfoResponse_OperationalState) String() string {
+	return proto.EnumName(GetOnuUniInfoResponse_OperationalState_name, int32(x))
+}
+
+func (GetOnuUniInfoResponse_OperationalState) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{7, 2}
+}
+
+type GetOltPortCounters_PortType int32
+
+const (
+	GetOltPortCounters_Port_UNKNOWN      GetOltPortCounters_PortType = 0
+	GetOltPortCounters_Port_ETHERNET_NNI GetOltPortCounters_PortType = 1
+	GetOltPortCounters_Port_PON_OLT      GetOltPortCounters_PortType = 2
+)
+
+var GetOltPortCounters_PortType_name = map[int32]string{
+	0: "Port_UNKNOWN",
+	1: "Port_ETHERNET_NNI",
+	2: "Port_PON_OLT",
+}
+
+var GetOltPortCounters_PortType_value = map[string]int32{
+	"Port_UNKNOWN":      0,
+	"Port_ETHERNET_NNI": 1,
+	"Port_PON_OLT":      2,
+}
+
+func (x GetOltPortCounters_PortType) String() string {
+	return proto.EnumName(GetOltPortCounters_PortType_name, int32(x))
+}
+
+func (GetOltPortCounters_PortType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{8, 0}
+}
+
+type GetOnuEthernetBridgePortHistory_Direction int32
+
+const (
+	GetOnuEthernetBridgePortHistory_UNDEFINED  GetOnuEthernetBridgePortHistory_Direction = 0
+	GetOnuEthernetBridgePortHistory_UPSTREAM   GetOnuEthernetBridgePortHistory_Direction = 1
+	GetOnuEthernetBridgePortHistory_DOWNSTREAM GetOnuEthernetBridgePortHistory_Direction = 2
+)
+
+var GetOnuEthernetBridgePortHistory_Direction_name = map[int32]string{
+	0: "UNDEFINED",
+	1: "UPSTREAM",
+	2: "DOWNSTREAM",
+}
+
+var GetOnuEthernetBridgePortHistory_Direction_value = map[string]int32{
+	"UNDEFINED":  0,
+	"UPSTREAM":   1,
+	"DOWNSTREAM": 2,
+}
+
+func (x GetOnuEthernetBridgePortHistory_Direction) String() string {
+	return proto.EnumName(GetOnuEthernetBridgePortHistory_Direction_name, int32(x))
+}
+
+func (GetOnuEthernetBridgePortHistory_Direction) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{12, 0}
+}
+
+type GetOmciEthernetFrameExtendedPmResponse_Format int32
+
+const (
+	GetOmciEthernetFrameExtendedPmResponse_THIRTY_TWO_BIT GetOmciEthernetFrameExtendedPmResponse_Format = 0
+	GetOmciEthernetFrameExtendedPmResponse_SIXTY_FOUR_BIT GetOmciEthernetFrameExtendedPmResponse_Format = 1
+)
+
+var GetOmciEthernetFrameExtendedPmResponse_Format_name = map[int32]string{
+	0: "THIRTY_TWO_BIT",
+	1: "SIXTY_FOUR_BIT",
+}
+
+var GetOmciEthernetFrameExtendedPmResponse_Format_value = map[string]int32{
+	"THIRTY_TWO_BIT": 0,
+	"SIXTY_FOUR_BIT": 1,
+}
+
+func (x GetOmciEthernetFrameExtendedPmResponse_Format) String() string {
+	return proto.EnumName(GetOmciEthernetFrameExtendedPmResponse_Format_name, int32(x))
+}
+
+func (GetOmciEthernetFrameExtendedPmResponse_Format) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{21, 0}
+}
+
+type GetValueResponse_Status int32
+
+const (
+	GetValueResponse_STATUS_UNDEFINED GetValueResponse_Status = 0
+	GetValueResponse_OK               GetValueResponse_Status = 1
+	GetValueResponse_ERROR            GetValueResponse_Status = 2
+)
+
+var GetValueResponse_Status_name = map[int32]string{
+	0: "STATUS_UNDEFINED",
+	1: "OK",
+	2: "ERROR",
+}
+
+var GetValueResponse_Status_value = map[string]int32{
+	"STATUS_UNDEFINED": 0,
+	"OK":               1,
+	"ERROR":            2,
+}
+
+func (x GetValueResponse_Status) String() string {
+	return proto.EnumName(GetValueResponse_Status_name, int32(x))
+}
+
+func (GetValueResponse_Status) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{24, 0}
+}
+
+type GetValueResponse_ErrorReason int32
+
+const (
+	GetValueResponse_REASON_UNDEFINED  GetValueResponse_ErrorReason = 0
+	GetValueResponse_UNSUPPORTED       GetValueResponse_ErrorReason = 1
+	GetValueResponse_INVALID_DEVICE_ID GetValueResponse_ErrorReason = 2
+	GetValueResponse_INVALID_PORT_TYPE GetValueResponse_ErrorReason = 3
+	GetValueResponse_TIMEOUT           GetValueResponse_ErrorReason = 4
+	GetValueResponse_INVALID_REQ_TYPE  GetValueResponse_ErrorReason = 5
+	GetValueResponse_INTERNAL_ERROR    GetValueResponse_ErrorReason = 6
+	GetValueResponse_INVALID_DEVICE    GetValueResponse_ErrorReason = 7
+)
+
+var GetValueResponse_ErrorReason_name = map[int32]string{
+	0: "REASON_UNDEFINED",
+	1: "UNSUPPORTED",
+	2: "INVALID_DEVICE_ID",
+	3: "INVALID_PORT_TYPE",
+	4: "TIMEOUT",
+	5: "INVALID_REQ_TYPE",
+	6: "INTERNAL_ERROR",
+	7: "INVALID_DEVICE",
+}
+
+var GetValueResponse_ErrorReason_value = map[string]int32{
+	"REASON_UNDEFINED":  0,
+	"UNSUPPORTED":       1,
+	"INVALID_DEVICE_ID": 2,
+	"INVALID_PORT_TYPE": 3,
+	"TIMEOUT":           4,
+	"INVALID_REQ_TYPE":  5,
+	"INTERNAL_ERROR":    6,
+	"INVALID_DEVICE":    7,
+}
+
+func (x GetValueResponse_ErrorReason) String() string {
+	return proto.EnumName(GetValueResponse_ErrorReason_name, int32(x))
+}
+
+func (GetValueResponse_ErrorReason) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{24, 1}
+}
+
+type SetValueResponse_Status int32
+
+const (
+	SetValueResponse_STATUS_UNDEFINED SetValueResponse_Status = 0
+	SetValueResponse_OK               SetValueResponse_Status = 1
+	SetValueResponse_ERROR            SetValueResponse_Status = 2
+)
+
+var SetValueResponse_Status_name = map[int32]string{
+	0: "STATUS_UNDEFINED",
+	1: "OK",
+	2: "ERROR",
+}
+
+var SetValueResponse_Status_value = map[string]int32{
+	"STATUS_UNDEFINED": 0,
+	"OK":               1,
+	"ERROR":            2,
+}
+
+func (x SetValueResponse_Status) String() string {
+	return proto.EnumName(SetValueResponse_Status_name, int32(x))
+}
+
+func (SetValueResponse_Status) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{26, 0}
+}
+
+type SetValueResponse_ErrorReason int32
+
+const (
+	SetValueResponse_REASON_UNDEFINED SetValueResponse_ErrorReason = 0
+	SetValueResponse_UNSUPPORTED      SetValueResponse_ErrorReason = 1
+)
+
+var SetValueResponse_ErrorReason_name = map[int32]string{
+	0: "REASON_UNDEFINED",
+	1: "UNSUPPORTED",
+}
+
+var SetValueResponse_ErrorReason_value = map[string]int32{
+	"REASON_UNDEFINED": 0,
+	"UNSUPPORTED":      1,
+}
+
+func (x SetValueResponse_ErrorReason) String() string {
+	return proto.EnumName(SetValueResponse_ErrorReason_name, int32(x))
+}
+
+func (SetValueResponse_ErrorReason) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{26, 1}
+}
+
+type ValueSet struct {
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Types that are valid to be assigned to Value:
+	//	*ValueSet_AlarmConfig
+	Value                isValueSet_Value `protobuf_oneof:"value"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *ValueSet) Reset()         { *m = ValueSet{} }
+func (m *ValueSet) String() string { return proto.CompactTextString(m) }
+func (*ValueSet) ProtoMessage()    {}
+func (*ValueSet) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{0}
+}
+
+func (m *ValueSet) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ValueSet.Unmarshal(m, b)
+}
+func (m *ValueSet) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ValueSet.Marshal(b, m, deterministic)
+}
+func (m *ValueSet) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ValueSet.Merge(m, src)
+}
+func (m *ValueSet) XXX_Size() int {
+	return xxx_messageInfo_ValueSet.Size(m)
+}
+func (m *ValueSet) XXX_DiscardUnknown() {
+	xxx_messageInfo_ValueSet.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValueSet proto.InternalMessageInfo
+
+func (m *ValueSet) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+type isValueSet_Value interface {
+	isValueSet_Value()
+}
+
+type ValueSet_AlarmConfig struct {
+	AlarmConfig *config.AlarmConfig `protobuf:"bytes,2,opt,name=alarm_config,json=alarmConfig,proto3,oneof"`
+}
+
+func (*ValueSet_AlarmConfig) isValueSet_Value() {}
+
+func (m *ValueSet) GetValue() isValueSet_Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *ValueSet) GetAlarmConfig() *config.AlarmConfig {
+	if x, ok := m.GetValue().(*ValueSet_AlarmConfig); ok {
+		return x.AlarmConfig
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*ValueSet) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*ValueSet_AlarmConfig)(nil),
+	}
+}
+
+type ValueType struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ValueType) Reset()         { *m = ValueType{} }
+func (m *ValueType) String() string { return proto.CompactTextString(m) }
+func (*ValueType) ProtoMessage()    {}
+func (*ValueType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{1}
+}
+
+func (m *ValueType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ValueType.Unmarshal(m, b)
+}
+func (m *ValueType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ValueType.Marshal(b, m, deterministic)
+}
+func (m *ValueType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ValueType.Merge(m, src)
+}
+func (m *ValueType) XXX_Size() int {
+	return xxx_messageInfo_ValueType.Size(m)
+}
+func (m *ValueType) XXX_DiscardUnknown() {
+	xxx_messageInfo_ValueType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValueType proto.InternalMessageInfo
+
+type ValueSpecifier struct {
+	Id                   string         `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Value                ValueType_Type `protobuf:"varint,2,opt,name=value,proto3,enum=extension.ValueType_Type" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *ValueSpecifier) Reset()         { *m = ValueSpecifier{} }
+func (m *ValueSpecifier) String() string { return proto.CompactTextString(m) }
+func (*ValueSpecifier) ProtoMessage()    {}
+func (*ValueSpecifier) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{2}
+}
+
+func (m *ValueSpecifier) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ValueSpecifier.Unmarshal(m, b)
+}
+func (m *ValueSpecifier) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ValueSpecifier.Marshal(b, m, deterministic)
+}
+func (m *ValueSpecifier) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ValueSpecifier.Merge(m, src)
+}
+func (m *ValueSpecifier) XXX_Size() int {
+	return xxx_messageInfo_ValueSpecifier.Size(m)
+}
+func (m *ValueSpecifier) XXX_DiscardUnknown() {
+	xxx_messageInfo_ValueSpecifier.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ValueSpecifier proto.InternalMessageInfo
+
+func (m *ValueSpecifier) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *ValueSpecifier) GetValue() ValueType_Type {
+	if m != nil {
+		return m.Value
+	}
+	return ValueType_EMPTY
+}
+
+type ReturnValues struct {
+	Set                  uint32   `protobuf:"varint,1,opt,name=Set,proto3" json:"Set,omitempty"`
+	Unsupported          uint32   `protobuf:"varint,2,opt,name=Unsupported,proto3" json:"Unsupported,omitempty"`
+	Error                uint32   `protobuf:"varint,3,opt,name=Error,proto3" json:"Error,omitempty"`
+	Distance             uint32   `protobuf:"varint,4,opt,name=Distance,proto3" json:"Distance,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ReturnValues) Reset()         { *m = ReturnValues{} }
+func (m *ReturnValues) String() string { return proto.CompactTextString(m) }
+func (*ReturnValues) ProtoMessage()    {}
+func (*ReturnValues) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{3}
+}
+
+func (m *ReturnValues) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ReturnValues.Unmarshal(m, b)
+}
+func (m *ReturnValues) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ReturnValues.Marshal(b, m, deterministic)
+}
+func (m *ReturnValues) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ReturnValues.Merge(m, src)
+}
+func (m *ReturnValues) XXX_Size() int {
+	return xxx_messageInfo_ReturnValues.Size(m)
+}
+func (m *ReturnValues) XXX_DiscardUnknown() {
+	xxx_messageInfo_ReturnValues.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ReturnValues proto.InternalMessageInfo
+
+func (m *ReturnValues) GetSet() uint32 {
+	if m != nil {
+		return m.Set
+	}
+	return 0
+}
+
+func (m *ReturnValues) GetUnsupported() uint32 {
+	if m != nil {
+		return m.Unsupported
+	}
+	return 0
+}
+
+func (m *ReturnValues) GetError() uint32 {
+	if m != nil {
+		return m.Error
+	}
+	return 0
+}
+
+func (m *ReturnValues) GetDistance() uint32 {
+	if m != nil {
+		return m.Distance
+	}
+	return 0
+}
+
+type GetDistanceRequest struct {
+	OnuDeviceId          string   `protobuf:"bytes,1,opt,name=onuDeviceId,proto3" json:"onuDeviceId,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GetDistanceRequest) Reset()         { *m = GetDistanceRequest{} }
+func (m *GetDistanceRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDistanceRequest) ProtoMessage()    {}
+func (*GetDistanceRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{4}
+}
+
+func (m *GetDistanceRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetDistanceRequest.Unmarshal(m, b)
+}
+func (m *GetDistanceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetDistanceRequest.Marshal(b, m, deterministic)
+}
+func (m *GetDistanceRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetDistanceRequest.Merge(m, src)
+}
+func (m *GetDistanceRequest) XXX_Size() int {
+	return xxx_messageInfo_GetDistanceRequest.Size(m)
+}
+func (m *GetDistanceRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetDistanceRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetDistanceRequest proto.InternalMessageInfo
+
+func (m *GetDistanceRequest) GetOnuDeviceId() string {
+	if m != nil {
+		return m.OnuDeviceId
+	}
+	return ""
+}
+
+type GetDistanceResponse struct {
+	Distance             uint32   `protobuf:"varint,1,opt,name=distance,proto3" json:"distance,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GetDistanceResponse) Reset()         { *m = GetDistanceResponse{} }
+func (m *GetDistanceResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDistanceResponse) ProtoMessage()    {}
+func (*GetDistanceResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{5}
+}
+
+func (m *GetDistanceResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetDistanceResponse.Unmarshal(m, b)
+}
+func (m *GetDistanceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetDistanceResponse.Marshal(b, m, deterministic)
+}
+func (m *GetDistanceResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetDistanceResponse.Merge(m, src)
+}
+func (m *GetDistanceResponse) XXX_Size() int {
+	return xxx_messageInfo_GetDistanceResponse.Size(m)
+}
+func (m *GetDistanceResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetDistanceResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetDistanceResponse proto.InternalMessageInfo
+
+func (m *GetDistanceResponse) GetDistance() uint32 {
+	if m != nil {
+		return m.Distance
+	}
+	return 0
+}
+
+type GetOnuUniInfoRequest struct {
+	UniIndex             uint32   `protobuf:"varint,1,opt,name=uniIndex,proto3" json:"uniIndex,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GetOnuUniInfoRequest) Reset()         { *m = GetOnuUniInfoRequest{} }
+func (m *GetOnuUniInfoRequest) String() string { return proto.CompactTextString(m) }
+func (*GetOnuUniInfoRequest) ProtoMessage()    {}
+func (*GetOnuUniInfoRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{6}
+}
+
+func (m *GetOnuUniInfoRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOnuUniInfoRequest.Unmarshal(m, b)
+}
+func (m *GetOnuUniInfoRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOnuUniInfoRequest.Marshal(b, m, deterministic)
+}
+func (m *GetOnuUniInfoRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOnuUniInfoRequest.Merge(m, src)
+}
+func (m *GetOnuUniInfoRequest) XXX_Size() int {
+	return xxx_messageInfo_GetOnuUniInfoRequest.Size(m)
+}
+func (m *GetOnuUniInfoRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOnuUniInfoRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOnuUniInfoRequest proto.InternalMessageInfo
+
+func (m *GetOnuUniInfoRequest) GetUniIndex() uint32 {
+	if m != nil {
+		return m.UniIndex
+	}
+	return 0
+}
+
+type GetOnuUniInfoResponse struct {
+	AdmState             GetOnuUniInfoResponse_AdministrativeState `protobuf:"varint,1,opt,name=admState,proto3,enum=extension.GetOnuUniInfoResponse_AdministrativeState" json:"admState,omitempty"`
+	OperState            GetOnuUniInfoResponse_OperationalState    `protobuf:"varint,2,opt,name=operState,proto3,enum=extension.GetOnuUniInfoResponse_OperationalState" json:"operState,omitempty"`
+	ConfigInd            GetOnuUniInfoResponse_ConfigurationInd    `protobuf:"varint,3,opt,name=configInd,proto3,enum=extension.GetOnuUniInfoResponse_ConfigurationInd" json:"configInd,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                                  `json:"-"`
+	XXX_unrecognized     []byte                                    `json:"-"`
+	XXX_sizecache        int32                                     `json:"-"`
+}
+
+func (m *GetOnuUniInfoResponse) Reset()         { *m = GetOnuUniInfoResponse{} }
+func (m *GetOnuUniInfoResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOnuUniInfoResponse) ProtoMessage()    {}
+func (*GetOnuUniInfoResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{7}
+}
+
+func (m *GetOnuUniInfoResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOnuUniInfoResponse.Unmarshal(m, b)
+}
+func (m *GetOnuUniInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOnuUniInfoResponse.Marshal(b, m, deterministic)
+}
+func (m *GetOnuUniInfoResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOnuUniInfoResponse.Merge(m, src)
+}
+func (m *GetOnuUniInfoResponse) XXX_Size() int {
+	return xxx_messageInfo_GetOnuUniInfoResponse.Size(m)
+}
+func (m *GetOnuUniInfoResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOnuUniInfoResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOnuUniInfoResponse proto.InternalMessageInfo
+
+func (m *GetOnuUniInfoResponse) GetAdmState() GetOnuUniInfoResponse_AdministrativeState {
+	if m != nil {
+		return m.AdmState
+	}
+	return GetOnuUniInfoResponse_ADMSTATE_UNDEFINED
+}
+
+func (m *GetOnuUniInfoResponse) GetOperState() GetOnuUniInfoResponse_OperationalState {
+	if m != nil {
+		return m.OperState
+	}
+	return GetOnuUniInfoResponse_OPERSTATE_UNDEFINED
+}
+
+func (m *GetOnuUniInfoResponse) GetConfigInd() GetOnuUniInfoResponse_ConfigurationInd {
+	if m != nil {
+		return m.ConfigInd
+	}
+	return GetOnuUniInfoResponse_UNKOWN
+}
+
+type GetOltPortCounters struct {
+	PortNo               uint32                      `protobuf:"varint,1,opt,name=portNo,proto3" json:"portNo,omitempty"`
+	PortType             GetOltPortCounters_PortType `protobuf:"varint,2,opt,name=portType,proto3,enum=extension.GetOltPortCounters_PortType" json:"portType,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                    `json:"-"`
+	XXX_unrecognized     []byte                      `json:"-"`
+	XXX_sizecache        int32                       `json:"-"`
+}
+
+func (m *GetOltPortCounters) Reset()         { *m = GetOltPortCounters{} }
+func (m *GetOltPortCounters) String() string { return proto.CompactTextString(m) }
+func (*GetOltPortCounters) ProtoMessage()    {}
+func (*GetOltPortCounters) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{8}
+}
+
+func (m *GetOltPortCounters) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOltPortCounters.Unmarshal(m, b)
+}
+func (m *GetOltPortCounters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOltPortCounters.Marshal(b, m, deterministic)
+}
+func (m *GetOltPortCounters) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOltPortCounters.Merge(m, src)
+}
+func (m *GetOltPortCounters) XXX_Size() int {
+	return xxx_messageInfo_GetOltPortCounters.Size(m)
+}
+func (m *GetOltPortCounters) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOltPortCounters.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOltPortCounters proto.InternalMessageInfo
+
+func (m *GetOltPortCounters) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *GetOltPortCounters) GetPortType() GetOltPortCounters_PortType {
+	if m != nil {
+		return m.PortType
+	}
+	return GetOltPortCounters_Port_UNKNOWN
+}
+
+type GetOltPortCountersResponse struct {
+	TxBytes              uint64   `protobuf:"varint,1,opt,name=txBytes,proto3" json:"txBytes,omitempty"`
+	RxBytes              uint64   `protobuf:"varint,2,opt,name=rxBytes,proto3" json:"rxBytes,omitempty"`
+	TxPackets            uint64   `protobuf:"varint,3,opt,name=txPackets,proto3" json:"txPackets,omitempty"`
+	RxPackets            uint64   `protobuf:"varint,4,opt,name=rxPackets,proto3" json:"rxPackets,omitempty"`
+	TxErrorPackets       uint64   `protobuf:"varint,5,opt,name=txErrorPackets,proto3" json:"txErrorPackets,omitempty"`
+	RxErrorPackets       uint64   `protobuf:"varint,6,opt,name=rxErrorPackets,proto3" json:"rxErrorPackets,omitempty"`
+	TxBcastPackets       uint64   `protobuf:"varint,7,opt,name=txBcastPackets,proto3" json:"txBcastPackets,omitempty"`
+	RxBcastPackets       uint64   `protobuf:"varint,8,opt,name=rxBcastPackets,proto3" json:"rxBcastPackets,omitempty"`
+	TxUcastPackets       uint64   `protobuf:"varint,9,opt,name=txUcastPackets,proto3" json:"txUcastPackets,omitempty"`
+	RxUcastPackets       uint64   `protobuf:"varint,10,opt,name=rxUcastPackets,proto3" json:"rxUcastPackets,omitempty"`
+	TxMcastPackets       uint64   `protobuf:"varint,11,opt,name=txMcastPackets,proto3" json:"txMcastPackets,omitempty"`
+	RxMcastPackets       uint64   `protobuf:"varint,12,opt,name=rxMcastPackets,proto3" json:"rxMcastPackets,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GetOltPortCountersResponse) Reset()         { *m = GetOltPortCountersResponse{} }
+func (m *GetOltPortCountersResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOltPortCountersResponse) ProtoMessage()    {}
+func (*GetOltPortCountersResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{9}
+}
+
+func (m *GetOltPortCountersResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOltPortCountersResponse.Unmarshal(m, b)
+}
+func (m *GetOltPortCountersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOltPortCountersResponse.Marshal(b, m, deterministic)
+}
+func (m *GetOltPortCountersResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOltPortCountersResponse.Merge(m, src)
+}
+func (m *GetOltPortCountersResponse) XXX_Size() int {
+	return xxx_messageInfo_GetOltPortCountersResponse.Size(m)
+}
+func (m *GetOltPortCountersResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOltPortCountersResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOltPortCountersResponse proto.InternalMessageInfo
+
+func (m *GetOltPortCountersResponse) GetTxBytes() uint64 {
+	if m != nil {
+		return m.TxBytes
+	}
+	return 0
+}
+
+func (m *GetOltPortCountersResponse) GetRxBytes() uint64 {
+	if m != nil {
+		return m.RxBytes
+	}
+	return 0
+}
+
+func (m *GetOltPortCountersResponse) GetTxPackets() uint64 {
+	if m != nil {
+		return m.TxPackets
+	}
+	return 0
+}
+
+func (m *GetOltPortCountersResponse) GetRxPackets() uint64 {
+	if m != nil {
+		return m.RxPackets
+	}
+	return 0
+}
+
+func (m *GetOltPortCountersResponse) GetTxErrorPackets() uint64 {
+	if m != nil {
+		return m.TxErrorPackets
+	}
+	return 0
+}
+
+func (m *GetOltPortCountersResponse) GetRxErrorPackets() uint64 {
+	if m != nil {
+		return m.RxErrorPackets
+	}
+	return 0
+}
+
+func (m *GetOltPortCountersResponse) GetTxBcastPackets() uint64 {
+	if m != nil {
+		return m.TxBcastPackets
+	}
+	return 0
+}
+
+func (m *GetOltPortCountersResponse) GetRxBcastPackets() uint64 {
+	if m != nil {
+		return m.RxBcastPackets
+	}
+	return 0
+}
+
+func (m *GetOltPortCountersResponse) GetTxUcastPackets() uint64 {
+	if m != nil {
+		return m.TxUcastPackets
+	}
+	return 0
+}
+
+func (m *GetOltPortCountersResponse) GetRxUcastPackets() uint64 {
+	if m != nil {
+		return m.RxUcastPackets
+	}
+	return 0
+}
+
+func (m *GetOltPortCountersResponse) GetTxMcastPackets() uint64 {
+	if m != nil {
+		return m.TxMcastPackets
+	}
+	return 0
+}
+
+func (m *GetOltPortCountersResponse) GetRxMcastPackets() uint64 {
+	if m != nil {
+		return m.RxMcastPackets
+	}
+	return 0
+}
+
+type GetOnuPonOpticalInfo struct {
+	Empty                *empty.Empty `protobuf:"bytes,1,opt,name=empty,proto3" json:"empty,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *GetOnuPonOpticalInfo) Reset()         { *m = GetOnuPonOpticalInfo{} }
+func (m *GetOnuPonOpticalInfo) String() string { return proto.CompactTextString(m) }
+func (*GetOnuPonOpticalInfo) ProtoMessage()    {}
+func (*GetOnuPonOpticalInfo) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{10}
+}
+
+func (m *GetOnuPonOpticalInfo) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOnuPonOpticalInfo.Unmarshal(m, b)
+}
+func (m *GetOnuPonOpticalInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOnuPonOpticalInfo.Marshal(b, m, deterministic)
+}
+func (m *GetOnuPonOpticalInfo) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOnuPonOpticalInfo.Merge(m, src)
+}
+func (m *GetOnuPonOpticalInfo) XXX_Size() int {
+	return xxx_messageInfo_GetOnuPonOpticalInfo.Size(m)
+}
+func (m *GetOnuPonOpticalInfo) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOnuPonOpticalInfo.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOnuPonOpticalInfo proto.InternalMessageInfo
+
+func (m *GetOnuPonOpticalInfo) GetEmpty() *empty.Empty {
+	if m != nil {
+		return m.Empty
+	}
+	return nil
+}
+
+// These values correspond to the Optical Line Supervision Test results
+// described in section A3.39.5 of ITU-T G.988 (11/2017) specification.
+type GetOnuPonOpticalInfoResponse struct {
+	PowerFeedVoltage       float32  `protobuf:"fixed32,1,opt,name=powerFeedVoltage,proto3" json:"powerFeedVoltage,omitempty"`
+	ReceivedOpticalPower   float32  `protobuf:"fixed32,2,opt,name=receivedOpticalPower,proto3" json:"receivedOpticalPower,omitempty"`
+	MeanOpticalLaunchPower float32  `protobuf:"fixed32,3,opt,name=meanOpticalLaunchPower,proto3" json:"meanOpticalLaunchPower,omitempty"`
+	LaserBiasCurrent       float32  `protobuf:"fixed32,4,opt,name=laserBiasCurrent,proto3" json:"laserBiasCurrent,omitempty"`
+	Temperature            float32  `protobuf:"fixed32,5,opt,name=temperature,proto3" json:"temperature,omitempty"`
+	XXX_NoUnkeyedLiteral   struct{} `json:"-"`
+	XXX_unrecognized       []byte   `json:"-"`
+	XXX_sizecache          int32    `json:"-"`
+}
+
+func (m *GetOnuPonOpticalInfoResponse) Reset()         { *m = GetOnuPonOpticalInfoResponse{} }
+func (m *GetOnuPonOpticalInfoResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOnuPonOpticalInfoResponse) ProtoMessage()    {}
+func (*GetOnuPonOpticalInfoResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{11}
+}
+
+func (m *GetOnuPonOpticalInfoResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOnuPonOpticalInfoResponse.Unmarshal(m, b)
+}
+func (m *GetOnuPonOpticalInfoResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOnuPonOpticalInfoResponse.Marshal(b, m, deterministic)
+}
+func (m *GetOnuPonOpticalInfoResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOnuPonOpticalInfoResponse.Merge(m, src)
+}
+func (m *GetOnuPonOpticalInfoResponse) XXX_Size() int {
+	return xxx_messageInfo_GetOnuPonOpticalInfoResponse.Size(m)
+}
+func (m *GetOnuPonOpticalInfoResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOnuPonOpticalInfoResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOnuPonOpticalInfoResponse proto.InternalMessageInfo
+
+func (m *GetOnuPonOpticalInfoResponse) GetPowerFeedVoltage() float32 {
+	if m != nil {
+		return m.PowerFeedVoltage
+	}
+	return 0
+}
+
+func (m *GetOnuPonOpticalInfoResponse) GetReceivedOpticalPower() float32 {
+	if m != nil {
+		return m.ReceivedOpticalPower
+	}
+	return 0
+}
+
+func (m *GetOnuPonOpticalInfoResponse) GetMeanOpticalLaunchPower() float32 {
+	if m != nil {
+		return m.MeanOpticalLaunchPower
+	}
+	return 0
+}
+
+func (m *GetOnuPonOpticalInfoResponse) GetLaserBiasCurrent() float32 {
+	if m != nil {
+		return m.LaserBiasCurrent
+	}
+	return 0
+}
+
+func (m *GetOnuPonOpticalInfoResponse) GetTemperature() float32 {
+	if m != nil {
+		return m.Temperature
+	}
+	return 0
+}
+
+type GetOnuEthernetBridgePortHistory struct {
+	Direction            GetOnuEthernetBridgePortHistory_Direction `protobuf:"varint,1,opt,name=direction,proto3,enum=extension.GetOnuEthernetBridgePortHistory_Direction" json:"direction,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                                  `json:"-"`
+	XXX_unrecognized     []byte                                    `json:"-"`
+	XXX_sizecache        int32                                     `json:"-"`
+}
+
+func (m *GetOnuEthernetBridgePortHistory) Reset()         { *m = GetOnuEthernetBridgePortHistory{} }
+func (m *GetOnuEthernetBridgePortHistory) String() string { return proto.CompactTextString(m) }
+func (*GetOnuEthernetBridgePortHistory) ProtoMessage()    {}
+func (*GetOnuEthernetBridgePortHistory) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{12}
+}
+
+func (m *GetOnuEthernetBridgePortHistory) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOnuEthernetBridgePortHistory.Unmarshal(m, b)
+}
+func (m *GetOnuEthernetBridgePortHistory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOnuEthernetBridgePortHistory.Marshal(b, m, deterministic)
+}
+func (m *GetOnuEthernetBridgePortHistory) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOnuEthernetBridgePortHistory.Merge(m, src)
+}
+func (m *GetOnuEthernetBridgePortHistory) XXX_Size() int {
+	return xxx_messageInfo_GetOnuEthernetBridgePortHistory.Size(m)
+}
+func (m *GetOnuEthernetBridgePortHistory) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOnuEthernetBridgePortHistory.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOnuEthernetBridgePortHistory proto.InternalMessageInfo
+
+func (m *GetOnuEthernetBridgePortHistory) GetDirection() GetOnuEthernetBridgePortHistory_Direction {
+	if m != nil {
+		return m.Direction
+	}
+	return GetOnuEthernetBridgePortHistory_UNDEFINED
+}
+
+type GetOnuEthernetBridgePortHistoryResponse struct {
+	DropEvents              uint32   `protobuf:"varint,1,opt,name=dropEvents,proto3" json:"dropEvents,omitempty"`
+	Octets                  uint32   `protobuf:"varint,2,opt,name=octets,proto3" json:"octets,omitempty"`
+	Packets                 uint32   `protobuf:"varint,3,opt,name=packets,proto3" json:"packets,omitempty"`
+	BroadcastPackets        uint32   `protobuf:"varint,4,opt,name=broadcastPackets,proto3" json:"broadcastPackets,omitempty"`
+	MulticastPackets        uint32   `protobuf:"varint,5,opt,name=multicastPackets,proto3" json:"multicastPackets,omitempty"`
+	CrcErroredPackets       uint32   `protobuf:"varint,6,opt,name=crcErroredPackets,proto3" json:"crcErroredPackets,omitempty"`
+	UndersizePackets        uint32   `protobuf:"varint,7,opt,name=undersizePackets,proto3" json:"undersizePackets,omitempty"`
+	OversizePackets         uint32   `protobuf:"varint,8,opt,name=oversizePackets,proto3" json:"oversizePackets,omitempty"`
+	Packets64Octets         uint32   `protobuf:"varint,9,opt,name=packets64octets,proto3" json:"packets64octets,omitempty"`
+	Packets65To127Octets    uint32   `protobuf:"varint,10,opt,name=packets65To127octets,proto3" json:"packets65To127octets,omitempty"`
+	Packets128To255Octets   uint32   `protobuf:"varint,11,opt,name=packets128To255Octets,proto3" json:"packets128To255Octets,omitempty"`
+	Packets256To511Octets   uint32   `protobuf:"varint,12,opt,name=packets256To511octets,proto3" json:"packets256To511octets,omitempty"`
+	Packets512To1023Octets  uint32   `protobuf:"varint,13,opt,name=packets512To1023octets,proto3" json:"packets512To1023octets,omitempty"`
+	Packets1024To1518Octets uint32   `protobuf:"varint,14,opt,name=packets1024To1518octets,proto3" json:"packets1024To1518octets,omitempty"`
+	XXX_NoUnkeyedLiteral    struct{} `json:"-"`
+	XXX_unrecognized        []byte   `json:"-"`
+	XXX_sizecache           int32    `json:"-"`
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) Reset() {
+	*m = GetOnuEthernetBridgePortHistoryResponse{}
+}
+func (m *GetOnuEthernetBridgePortHistoryResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOnuEthernetBridgePortHistoryResponse) ProtoMessage()    {}
+func (*GetOnuEthernetBridgePortHistoryResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{13}
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOnuEthernetBridgePortHistoryResponse.Unmarshal(m, b)
+}
+func (m *GetOnuEthernetBridgePortHistoryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOnuEthernetBridgePortHistoryResponse.Marshal(b, m, deterministic)
+}
+func (m *GetOnuEthernetBridgePortHistoryResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOnuEthernetBridgePortHistoryResponse.Merge(m, src)
+}
+func (m *GetOnuEthernetBridgePortHistoryResponse) XXX_Size() int {
+	return xxx_messageInfo_GetOnuEthernetBridgePortHistoryResponse.Size(m)
+}
+func (m *GetOnuEthernetBridgePortHistoryResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOnuEthernetBridgePortHistoryResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOnuEthernetBridgePortHistoryResponse proto.InternalMessageInfo
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetDropEvents() uint32 {
+	if m != nil {
+		return m.DropEvents
+	}
+	return 0
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetOctets() uint32 {
+	if m != nil {
+		return m.Octets
+	}
+	return 0
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetPackets() uint32 {
+	if m != nil {
+		return m.Packets
+	}
+	return 0
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetBroadcastPackets() uint32 {
+	if m != nil {
+		return m.BroadcastPackets
+	}
+	return 0
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetMulticastPackets() uint32 {
+	if m != nil {
+		return m.MulticastPackets
+	}
+	return 0
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetCrcErroredPackets() uint32 {
+	if m != nil {
+		return m.CrcErroredPackets
+	}
+	return 0
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetUndersizePackets() uint32 {
+	if m != nil {
+		return m.UndersizePackets
+	}
+	return 0
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetOversizePackets() uint32 {
+	if m != nil {
+		return m.OversizePackets
+	}
+	return 0
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetPackets64Octets() uint32 {
+	if m != nil {
+		return m.Packets64Octets
+	}
+	return 0
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetPackets65To127Octets() uint32 {
+	if m != nil {
+		return m.Packets65To127Octets
+	}
+	return 0
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetPackets128To255Octets() uint32 {
+	if m != nil {
+		return m.Packets128To255Octets
+	}
+	return 0
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetPackets256To511Octets() uint32 {
+	if m != nil {
+		return m.Packets256To511Octets
+	}
+	return 0
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetPackets512To1023Octets() uint32 {
+	if m != nil {
+		return m.Packets512To1023Octets
+	}
+	return 0
+}
+
+func (m *GetOnuEthernetBridgePortHistoryResponse) GetPackets1024To1518Octets() uint32 {
+	if m != nil {
+		return m.Packets1024To1518Octets
+	}
+	return 0
+}
+
+type GetOnuFecHistory struct {
+	Empty                *empty.Empty `protobuf:"bytes,1,opt,name=empty,proto3" json:"empty,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *GetOnuFecHistory) Reset()         { *m = GetOnuFecHistory{} }
+func (m *GetOnuFecHistory) String() string { return proto.CompactTextString(m) }
+func (*GetOnuFecHistory) ProtoMessage()    {}
+func (*GetOnuFecHistory) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{14}
+}
+
+func (m *GetOnuFecHistory) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOnuFecHistory.Unmarshal(m, b)
+}
+func (m *GetOnuFecHistory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOnuFecHistory.Marshal(b, m, deterministic)
+}
+func (m *GetOnuFecHistory) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOnuFecHistory.Merge(m, src)
+}
+func (m *GetOnuFecHistory) XXX_Size() int {
+	return xxx_messageInfo_GetOnuFecHistory.Size(m)
+}
+func (m *GetOnuFecHistory) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOnuFecHistory.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOnuFecHistory proto.InternalMessageInfo
+
+func (m *GetOnuFecHistory) GetEmpty() *empty.Empty {
+	if m != nil {
+		return m.Empty
+	}
+	return nil
+}
+
+type GetOnuFecHistoryResponse struct {
+	CorrectedBytes         uint32   `protobuf:"varint,1,opt,name=correctedBytes,proto3" json:"correctedBytes,omitempty"`
+	CorrectedCodeWords     uint32   `protobuf:"varint,2,opt,name=correctedCodeWords,proto3" json:"correctedCodeWords,omitempty"`
+	FecSeconds             uint32   `protobuf:"varint,3,opt,name=fecSeconds,proto3" json:"fecSeconds,omitempty"`
+	TotalCodeWords         uint32   `protobuf:"varint,4,opt,name=totalCodeWords,proto3" json:"totalCodeWords,omitempty"`
+	UncorrectableCodeWords uint32   `protobuf:"varint,5,opt,name=uncorrectableCodeWords,proto3" json:"uncorrectableCodeWords,omitempty"`
+	XXX_NoUnkeyedLiteral   struct{} `json:"-"`
+	XXX_unrecognized       []byte   `json:"-"`
+	XXX_sizecache          int32    `json:"-"`
+}
+
+func (m *GetOnuFecHistoryResponse) Reset()         { *m = GetOnuFecHistoryResponse{} }
+func (m *GetOnuFecHistoryResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOnuFecHistoryResponse) ProtoMessage()    {}
+func (*GetOnuFecHistoryResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{15}
+}
+
+func (m *GetOnuFecHistoryResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOnuFecHistoryResponse.Unmarshal(m, b)
+}
+func (m *GetOnuFecHistoryResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOnuFecHistoryResponse.Marshal(b, m, deterministic)
+}
+func (m *GetOnuFecHistoryResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOnuFecHistoryResponse.Merge(m, src)
+}
+func (m *GetOnuFecHistoryResponse) XXX_Size() int {
+	return xxx_messageInfo_GetOnuFecHistoryResponse.Size(m)
+}
+func (m *GetOnuFecHistoryResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOnuFecHistoryResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOnuFecHistoryResponse proto.InternalMessageInfo
+
+func (m *GetOnuFecHistoryResponse) GetCorrectedBytes() uint32 {
+	if m != nil {
+		return m.CorrectedBytes
+	}
+	return 0
+}
+
+func (m *GetOnuFecHistoryResponse) GetCorrectedCodeWords() uint32 {
+	if m != nil {
+		return m.CorrectedCodeWords
+	}
+	return 0
+}
+
+func (m *GetOnuFecHistoryResponse) GetFecSeconds() uint32 {
+	if m != nil {
+		return m.FecSeconds
+	}
+	return 0
+}
+
+func (m *GetOnuFecHistoryResponse) GetTotalCodeWords() uint32 {
+	if m != nil {
+		return m.TotalCodeWords
+	}
+	return 0
+}
+
+func (m *GetOnuFecHistoryResponse) GetUncorrectableCodeWords() uint32 {
+	if m != nil {
+		return m.UncorrectableCodeWords
+	}
+	return 0
+}
+
+type GetOnuCountersRequest struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GetOnuCountersRequest) Reset()         { *m = GetOnuCountersRequest{} }
+func (m *GetOnuCountersRequest) String() string { return proto.CompactTextString(m) }
+func (*GetOnuCountersRequest) ProtoMessage()    {}
+func (*GetOnuCountersRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{16}
+}
+
+func (m *GetOnuCountersRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOnuCountersRequest.Unmarshal(m, b)
+}
+func (m *GetOnuCountersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOnuCountersRequest.Marshal(b, m, deterministic)
+}
+func (m *GetOnuCountersRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOnuCountersRequest.Merge(m, src)
+}
+func (m *GetOnuCountersRequest) XXX_Size() int {
+	return xxx_messageInfo_GetOnuCountersRequest.Size(m)
+}
+func (m *GetOnuCountersRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOnuCountersRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOnuCountersRequest proto.InternalMessageInfo
+
+func (m *GetOnuCountersRequest) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *GetOnuCountersRequest) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+type GetOmciEthernetFrameExtendedPmRequest struct {
+	OnuDeviceId string `protobuf:"bytes,1,opt,name=onuDeviceId,proto3" json:"onuDeviceId,omitempty"`
+	// Types that are valid to be assigned to IsUniIndex:
+	//	*GetOmciEthernetFrameExtendedPmRequest_UniIndex
+	IsUniIndex           isGetOmciEthernetFrameExtendedPmRequest_IsUniIndex `protobuf_oneof:"is_uni_index"`
+	Reset_               bool                                               `protobuf:"varint,3,opt,name=reset,proto3" json:"reset,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                                           `json:"-"`
+	XXX_unrecognized     []byte                                             `json:"-"`
+	XXX_sizecache        int32                                              `json:"-"`
+}
+
+func (m *GetOmciEthernetFrameExtendedPmRequest) Reset()         { *m = GetOmciEthernetFrameExtendedPmRequest{} }
+func (m *GetOmciEthernetFrameExtendedPmRequest) String() string { return proto.CompactTextString(m) }
+func (*GetOmciEthernetFrameExtendedPmRequest) ProtoMessage()    {}
+func (*GetOmciEthernetFrameExtendedPmRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{17}
+}
+
+func (m *GetOmciEthernetFrameExtendedPmRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOmciEthernetFrameExtendedPmRequest.Unmarshal(m, b)
+}
+func (m *GetOmciEthernetFrameExtendedPmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOmciEthernetFrameExtendedPmRequest.Marshal(b, m, deterministic)
+}
+func (m *GetOmciEthernetFrameExtendedPmRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOmciEthernetFrameExtendedPmRequest.Merge(m, src)
+}
+func (m *GetOmciEthernetFrameExtendedPmRequest) XXX_Size() int {
+	return xxx_messageInfo_GetOmciEthernetFrameExtendedPmRequest.Size(m)
+}
+func (m *GetOmciEthernetFrameExtendedPmRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOmciEthernetFrameExtendedPmRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOmciEthernetFrameExtendedPmRequest proto.InternalMessageInfo
+
+func (m *GetOmciEthernetFrameExtendedPmRequest) GetOnuDeviceId() string {
+	if m != nil {
+		return m.OnuDeviceId
+	}
+	return ""
+}
+
+type isGetOmciEthernetFrameExtendedPmRequest_IsUniIndex interface {
+	isGetOmciEthernetFrameExtendedPmRequest_IsUniIndex()
+}
+
+type GetOmciEthernetFrameExtendedPmRequest_UniIndex struct {
+	UniIndex uint32 `protobuf:"varint,2,opt,name=uniIndex,proto3,oneof"`
+}
+
+func (*GetOmciEthernetFrameExtendedPmRequest_UniIndex) isGetOmciEthernetFrameExtendedPmRequest_IsUniIndex() {
+}
+
+func (m *GetOmciEthernetFrameExtendedPmRequest) GetIsUniIndex() isGetOmciEthernetFrameExtendedPmRequest_IsUniIndex {
+	if m != nil {
+		return m.IsUniIndex
+	}
+	return nil
+}
+
+func (m *GetOmciEthernetFrameExtendedPmRequest) GetUniIndex() uint32 {
+	if x, ok := m.GetIsUniIndex().(*GetOmciEthernetFrameExtendedPmRequest_UniIndex); ok {
+		return x.UniIndex
+	}
+	return 0
+}
+
+func (m *GetOmciEthernetFrameExtendedPmRequest) GetReset_() bool {
+	if m != nil {
+		return m.Reset_
+	}
+	return false
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*GetOmciEthernetFrameExtendedPmRequest) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*GetOmciEthernetFrameExtendedPmRequest_UniIndex)(nil),
+	}
+}
+
+type GetRxPowerRequest struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GetRxPowerRequest) Reset()         { *m = GetRxPowerRequest{} }
+func (m *GetRxPowerRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRxPowerRequest) ProtoMessage()    {}
+func (*GetRxPowerRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{18}
+}
+
+func (m *GetRxPowerRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetRxPowerRequest.Unmarshal(m, b)
+}
+func (m *GetRxPowerRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetRxPowerRequest.Marshal(b, m, deterministic)
+}
+func (m *GetRxPowerRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetRxPowerRequest.Merge(m, src)
+}
+func (m *GetRxPowerRequest) XXX_Size() int {
+	return xxx_messageInfo_GetRxPowerRequest.Size(m)
+}
+func (m *GetRxPowerRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetRxPowerRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetRxPowerRequest proto.InternalMessageInfo
+
+func (m *GetRxPowerRequest) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *GetRxPowerRequest) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+type GetOnuCountersResponse struct {
+	// Types that are valid to be assigned to IsIntfId:
+	//	*GetOnuCountersResponse_IntfId
+	IsIntfId isGetOnuCountersResponse_IsIntfId `protobuf_oneof:"is_intf_id"`
+	// Types that are valid to be assigned to IsOnuId:
+	//	*GetOnuCountersResponse_OnuId
+	IsOnuId isGetOnuCountersResponse_IsOnuId `protobuf_oneof:"is_onu_id"`
+	// Types that are valid to be assigned to IsPositiveDrift:
+	//	*GetOnuCountersResponse_PositiveDrift
+	IsPositiveDrift isGetOnuCountersResponse_IsPositiveDrift `protobuf_oneof:"is_positive_drift"`
+	// Types that are valid to be assigned to IsNegativeDrift:
+	//	*GetOnuCountersResponse_NegativeDrift
+	IsNegativeDrift isGetOnuCountersResponse_IsNegativeDrift `protobuf_oneof:"is_negative_drift"`
+	// Types that are valid to be assigned to IsDelimiterMissDetection:
+	//	*GetOnuCountersResponse_DelimiterMissDetection
+	IsDelimiterMissDetection isGetOnuCountersResponse_IsDelimiterMissDetection `protobuf_oneof:"is_delimiter_miss_detection"`
+	// Types that are valid to be assigned to IsBipErrors:
+	//	*GetOnuCountersResponse_BipErrors
+	IsBipErrors isGetOnuCountersResponse_IsBipErrors `protobuf_oneof:"is_bip_errors"`
+	// Types that are valid to be assigned to IsBipUnits:
+	//	*GetOnuCountersResponse_BipUnits
+	IsBipUnits isGetOnuCountersResponse_IsBipUnits `protobuf_oneof:"is_bip_units"`
+	// Types that are valid to be assigned to IsFecCorrectedSymbols:
+	//	*GetOnuCountersResponse_FecCorrectedSymbols
+	IsFecCorrectedSymbols isGetOnuCountersResponse_IsFecCorrectedSymbols `protobuf_oneof:"is_fec_corrected_symbols"`
+	// Types that are valid to be assigned to IsFecCodewordsCorrected:
+	//	*GetOnuCountersResponse_FecCodewordsCorrected
+	IsFecCodewordsCorrected isGetOnuCountersResponse_IsFecCodewordsCorrected `protobuf_oneof:"is_fec_codewords_corrected"`
+	// Types that are valid to be assigned to IsFecCodewordsUncorrectable:
+	//	*GetOnuCountersResponse_FecCodewordsUncorrectable
+	IsFecCodewordsUncorrectable isGetOnuCountersResponse_IsFecCodewordsUncorrectable `protobuf_oneof:"is_fec_codewords_uncorrectable"`
+	// Types that are valid to be assigned to IsFecCodewords:
+	//	*GetOnuCountersResponse_FecCodewords
+	IsFecCodewords isGetOnuCountersResponse_IsFecCodewords `protobuf_oneof:"is_fec_codewords"`
+	// Types that are valid to be assigned to IsFecCorrectedUnits:
+	//	*GetOnuCountersResponse_FecCorrectedUnits
+	IsFecCorrectedUnits isGetOnuCountersResponse_IsFecCorrectedUnits `protobuf_oneof:"is_fec_corrected_units"`
+	// Types that are valid to be assigned to IsXgemKeyErrors:
+	//	*GetOnuCountersResponse_XgemKeyErrors
+	IsXgemKeyErrors isGetOnuCountersResponse_IsXgemKeyErrors `protobuf_oneof:"is_xgem_key_errors"`
+	// Types that are valid to be assigned to IsXgemLoss:
+	//	*GetOnuCountersResponse_XgemLoss
+	IsXgemLoss isGetOnuCountersResponse_IsXgemLoss `protobuf_oneof:"is_xgem_loss"`
+	// Types that are valid to be assigned to IsRxPloamsError:
+	//	*GetOnuCountersResponse_RxPloamsError
+	IsRxPloamsError isGetOnuCountersResponse_IsRxPloamsError `protobuf_oneof:"is_rx_ploams_error"`
+	// Types that are valid to be assigned to IsRxPloamsNonIdle:
+	//	*GetOnuCountersResponse_RxPloamsNonIdle
+	IsRxPloamsNonIdle isGetOnuCountersResponse_IsRxPloamsNonIdle `protobuf_oneof:"is_rx_ploams_non_idle"`
+	// Types that are valid to be assigned to IsRxOmci:
+	//	*GetOnuCountersResponse_RxOmci
+	IsRxOmci isGetOnuCountersResponse_IsRxOmci `protobuf_oneof:"is_rx_omci"`
+	// Types that are valid to be assigned to IsTxOmci:
+	//	*GetOnuCountersResponse_TxOmci
+	IsTxOmci isGetOnuCountersResponse_IsTxOmci `protobuf_oneof:"is_tx_omci"`
+	// Types that are valid to be assigned to IsRxOmciPacketsCrcError:
+	//	*GetOnuCountersResponse_RxOmciPacketsCrcError
+	IsRxOmciPacketsCrcError isGetOnuCountersResponse_IsRxOmciPacketsCrcError `protobuf_oneof:"is_rx_omci_packets_crc_error"`
+	// Types that are valid to be assigned to IsRxBytes:
+	//	*GetOnuCountersResponse_RxBytes
+	IsRxBytes isGetOnuCountersResponse_IsRxBytes `protobuf_oneof:"is_rx_bytes"`
+	// Types that are valid to be assigned to IsRxPackets:
+	//	*GetOnuCountersResponse_RxPackets
+	IsRxPackets isGetOnuCountersResponse_IsRxPackets `protobuf_oneof:"is_rx_packets"`
+	// Types that are valid to be assigned to IsTxBytes:
+	//	*GetOnuCountersResponse_TxBytes
+	IsTxBytes isGetOnuCountersResponse_IsTxBytes `protobuf_oneof:"is_tx_bytes"`
+	// Types that are valid to be assigned to IsTxPackets:
+	//	*GetOnuCountersResponse_TxPackets
+	IsTxPackets isGetOnuCountersResponse_IsTxPackets `protobuf_oneof:"is_tx_packets"`
+	// Types that are valid to be assigned to IsBerReported:
+	//	*GetOnuCountersResponse_BerReported
+	IsBerReported isGetOnuCountersResponse_IsBerReported `protobuf_oneof:"is_ber_reported"`
+	// Types that are valid to be assigned to IsLcdgErrors:
+	//	*GetOnuCountersResponse_LcdgErrors
+	IsLcdgErrors isGetOnuCountersResponse_IsLcdgErrors `protobuf_oneof:"is_lcdg_errors"`
+	// Types that are valid to be assigned to IsRdiErrors:
+	//	*GetOnuCountersResponse_RdiErrors
+	IsRdiErrors isGetOnuCountersResponse_IsRdiErrors `protobuf_oneof:"is_rdi_errors"`
+	// Types that are valid to be assigned to IsTimestamp:
+	//	*GetOnuCountersResponse_Timestamp
+	IsTimestamp          isGetOnuCountersResponse_IsTimestamp `protobuf_oneof:"is_timestamp"`
+	XXX_NoUnkeyedLiteral struct{}                             `json:"-"`
+	XXX_unrecognized     []byte                               `json:"-"`
+	XXX_sizecache        int32                                `json:"-"`
+}
+
+func (m *GetOnuCountersResponse) Reset()         { *m = GetOnuCountersResponse{} }
+func (m *GetOnuCountersResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOnuCountersResponse) ProtoMessage()    {}
+func (*GetOnuCountersResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{19}
+}
+
+func (m *GetOnuCountersResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOnuCountersResponse.Unmarshal(m, b)
+}
+func (m *GetOnuCountersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOnuCountersResponse.Marshal(b, m, deterministic)
+}
+func (m *GetOnuCountersResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOnuCountersResponse.Merge(m, src)
+}
+func (m *GetOnuCountersResponse) XXX_Size() int {
+	return xxx_messageInfo_GetOnuCountersResponse.Size(m)
+}
+func (m *GetOnuCountersResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOnuCountersResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOnuCountersResponse proto.InternalMessageInfo
+
+type isGetOnuCountersResponse_IsIntfId interface {
+	isGetOnuCountersResponse_IsIntfId()
+}
+
+type GetOnuCountersResponse_IntfId struct {
+	IntfId uint32 `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_IntfId) isGetOnuCountersResponse_IsIntfId() {}
+
+func (m *GetOnuCountersResponse) GetIsIntfId() isGetOnuCountersResponse_IsIntfId {
+	if m != nil {
+		return m.IsIntfId
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetIntfId() uint32 {
+	if x, ok := m.GetIsIntfId().(*GetOnuCountersResponse_IntfId); ok {
+		return x.IntfId
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsOnuId interface {
+	isGetOnuCountersResponse_IsOnuId()
+}
+
+type GetOnuCountersResponse_OnuId struct {
+	OnuId uint32 `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_OnuId) isGetOnuCountersResponse_IsOnuId() {}
+
+func (m *GetOnuCountersResponse) GetIsOnuId() isGetOnuCountersResponse_IsOnuId {
+	if m != nil {
+		return m.IsOnuId
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetOnuId() uint32 {
+	if x, ok := m.GetIsOnuId().(*GetOnuCountersResponse_OnuId); ok {
+		return x.OnuId
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsPositiveDrift interface {
+	isGetOnuCountersResponse_IsPositiveDrift()
+}
+
+type GetOnuCountersResponse_PositiveDrift struct {
+	PositiveDrift uint64 `protobuf:"fixed64,3,opt,name=positive_drift,json=positiveDrift,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_PositiveDrift) isGetOnuCountersResponse_IsPositiveDrift() {}
+
+func (m *GetOnuCountersResponse) GetIsPositiveDrift() isGetOnuCountersResponse_IsPositiveDrift {
+	if m != nil {
+		return m.IsPositiveDrift
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetPositiveDrift() uint64 {
+	if x, ok := m.GetIsPositiveDrift().(*GetOnuCountersResponse_PositiveDrift); ok {
+		return x.PositiveDrift
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsNegativeDrift interface {
+	isGetOnuCountersResponse_IsNegativeDrift()
+}
+
+type GetOnuCountersResponse_NegativeDrift struct {
+	NegativeDrift uint64 `protobuf:"fixed64,4,opt,name=negative_drift,json=negativeDrift,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_NegativeDrift) isGetOnuCountersResponse_IsNegativeDrift() {}
+
+func (m *GetOnuCountersResponse) GetIsNegativeDrift() isGetOnuCountersResponse_IsNegativeDrift {
+	if m != nil {
+		return m.IsNegativeDrift
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetNegativeDrift() uint64 {
+	if x, ok := m.GetIsNegativeDrift().(*GetOnuCountersResponse_NegativeDrift); ok {
+		return x.NegativeDrift
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsDelimiterMissDetection interface {
+	isGetOnuCountersResponse_IsDelimiterMissDetection()
+}
+
+type GetOnuCountersResponse_DelimiterMissDetection struct {
+	DelimiterMissDetection uint64 `protobuf:"fixed64,5,opt,name=delimiter_miss_detection,json=delimiterMissDetection,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_DelimiterMissDetection) isGetOnuCountersResponse_IsDelimiterMissDetection() {
+}
+
+func (m *GetOnuCountersResponse) GetIsDelimiterMissDetection() isGetOnuCountersResponse_IsDelimiterMissDetection {
+	if m != nil {
+		return m.IsDelimiterMissDetection
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetDelimiterMissDetection() uint64 {
+	if x, ok := m.GetIsDelimiterMissDetection().(*GetOnuCountersResponse_DelimiterMissDetection); ok {
+		return x.DelimiterMissDetection
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsBipErrors interface {
+	isGetOnuCountersResponse_IsBipErrors()
+}
+
+type GetOnuCountersResponse_BipErrors struct {
+	BipErrors uint64 `protobuf:"fixed64,6,opt,name=bip_errors,json=bipErrors,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_BipErrors) isGetOnuCountersResponse_IsBipErrors() {}
+
+func (m *GetOnuCountersResponse) GetIsBipErrors() isGetOnuCountersResponse_IsBipErrors {
+	if m != nil {
+		return m.IsBipErrors
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetBipErrors() uint64 {
+	if x, ok := m.GetIsBipErrors().(*GetOnuCountersResponse_BipErrors); ok {
+		return x.BipErrors
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsBipUnits interface {
+	isGetOnuCountersResponse_IsBipUnits()
+}
+
+type GetOnuCountersResponse_BipUnits struct {
+	BipUnits uint64 `protobuf:"fixed64,7,opt,name=bip_units,json=bipUnits,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_BipUnits) isGetOnuCountersResponse_IsBipUnits() {}
+
+func (m *GetOnuCountersResponse) GetIsBipUnits() isGetOnuCountersResponse_IsBipUnits {
+	if m != nil {
+		return m.IsBipUnits
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetBipUnits() uint64 {
+	if x, ok := m.GetIsBipUnits().(*GetOnuCountersResponse_BipUnits); ok {
+		return x.BipUnits
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsFecCorrectedSymbols interface {
+	isGetOnuCountersResponse_IsFecCorrectedSymbols()
+}
+
+type GetOnuCountersResponse_FecCorrectedSymbols struct {
+	FecCorrectedSymbols uint64 `protobuf:"fixed64,8,opt,name=fec_corrected_symbols,json=fecCorrectedSymbols,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_FecCorrectedSymbols) isGetOnuCountersResponse_IsFecCorrectedSymbols() {}
+
+func (m *GetOnuCountersResponse) GetIsFecCorrectedSymbols() isGetOnuCountersResponse_IsFecCorrectedSymbols {
+	if m != nil {
+		return m.IsFecCorrectedSymbols
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetFecCorrectedSymbols() uint64 {
+	if x, ok := m.GetIsFecCorrectedSymbols().(*GetOnuCountersResponse_FecCorrectedSymbols); ok {
+		return x.FecCorrectedSymbols
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsFecCodewordsCorrected interface {
+	isGetOnuCountersResponse_IsFecCodewordsCorrected()
+}
+
+type GetOnuCountersResponse_FecCodewordsCorrected struct {
+	FecCodewordsCorrected uint64 `protobuf:"fixed64,9,opt,name=fec_codewords_corrected,json=fecCodewordsCorrected,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_FecCodewordsCorrected) isGetOnuCountersResponse_IsFecCodewordsCorrected() {
+}
+
+func (m *GetOnuCountersResponse) GetIsFecCodewordsCorrected() isGetOnuCountersResponse_IsFecCodewordsCorrected {
+	if m != nil {
+		return m.IsFecCodewordsCorrected
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetFecCodewordsCorrected() uint64 {
+	if x, ok := m.GetIsFecCodewordsCorrected().(*GetOnuCountersResponse_FecCodewordsCorrected); ok {
+		return x.FecCodewordsCorrected
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsFecCodewordsUncorrectable interface {
+	isGetOnuCountersResponse_IsFecCodewordsUncorrectable()
+}
+
+type GetOnuCountersResponse_FecCodewordsUncorrectable struct {
+	FecCodewordsUncorrectable uint64 `protobuf:"fixed64,10,opt,name=fec_codewords_uncorrectable,json=fecCodewordsUncorrectable,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_FecCodewordsUncorrectable) isGetOnuCountersResponse_IsFecCodewordsUncorrectable() {
+}
+
+func (m *GetOnuCountersResponse) GetIsFecCodewordsUncorrectable() isGetOnuCountersResponse_IsFecCodewordsUncorrectable {
+	if m != nil {
+		return m.IsFecCodewordsUncorrectable
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetFecCodewordsUncorrectable() uint64 {
+	if x, ok := m.GetIsFecCodewordsUncorrectable().(*GetOnuCountersResponse_FecCodewordsUncorrectable); ok {
+		return x.FecCodewordsUncorrectable
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsFecCodewords interface {
+	isGetOnuCountersResponse_IsFecCodewords()
+}
+
+type GetOnuCountersResponse_FecCodewords struct {
+	FecCodewords uint64 `protobuf:"fixed64,11,opt,name=fec_codewords,json=fecCodewords,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_FecCodewords) isGetOnuCountersResponse_IsFecCodewords() {}
+
+func (m *GetOnuCountersResponse) GetIsFecCodewords() isGetOnuCountersResponse_IsFecCodewords {
+	if m != nil {
+		return m.IsFecCodewords
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetFecCodewords() uint64 {
+	if x, ok := m.GetIsFecCodewords().(*GetOnuCountersResponse_FecCodewords); ok {
+		return x.FecCodewords
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsFecCorrectedUnits interface {
+	isGetOnuCountersResponse_IsFecCorrectedUnits()
+}
+
+type GetOnuCountersResponse_FecCorrectedUnits struct {
+	FecCorrectedUnits uint64 `protobuf:"fixed64,12,opt,name=fec_corrected_units,json=fecCorrectedUnits,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_FecCorrectedUnits) isGetOnuCountersResponse_IsFecCorrectedUnits() {}
+
+func (m *GetOnuCountersResponse) GetIsFecCorrectedUnits() isGetOnuCountersResponse_IsFecCorrectedUnits {
+	if m != nil {
+		return m.IsFecCorrectedUnits
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetFecCorrectedUnits() uint64 {
+	if x, ok := m.GetIsFecCorrectedUnits().(*GetOnuCountersResponse_FecCorrectedUnits); ok {
+		return x.FecCorrectedUnits
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsXgemKeyErrors interface {
+	isGetOnuCountersResponse_IsXgemKeyErrors()
+}
+
+type GetOnuCountersResponse_XgemKeyErrors struct {
+	XgemKeyErrors uint64 `protobuf:"fixed64,13,opt,name=xgem_key_errors,json=xgemKeyErrors,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_XgemKeyErrors) isGetOnuCountersResponse_IsXgemKeyErrors() {}
+
+func (m *GetOnuCountersResponse) GetIsXgemKeyErrors() isGetOnuCountersResponse_IsXgemKeyErrors {
+	if m != nil {
+		return m.IsXgemKeyErrors
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetXgemKeyErrors() uint64 {
+	if x, ok := m.GetIsXgemKeyErrors().(*GetOnuCountersResponse_XgemKeyErrors); ok {
+		return x.XgemKeyErrors
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsXgemLoss interface {
+	isGetOnuCountersResponse_IsXgemLoss()
+}
+
+type GetOnuCountersResponse_XgemLoss struct {
+	XgemLoss uint64 `protobuf:"fixed64,14,opt,name=xgem_loss,json=xgemLoss,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_XgemLoss) isGetOnuCountersResponse_IsXgemLoss() {}
+
+func (m *GetOnuCountersResponse) GetIsXgemLoss() isGetOnuCountersResponse_IsXgemLoss {
+	if m != nil {
+		return m.IsXgemLoss
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetXgemLoss() uint64 {
+	if x, ok := m.GetIsXgemLoss().(*GetOnuCountersResponse_XgemLoss); ok {
+		return x.XgemLoss
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsRxPloamsError interface {
+	isGetOnuCountersResponse_IsRxPloamsError()
+}
+
+type GetOnuCountersResponse_RxPloamsError struct {
+	RxPloamsError uint64 `protobuf:"fixed64,15,opt,name=rx_ploams_error,json=rxPloamsError,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_RxPloamsError) isGetOnuCountersResponse_IsRxPloamsError() {}
+
+func (m *GetOnuCountersResponse) GetIsRxPloamsError() isGetOnuCountersResponse_IsRxPloamsError {
+	if m != nil {
+		return m.IsRxPloamsError
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetRxPloamsError() uint64 {
+	if x, ok := m.GetIsRxPloamsError().(*GetOnuCountersResponse_RxPloamsError); ok {
+		return x.RxPloamsError
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsRxPloamsNonIdle interface {
+	isGetOnuCountersResponse_IsRxPloamsNonIdle()
+}
+
+type GetOnuCountersResponse_RxPloamsNonIdle struct {
+	RxPloamsNonIdle uint64 `protobuf:"fixed64,16,opt,name=rx_ploams_non_idle,json=rxPloamsNonIdle,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_RxPloamsNonIdle) isGetOnuCountersResponse_IsRxPloamsNonIdle() {}
+
+func (m *GetOnuCountersResponse) GetIsRxPloamsNonIdle() isGetOnuCountersResponse_IsRxPloamsNonIdle {
+	if m != nil {
+		return m.IsRxPloamsNonIdle
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetRxPloamsNonIdle() uint64 {
+	if x, ok := m.GetIsRxPloamsNonIdle().(*GetOnuCountersResponse_RxPloamsNonIdle); ok {
+		return x.RxPloamsNonIdle
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsRxOmci interface {
+	isGetOnuCountersResponse_IsRxOmci()
+}
+
+type GetOnuCountersResponse_RxOmci struct {
+	RxOmci uint64 `protobuf:"fixed64,17,opt,name=rx_omci,json=rxOmci,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_RxOmci) isGetOnuCountersResponse_IsRxOmci() {}
+
+func (m *GetOnuCountersResponse) GetIsRxOmci() isGetOnuCountersResponse_IsRxOmci {
+	if m != nil {
+		return m.IsRxOmci
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetRxOmci() uint64 {
+	if x, ok := m.GetIsRxOmci().(*GetOnuCountersResponse_RxOmci); ok {
+		return x.RxOmci
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsTxOmci interface {
+	isGetOnuCountersResponse_IsTxOmci()
+}
+
+type GetOnuCountersResponse_TxOmci struct {
+	TxOmci uint64 `protobuf:"fixed64,18,opt,name=tx_omci,json=txOmci,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_TxOmci) isGetOnuCountersResponse_IsTxOmci() {}
+
+func (m *GetOnuCountersResponse) GetIsTxOmci() isGetOnuCountersResponse_IsTxOmci {
+	if m != nil {
+		return m.IsTxOmci
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetTxOmci() uint64 {
+	if x, ok := m.GetIsTxOmci().(*GetOnuCountersResponse_TxOmci); ok {
+		return x.TxOmci
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsRxOmciPacketsCrcError interface {
+	isGetOnuCountersResponse_IsRxOmciPacketsCrcError()
+}
+
+type GetOnuCountersResponse_RxOmciPacketsCrcError struct {
+	RxOmciPacketsCrcError uint64 `protobuf:"fixed64,19,opt,name=rx_omci_packets_crc_error,json=rxOmciPacketsCrcError,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_RxOmciPacketsCrcError) isGetOnuCountersResponse_IsRxOmciPacketsCrcError() {
+}
+
+func (m *GetOnuCountersResponse) GetIsRxOmciPacketsCrcError() isGetOnuCountersResponse_IsRxOmciPacketsCrcError {
+	if m != nil {
+		return m.IsRxOmciPacketsCrcError
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetRxOmciPacketsCrcError() uint64 {
+	if x, ok := m.GetIsRxOmciPacketsCrcError().(*GetOnuCountersResponse_RxOmciPacketsCrcError); ok {
+		return x.RxOmciPacketsCrcError
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsRxBytes interface {
+	isGetOnuCountersResponse_IsRxBytes()
+}
+
+type GetOnuCountersResponse_RxBytes struct {
+	RxBytes uint64 `protobuf:"fixed64,20,opt,name=rx_bytes,json=rxBytes,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_RxBytes) isGetOnuCountersResponse_IsRxBytes() {}
+
+func (m *GetOnuCountersResponse) GetIsRxBytes() isGetOnuCountersResponse_IsRxBytes {
+	if m != nil {
+		return m.IsRxBytes
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetRxBytes() uint64 {
+	if x, ok := m.GetIsRxBytes().(*GetOnuCountersResponse_RxBytes); ok {
+		return x.RxBytes
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsRxPackets interface {
+	isGetOnuCountersResponse_IsRxPackets()
+}
+
+type GetOnuCountersResponse_RxPackets struct {
+	RxPackets uint64 `protobuf:"fixed64,21,opt,name=rx_packets,json=rxPackets,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_RxPackets) isGetOnuCountersResponse_IsRxPackets() {}
+
+func (m *GetOnuCountersResponse) GetIsRxPackets() isGetOnuCountersResponse_IsRxPackets {
+	if m != nil {
+		return m.IsRxPackets
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetRxPackets() uint64 {
+	if x, ok := m.GetIsRxPackets().(*GetOnuCountersResponse_RxPackets); ok {
+		return x.RxPackets
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsTxBytes interface {
+	isGetOnuCountersResponse_IsTxBytes()
+}
+
+type GetOnuCountersResponse_TxBytes struct {
+	TxBytes uint64 `protobuf:"fixed64,22,opt,name=tx_bytes,json=txBytes,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_TxBytes) isGetOnuCountersResponse_IsTxBytes() {}
+
+func (m *GetOnuCountersResponse) GetIsTxBytes() isGetOnuCountersResponse_IsTxBytes {
+	if m != nil {
+		return m.IsTxBytes
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetTxBytes() uint64 {
+	if x, ok := m.GetIsTxBytes().(*GetOnuCountersResponse_TxBytes); ok {
+		return x.TxBytes
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsTxPackets interface {
+	isGetOnuCountersResponse_IsTxPackets()
+}
+
+type GetOnuCountersResponse_TxPackets struct {
+	TxPackets uint64 `protobuf:"fixed64,23,opt,name=tx_packets,json=txPackets,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_TxPackets) isGetOnuCountersResponse_IsTxPackets() {}
+
+func (m *GetOnuCountersResponse) GetIsTxPackets() isGetOnuCountersResponse_IsTxPackets {
+	if m != nil {
+		return m.IsTxPackets
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetTxPackets() uint64 {
+	if x, ok := m.GetIsTxPackets().(*GetOnuCountersResponse_TxPackets); ok {
+		return x.TxPackets
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsBerReported interface {
+	isGetOnuCountersResponse_IsBerReported()
+}
+
+type GetOnuCountersResponse_BerReported struct {
+	BerReported uint64 `protobuf:"fixed64,24,opt,name=ber_reported,json=berReported,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_BerReported) isGetOnuCountersResponse_IsBerReported() {}
+
+func (m *GetOnuCountersResponse) GetIsBerReported() isGetOnuCountersResponse_IsBerReported {
+	if m != nil {
+		return m.IsBerReported
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetBerReported() uint64 {
+	if x, ok := m.GetIsBerReported().(*GetOnuCountersResponse_BerReported); ok {
+		return x.BerReported
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsLcdgErrors interface {
+	isGetOnuCountersResponse_IsLcdgErrors()
+}
+
+type GetOnuCountersResponse_LcdgErrors struct {
+	LcdgErrors uint64 `protobuf:"fixed64,25,opt,name=lcdg_errors,json=lcdgErrors,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_LcdgErrors) isGetOnuCountersResponse_IsLcdgErrors() {}
+
+func (m *GetOnuCountersResponse) GetIsLcdgErrors() isGetOnuCountersResponse_IsLcdgErrors {
+	if m != nil {
+		return m.IsLcdgErrors
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetLcdgErrors() uint64 {
+	if x, ok := m.GetIsLcdgErrors().(*GetOnuCountersResponse_LcdgErrors); ok {
+		return x.LcdgErrors
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsRdiErrors interface {
+	isGetOnuCountersResponse_IsRdiErrors()
+}
+
+type GetOnuCountersResponse_RdiErrors struct {
+	RdiErrors uint64 `protobuf:"fixed64,26,opt,name=rdi_errors,json=rdiErrors,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_RdiErrors) isGetOnuCountersResponse_IsRdiErrors() {}
+
+func (m *GetOnuCountersResponse) GetIsRdiErrors() isGetOnuCountersResponse_IsRdiErrors {
+	if m != nil {
+		return m.IsRdiErrors
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetRdiErrors() uint64 {
+	if x, ok := m.GetIsRdiErrors().(*GetOnuCountersResponse_RdiErrors); ok {
+		return x.RdiErrors
+	}
+	return 0
+}
+
+type isGetOnuCountersResponse_IsTimestamp interface {
+	isGetOnuCountersResponse_IsTimestamp()
+}
+
+type GetOnuCountersResponse_Timestamp struct {
+	Timestamp uint32 `protobuf:"fixed32,27,opt,name=timestamp,proto3,oneof"`
+}
+
+func (*GetOnuCountersResponse_Timestamp) isGetOnuCountersResponse_IsTimestamp() {}
+
+func (m *GetOnuCountersResponse) GetIsTimestamp() isGetOnuCountersResponse_IsTimestamp {
+	if m != nil {
+		return m.IsTimestamp
+	}
+	return nil
+}
+
+func (m *GetOnuCountersResponse) GetTimestamp() uint32 {
+	if x, ok := m.GetIsTimestamp().(*GetOnuCountersResponse_Timestamp); ok {
+		return x.Timestamp
+	}
+	return 0
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*GetOnuCountersResponse) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*GetOnuCountersResponse_IntfId)(nil),
+		(*GetOnuCountersResponse_OnuId)(nil),
+		(*GetOnuCountersResponse_PositiveDrift)(nil),
+		(*GetOnuCountersResponse_NegativeDrift)(nil),
+		(*GetOnuCountersResponse_DelimiterMissDetection)(nil),
+		(*GetOnuCountersResponse_BipErrors)(nil),
+		(*GetOnuCountersResponse_BipUnits)(nil),
+		(*GetOnuCountersResponse_FecCorrectedSymbols)(nil),
+		(*GetOnuCountersResponse_FecCodewordsCorrected)(nil),
+		(*GetOnuCountersResponse_FecCodewordsUncorrectable)(nil),
+		(*GetOnuCountersResponse_FecCodewords)(nil),
+		(*GetOnuCountersResponse_FecCorrectedUnits)(nil),
+		(*GetOnuCountersResponse_XgemKeyErrors)(nil),
+		(*GetOnuCountersResponse_XgemLoss)(nil),
+		(*GetOnuCountersResponse_RxPloamsError)(nil),
+		(*GetOnuCountersResponse_RxPloamsNonIdle)(nil),
+		(*GetOnuCountersResponse_RxOmci)(nil),
+		(*GetOnuCountersResponse_TxOmci)(nil),
+		(*GetOnuCountersResponse_RxOmciPacketsCrcError)(nil),
+		(*GetOnuCountersResponse_RxBytes)(nil),
+		(*GetOnuCountersResponse_RxPackets)(nil),
+		(*GetOnuCountersResponse_TxBytes)(nil),
+		(*GetOnuCountersResponse_TxPackets)(nil),
+		(*GetOnuCountersResponse_BerReported)(nil),
+		(*GetOnuCountersResponse_LcdgErrors)(nil),
+		(*GetOnuCountersResponse_RdiErrors)(nil),
+		(*GetOnuCountersResponse_Timestamp)(nil),
+	}
+}
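+
+// Note (illustrative, not part of the generated output): the counters above
+// follow the legacy golang/protobuf oneof wrapper pattern — each optional
+// counter lives in its own single-field wrapper struct, and the typed Get*()
+// accessor returns the value, or 0 when that wrapper is not the one assigned.
+// A minimal sketch of setting and reading one counter:
+//
+//	c := &GetOnuCountersResponse{
+//		IsRxBytes: &GetOnuCountersResponse_RxBytes{RxBytes: 1024},
+//	}
+//	_ = c.GetRxBytes() // 1024; would be 0 if IsRxBytes were nil or held a different wrapper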
+
+type OmciEthernetFrameExtendedPm struct {
+	DropEvents               uint64   `protobuf:"fixed64,1,opt,name=drop_events,json=dropEvents,proto3" json:"drop_events,omitempty"`
+	Octets                   uint64   `protobuf:"fixed64,2,opt,name=octets,proto3" json:"octets,omitempty"`
+	Frames                   uint64   `protobuf:"fixed64,3,opt,name=frames,proto3" json:"frames,omitempty"`
+	BroadcastFrames          uint64   `protobuf:"fixed64,4,opt,name=broadcast_frames,json=broadcastFrames,proto3" json:"broadcast_frames,omitempty"`
+	MulticastFrames          uint64   `protobuf:"fixed64,5,opt,name=multicast_frames,json=multicastFrames,proto3" json:"multicast_frames,omitempty"`
+	CrcErroredFrames         uint64   `protobuf:"fixed64,6,opt,name=crc_errored_frames,json=crcErroredFrames,proto3" json:"crc_errored_frames,omitempty"`
+	UndersizeFrames          uint64   `protobuf:"fixed64,7,opt,name=undersize_frames,json=undersizeFrames,proto3" json:"undersize_frames,omitempty"`
+	OversizeFrames           uint64   `protobuf:"fixed64,8,opt,name=oversize_frames,json=oversizeFrames,proto3" json:"oversize_frames,omitempty"`
+	Frames_64Octets          uint64   `protobuf:"fixed64,9,opt,name=frames_64_octets,json=frames64Octets,proto3" json:"frames_64_octets,omitempty"`
+	Frames_65To_127Octets    uint64   `protobuf:"fixed64,10,opt,name=frames_65_to_127_octets,json=frames65To127Octets,proto3" json:"frames_65_to_127_octets,omitempty"`
+	Frames_128To_255Octets   uint64   `protobuf:"fixed64,11,opt,name=frames_128_to_255_octets,json=frames128To255Octets,proto3" json:"frames_128_to_255_octets,omitempty"`
+	Frames_256To_511Octets   uint64   `protobuf:"fixed64,12,opt,name=frames_256_to_511_octets,json=frames256To511Octets,proto3" json:"frames_256_to_511_octets,omitempty"`
+	Frames_512To_1023Octets  uint64   `protobuf:"fixed64,13,opt,name=frames_512_to_1023_octets,json=frames512To1023Octets,proto3" json:"frames_512_to_1023_octets,omitempty"`
+	Frames_1024To_1518Octets uint64   `protobuf:"fixed64,14,opt,name=frames_1024_to_1518_octets,json=frames1024To1518Octets,proto3" json:"frames_1024_to_1518_octets,omitempty"`
+	XXX_NoUnkeyedLiteral     struct{} `json:"-"`
+	XXX_unrecognized         []byte   `json:"-"`
+	XXX_sizecache            int32    `json:"-"`
+}
+
+func (m *OmciEthernetFrameExtendedPm) Reset()         { *m = OmciEthernetFrameExtendedPm{} }
+func (m *OmciEthernetFrameExtendedPm) String() string { return proto.CompactTextString(m) }
+func (*OmciEthernetFrameExtendedPm) ProtoMessage()    {}
+func (*OmciEthernetFrameExtendedPm) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{20}
+}
+
+func (m *OmciEthernetFrameExtendedPm) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OmciEthernetFrameExtendedPm.Unmarshal(m, b)
+}
+func (m *OmciEthernetFrameExtendedPm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OmciEthernetFrameExtendedPm.Marshal(b, m, deterministic)
+}
+func (m *OmciEthernetFrameExtendedPm) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OmciEthernetFrameExtendedPm.Merge(m, src)
+}
+func (m *OmciEthernetFrameExtendedPm) XXX_Size() int {
+	return xxx_messageInfo_OmciEthernetFrameExtendedPm.Size(m)
+}
+func (m *OmciEthernetFrameExtendedPm) XXX_DiscardUnknown() {
+	xxx_messageInfo_OmciEthernetFrameExtendedPm.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OmciEthernetFrameExtendedPm proto.InternalMessageInfo
+
+func (m *OmciEthernetFrameExtendedPm) GetDropEvents() uint64 {
+	if m != nil {
+		return m.DropEvents
+	}
+	return 0
+}
+
+func (m *OmciEthernetFrameExtendedPm) GetOctets() uint64 {
+	if m != nil {
+		return m.Octets
+	}
+	return 0
+}
+
+func (m *OmciEthernetFrameExtendedPm) GetFrames() uint64 {
+	if m != nil {
+		return m.Frames
+	}
+	return 0
+}
+
+func (m *OmciEthernetFrameExtendedPm) GetBroadcastFrames() uint64 {
+	if m != nil {
+		return m.BroadcastFrames
+	}
+	return 0
+}
+
+func (m *OmciEthernetFrameExtendedPm) GetMulticastFrames() uint64 {
+	if m != nil {
+		return m.MulticastFrames
+	}
+	return 0
+}
+
+func (m *OmciEthernetFrameExtendedPm) GetCrcErroredFrames() uint64 {
+	if m != nil {
+		return m.CrcErroredFrames
+	}
+	return 0
+}
+
+func (m *OmciEthernetFrameExtendedPm) GetUndersizeFrames() uint64 {
+	if m != nil {
+		return m.UndersizeFrames
+	}
+	return 0
+}
+
+func (m *OmciEthernetFrameExtendedPm) GetOversizeFrames() uint64 {
+	if m != nil {
+		return m.OversizeFrames
+	}
+	return 0
+}
+
+func (m *OmciEthernetFrameExtendedPm) GetFrames_64Octets() uint64 {
+	if m != nil {
+		return m.Frames_64Octets
+	}
+	return 0
+}
+
+func (m *OmciEthernetFrameExtendedPm) GetFrames_65To_127Octets() uint64 {
+	if m != nil {
+		return m.Frames_65To_127Octets
+	}
+	return 0
+}
+
+func (m *OmciEthernetFrameExtendedPm) GetFrames_128To_255Octets() uint64 {
+	if m != nil {
+		return m.Frames_128To_255Octets
+	}
+	return 0
+}
+
+func (m *OmciEthernetFrameExtendedPm) GetFrames_256To_511Octets() uint64 {
+	if m != nil {
+		return m.Frames_256To_511Octets
+	}
+	return 0
+}
+
+func (m *OmciEthernetFrameExtendedPm) GetFrames_512To_1023Octets() uint64 {
+	if m != nil {
+		return m.Frames_512To_1023Octets
+	}
+	return 0
+}
+
+func (m *OmciEthernetFrameExtendedPm) GetFrames_1024To_1518Octets() uint64 {
+	if m != nil {
+		return m.Frames_1024To_1518Octets
+	}
+	return 0
+}
+
+type GetOmciEthernetFrameExtendedPmResponse struct {
+	Upstream                          *OmciEthernetFrameExtendedPm                  `protobuf:"bytes,1,opt,name=upstream,proto3" json:"upstream,omitempty"`
+	Downstream                        *OmciEthernetFrameExtendedPm                  `protobuf:"bytes,2,opt,name=downstream,proto3" json:"downstream,omitempty"`
+	OmciEthernetFrameExtendedPmFormat GetOmciEthernetFrameExtendedPmResponse_Format `protobuf:"varint,3,opt,name=omci_ethernet_frame_extended_pm_format,json=omciEthernetFrameExtendedPmFormat,proto3,enum=extension.GetOmciEthernetFrameExtendedPmResponse_Format" json:"omci_ethernet_frame_extended_pm_format,omitempty"`
+	XXX_NoUnkeyedLiteral              struct{}                                      `json:"-"`
+	XXX_unrecognized                  []byte                                        `json:"-"`
+	XXX_sizecache                     int32                                         `json:"-"`
+}
+
+func (m *GetOmciEthernetFrameExtendedPmResponse) Reset() {
+	*m = GetOmciEthernetFrameExtendedPmResponse{}
+}
+func (m *GetOmciEthernetFrameExtendedPmResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOmciEthernetFrameExtendedPmResponse) ProtoMessage()    {}
+func (*GetOmciEthernetFrameExtendedPmResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{21}
+}
+
+func (m *GetOmciEthernetFrameExtendedPmResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetOmciEthernetFrameExtendedPmResponse.Unmarshal(m, b)
+}
+func (m *GetOmciEthernetFrameExtendedPmResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetOmciEthernetFrameExtendedPmResponse.Marshal(b, m, deterministic)
+}
+func (m *GetOmciEthernetFrameExtendedPmResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetOmciEthernetFrameExtendedPmResponse.Merge(m, src)
+}
+func (m *GetOmciEthernetFrameExtendedPmResponse) XXX_Size() int {
+	return xxx_messageInfo_GetOmciEthernetFrameExtendedPmResponse.Size(m)
+}
+func (m *GetOmciEthernetFrameExtendedPmResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetOmciEthernetFrameExtendedPmResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetOmciEthernetFrameExtendedPmResponse proto.InternalMessageInfo
+
+func (m *GetOmciEthernetFrameExtendedPmResponse) GetUpstream() *OmciEthernetFrameExtendedPm {
+	if m != nil {
+		return m.Upstream
+	}
+	return nil
+}
+
+func (m *GetOmciEthernetFrameExtendedPmResponse) GetDownstream() *OmciEthernetFrameExtendedPm {
+	if m != nil {
+		return m.Downstream
+	}
+	return nil
+}
+
+func (m *GetOmciEthernetFrameExtendedPmResponse) GetOmciEthernetFrameExtendedPmFormat() GetOmciEthernetFrameExtendedPmResponse_Format {
+	if m != nil {
+		return m.OmciEthernetFrameExtendedPmFormat
+	}
+	return GetOmciEthernetFrameExtendedPmResponse_THIRTY_TWO_BIT
+}
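+
+// Illustrative sketch only (not generated): the extended-PM response carries
+// one OmciEthernetFrameExtendedPm block per direction, so a caller with a
+// *GetOmciEthernetFrameExtendedPmResponse typically reads upstream and
+// downstream separately, e.g.:
+//
+//	if up := resp.GetUpstream(); up != nil {
+//		small := up.GetFrames_64Octets() + up.GetFrames_65To_127Octets()
+//		_ = small // upstream frames of 127 octets or fewer
+//	}
+//	_ = resp.GetOmciEthernetFrameExtendedPmFormat() // zero enum value THIRTY_TWO_BIT when unset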
+
+type GetRxPowerResponse struct {
+	IntfId               uint32   `protobuf:"fixed32,1,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	OnuId                uint32   `protobuf:"fixed32,2,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	Status               string   `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty"`
+	FailReason           string   `protobuf:"bytes,4,opt,name=fail_reason,json=failReason,proto3" json:"fail_reason,omitempty"`
+	RxPower              float64  `protobuf:"fixed64,5,opt,name=rx_power,json=rxPower,proto3" json:"rx_power,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *GetRxPowerResponse) Reset()         { *m = GetRxPowerResponse{} }
+func (m *GetRxPowerResponse) String() string { return proto.CompactTextString(m) }
+func (*GetRxPowerResponse) ProtoMessage()    {}
+func (*GetRxPowerResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{22}
+}
+
+func (m *GetRxPowerResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetRxPowerResponse.Unmarshal(m, b)
+}
+func (m *GetRxPowerResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetRxPowerResponse.Marshal(b, m, deterministic)
+}
+func (m *GetRxPowerResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetRxPowerResponse.Merge(m, src)
+}
+func (m *GetRxPowerResponse) XXX_Size() int {
+	return xxx_messageInfo_GetRxPowerResponse.Size(m)
+}
+func (m *GetRxPowerResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetRxPowerResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetRxPowerResponse proto.InternalMessageInfo
+
+func (m *GetRxPowerResponse) GetIntfId() uint32 {
+	if m != nil {
+		return m.IntfId
+	}
+	return 0
+}
+
+func (m *GetRxPowerResponse) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *GetRxPowerResponse) GetStatus() string {
+	if m != nil {
+		return m.Status
+	}
+	return ""
+}
+
+func (m *GetRxPowerResponse) GetFailReason() string {
+	if m != nil {
+		return m.FailReason
+	}
+	return ""
+}
+
+func (m *GetRxPowerResponse) GetRxPower() float64 {
+	if m != nil {
+		return m.RxPower
+	}
+	return 0
+}
+
+type GetValueRequest struct {
+	// Types that are valid to be assigned to Request:
+	//	*GetValueRequest_Distance
+	//	*GetValueRequest_UniInfo
+	//	*GetValueRequest_OltPortInfo
+	//	*GetValueRequest_OnuOpticalInfo
+	//	*GetValueRequest_EthBridgePort
+	//	*GetValueRequest_FecHistory
+	//	*GetValueRequest_OnuPonInfo
+	//	*GetValueRequest_OnuInfo
+	//	*GetValueRequest_RxPower
+	Request              isGetValueRequest_Request `protobuf_oneof:"request"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *GetValueRequest) Reset()         { *m = GetValueRequest{} }
+func (m *GetValueRequest) String() string { return proto.CompactTextString(m) }
+func (*GetValueRequest) ProtoMessage()    {}
+func (*GetValueRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{23}
+}
+
+func (m *GetValueRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetValueRequest.Unmarshal(m, b)
+}
+func (m *GetValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetValueRequest.Marshal(b, m, deterministic)
+}
+func (m *GetValueRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetValueRequest.Merge(m, src)
+}
+func (m *GetValueRequest) XXX_Size() int {
+	return xxx_messageInfo_GetValueRequest.Size(m)
+}
+func (m *GetValueRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetValueRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetValueRequest proto.InternalMessageInfo
+
+type isGetValueRequest_Request interface {
+	isGetValueRequest_Request()
+}
+
+type GetValueRequest_Distance struct {
+	Distance *GetDistanceRequest `protobuf:"bytes,1,opt,name=distance,proto3,oneof"`
+}
+
+type GetValueRequest_UniInfo struct {
+	UniInfo *GetOnuUniInfoRequest `protobuf:"bytes,2,opt,name=uniInfo,proto3,oneof"`
+}
+
+type GetValueRequest_OltPortInfo struct {
+	OltPortInfo *GetOltPortCounters `protobuf:"bytes,3,opt,name=oltPortInfo,proto3,oneof"`
+}
+
+type GetValueRequest_OnuOpticalInfo struct {
+	OnuOpticalInfo *GetOnuPonOpticalInfo `protobuf:"bytes,4,opt,name=onuOpticalInfo,proto3,oneof"`
+}
+
+type GetValueRequest_EthBridgePort struct {
+	EthBridgePort *GetOnuEthernetBridgePortHistory `protobuf:"bytes,5,opt,name=ethBridgePort,proto3,oneof"`
+}
+
+type GetValueRequest_FecHistory struct {
+	FecHistory *GetOnuFecHistory `protobuf:"bytes,6,opt,name=fecHistory,proto3,oneof"`
+}
+
+type GetValueRequest_OnuPonInfo struct {
+	OnuPonInfo *GetOnuCountersRequest `protobuf:"bytes,7,opt,name=onuPonInfo,proto3,oneof"`
+}
+
+type GetValueRequest_OnuInfo struct {
+	OnuInfo *GetOmciEthernetFrameExtendedPmRequest `protobuf:"bytes,8,opt,name=onuInfo,proto3,oneof"`
+}
+
+type GetValueRequest_RxPower struct {
+	RxPower *GetRxPowerRequest `protobuf:"bytes,9,opt,name=rxPower,proto3,oneof"`
+}
+
+func (*GetValueRequest_Distance) isGetValueRequest_Request() {}
+
+func (*GetValueRequest_UniInfo) isGetValueRequest_Request() {}
+
+func (*GetValueRequest_OltPortInfo) isGetValueRequest_Request() {}
+
+func (*GetValueRequest_OnuOpticalInfo) isGetValueRequest_Request() {}
+
+func (*GetValueRequest_EthBridgePort) isGetValueRequest_Request() {}
+
+func (*GetValueRequest_FecHistory) isGetValueRequest_Request() {}
+
+func (*GetValueRequest_OnuPonInfo) isGetValueRequest_Request() {}
+
+func (*GetValueRequest_OnuInfo) isGetValueRequest_Request() {}
+
+func (*GetValueRequest_RxPower) isGetValueRequest_Request() {}
+
+func (m *GetValueRequest) GetRequest() isGetValueRequest_Request {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+func (m *GetValueRequest) GetDistance() *GetDistanceRequest {
+	if x, ok := m.GetRequest().(*GetValueRequest_Distance); ok {
+		return x.Distance
+	}
+	return nil
+}
+
+func (m *GetValueRequest) GetUniInfo() *GetOnuUniInfoRequest {
+	if x, ok := m.GetRequest().(*GetValueRequest_UniInfo); ok {
+		return x.UniInfo
+	}
+	return nil
+}
+
+func (m *GetValueRequest) GetOltPortInfo() *GetOltPortCounters {
+	if x, ok := m.GetRequest().(*GetValueRequest_OltPortInfo); ok {
+		return x.OltPortInfo
+	}
+	return nil
+}
+
+func (m *GetValueRequest) GetOnuOpticalInfo() *GetOnuPonOpticalInfo {
+	if x, ok := m.GetRequest().(*GetValueRequest_OnuOpticalInfo); ok {
+		return x.OnuOpticalInfo
+	}
+	return nil
+}
+
+func (m *GetValueRequest) GetEthBridgePort() *GetOnuEthernetBridgePortHistory {
+	if x, ok := m.GetRequest().(*GetValueRequest_EthBridgePort); ok {
+		return x.EthBridgePort
+	}
+	return nil
+}
+
+func (m *GetValueRequest) GetFecHistory() *GetOnuFecHistory {
+	if x, ok := m.GetRequest().(*GetValueRequest_FecHistory); ok {
+		return x.FecHistory
+	}
+	return nil
+}
+
+func (m *GetValueRequest) GetOnuPonInfo() *GetOnuCountersRequest {
+	if x, ok := m.GetRequest().(*GetValueRequest_OnuPonInfo); ok {
+		return x.OnuPonInfo
+	}
+	return nil
+}
+
+func (m *GetValueRequest) GetOnuInfo() *GetOmciEthernetFrameExtendedPmRequest {
+	if x, ok := m.GetRequest().(*GetValueRequest_OnuInfo); ok {
+		return x.OnuInfo
+	}
+	return nil
+}
+
+func (m *GetValueRequest) GetRxPower() *GetRxPowerRequest {
+	if x, ok := m.GetRequest().(*GetValueRequest_RxPower); ok {
+		return x.RxPower
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*GetValueRequest) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*GetValueRequest_Distance)(nil),
+		(*GetValueRequest_UniInfo)(nil),
+		(*GetValueRequest_OltPortInfo)(nil),
+		(*GetValueRequest_OnuOpticalInfo)(nil),
+		(*GetValueRequest_EthBridgePort)(nil),
+		(*GetValueRequest_FecHistory)(nil),
+		(*GetValueRequest_OnuPonInfo)(nil),
+		(*GetValueRequest_OnuInfo)(nil),
+		(*GetValueRequest_RxPower)(nil),
+	}
+}
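+
+// A minimal construction sketch for the request oneof (illustrative, not part
+// of the generated code): exactly one wrapper type is assigned to Request, the
+// matching typed getter returns its payload, and the other getters return nil:
+//
+//	req := &GetValueRequest{
+//		Request: &GetValueRequest_RxPower{RxPower: &GetRxPowerRequest{}},
+//	}
+//	_ = req.GetRxPower()  // non-nil
+//	_ = req.GetDistance() // nil, because a different wrapper is set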
+
+type GetValueResponse struct {
+	Status    GetValueResponse_Status      `protobuf:"varint,1,opt,name=status,proto3,enum=extension.GetValueResponse_Status" json:"status,omitempty"`
+	ErrReason GetValueResponse_ErrorReason `protobuf:"varint,2,opt,name=errReason,proto3,enum=extension.GetValueResponse_ErrorReason" json:"errReason,omitempty"`
+	// Types that are valid to be assigned to Response:
+	//	*GetValueResponse_Distance
+	//	*GetValueResponse_UniInfo
+	//	*GetValueResponse_PortCoutners
+	//	*GetValueResponse_OnuOpticalInfo
+	//	*GetValueResponse_EthBridgePortInfo
+	//	*GetValueResponse_FecHistory
+	//	*GetValueResponse_OnuPonCounters
+	//	*GetValueResponse_OnuCounters
+	//	*GetValueResponse_RxPower
+	Response             isGetValueResponse_Response `protobuf_oneof:"response"`
+	XXX_NoUnkeyedLiteral struct{}                    `json:"-"`
+	XXX_unrecognized     []byte                      `json:"-"`
+	XXX_sizecache        int32                       `json:"-"`
+}
+
+func (m *GetValueResponse) Reset()         { *m = GetValueResponse{} }
+func (m *GetValueResponse) String() string { return proto.CompactTextString(m) }
+func (*GetValueResponse) ProtoMessage()    {}
+func (*GetValueResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{24}
+}
+
+func (m *GetValueResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_GetValueResponse.Unmarshal(m, b)
+}
+func (m *GetValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_GetValueResponse.Marshal(b, m, deterministic)
+}
+func (m *GetValueResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_GetValueResponse.Merge(m, src)
+}
+func (m *GetValueResponse) XXX_Size() int {
+	return xxx_messageInfo_GetValueResponse.Size(m)
+}
+func (m *GetValueResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_GetValueResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_GetValueResponse proto.InternalMessageInfo
+
+func (m *GetValueResponse) GetStatus() GetValueResponse_Status {
+	if m != nil {
+		return m.Status
+	}
+	return GetValueResponse_STATUS_UNDEFINED
+}
+
+func (m *GetValueResponse) GetErrReason() GetValueResponse_ErrorReason {
+	if m != nil {
+		return m.ErrReason
+	}
+	return GetValueResponse_REASON_UNDEFINED
+}
+
+type isGetValueResponse_Response interface {
+	isGetValueResponse_Response()
+}
+
+type GetValueResponse_Distance struct {
+	Distance *GetDistanceResponse `protobuf:"bytes,3,opt,name=distance,proto3,oneof"`
+}
+
+type GetValueResponse_UniInfo struct {
+	UniInfo *GetOnuUniInfoResponse `protobuf:"bytes,4,opt,name=uniInfo,proto3,oneof"`
+}
+
+type GetValueResponse_PortCoutners struct {
+	PortCoutners *GetOltPortCountersResponse `protobuf:"bytes,5,opt,name=portCoutners,proto3,oneof"`
+}
+
+type GetValueResponse_OnuOpticalInfo struct {
+	OnuOpticalInfo *GetOnuPonOpticalInfoResponse `protobuf:"bytes,6,opt,name=onuOpticalInfo,proto3,oneof"`
+}
+
+type GetValueResponse_EthBridgePortInfo struct {
+	EthBridgePortInfo *GetOnuEthernetBridgePortHistoryResponse `protobuf:"bytes,7,opt,name=ethBridgePortInfo,proto3,oneof"`
+}
+
+type GetValueResponse_FecHistory struct {
+	FecHistory *GetOnuFecHistoryResponse `protobuf:"bytes,8,opt,name=fecHistory,proto3,oneof"`
+}
+
+type GetValueResponse_OnuPonCounters struct {
+	OnuPonCounters *GetOnuCountersResponse `protobuf:"bytes,9,opt,name=onuPonCounters,proto3,oneof"`
+}
+
+type GetValueResponse_OnuCounters struct {
+	OnuCounters *GetOmciEthernetFrameExtendedPmResponse `protobuf:"bytes,10,opt,name=onuCounters,proto3,oneof"`
+}
+
+type GetValueResponse_RxPower struct {
+	RxPower *GetRxPowerResponse `protobuf:"bytes,11,opt,name=rxPower,proto3,oneof"`
+}
+
+func (*GetValueResponse_Distance) isGetValueResponse_Response() {}
+
+func (*GetValueResponse_UniInfo) isGetValueResponse_Response() {}
+
+func (*GetValueResponse_PortCoutners) isGetValueResponse_Response() {}
+
+func (*GetValueResponse_OnuOpticalInfo) isGetValueResponse_Response() {}
+
+func (*GetValueResponse_EthBridgePortInfo) isGetValueResponse_Response() {}
+
+func (*GetValueResponse_FecHistory) isGetValueResponse_Response() {}
+
+func (*GetValueResponse_OnuPonCounters) isGetValueResponse_Response() {}
+
+func (*GetValueResponse_OnuCounters) isGetValueResponse_Response() {}
+
+func (*GetValueResponse_RxPower) isGetValueResponse_Response() {}
+
+func (m *GetValueResponse) GetResponse() isGetValueResponse_Response {
+	if m != nil {
+		return m.Response
+	}
+	return nil
+}
+
+func (m *GetValueResponse) GetDistance() *GetDistanceResponse {
+	if x, ok := m.GetResponse().(*GetValueResponse_Distance); ok {
+		return x.Distance
+	}
+	return nil
+}
+
+func (m *GetValueResponse) GetUniInfo() *GetOnuUniInfoResponse {
+	if x, ok := m.GetResponse().(*GetValueResponse_UniInfo); ok {
+		return x.UniInfo
+	}
+	return nil
+}
+
+func (m *GetValueResponse) GetPortCoutners() *GetOltPortCountersResponse {
+	if x, ok := m.GetResponse().(*GetValueResponse_PortCoutners); ok {
+		return x.PortCoutners
+	}
+	return nil
+}
+
+func (m *GetValueResponse) GetOnuOpticalInfo() *GetOnuPonOpticalInfoResponse {
+	if x, ok := m.GetResponse().(*GetValueResponse_OnuOpticalInfo); ok {
+		return x.OnuOpticalInfo
+	}
+	return nil
+}
+
+func (m *GetValueResponse) GetEthBridgePortInfo() *GetOnuEthernetBridgePortHistoryResponse {
+	if x, ok := m.GetResponse().(*GetValueResponse_EthBridgePortInfo); ok {
+		return x.EthBridgePortInfo
+	}
+	return nil
+}
+
+func (m *GetValueResponse) GetFecHistory() *GetOnuFecHistoryResponse {
+	if x, ok := m.GetResponse().(*GetValueResponse_FecHistory); ok {
+		return x.FecHistory
+	}
+	return nil
+}
+
+func (m *GetValueResponse) GetOnuPonCounters() *GetOnuCountersResponse {
+	if x, ok := m.GetResponse().(*GetValueResponse_OnuPonCounters); ok {
+		return x.OnuPonCounters
+	}
+	return nil
+}
+
+func (m *GetValueResponse) GetOnuCounters() *GetOmciEthernetFrameExtendedPmResponse {
+	if x, ok := m.GetResponse().(*GetValueResponse_OnuCounters); ok {
+		return x.OnuCounters
+	}
+	return nil
+}
+
+func (m *GetValueResponse) GetRxPower() *GetRxPowerResponse {
+	if x, ok := m.GetResponse().(*GetValueResponse_RxPower); ok {
+		return x.RxPower
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*GetValueResponse) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*GetValueResponse_Distance)(nil),
+		(*GetValueResponse_UniInfo)(nil),
+		(*GetValueResponse_PortCoutners)(nil),
+		(*GetValueResponse_OnuOpticalInfo)(nil),
+		(*GetValueResponse_EthBridgePortInfo)(nil),
+		(*GetValueResponse_FecHistory)(nil),
+		(*GetValueResponse_OnuPonCounters)(nil),
+		(*GetValueResponse_OnuCounters)(nil),
+		(*GetValueResponse_RxPower)(nil),
+	}
+}
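+
+// Callers usually dispatch on the response oneof with a type switch; a short
+// illustrative sketch (not generated), given a *GetValueResponse named resp:
+//
+//	switch r := resp.GetResponse().(type) {
+//	case *GetValueResponse_OnuPonCounters:
+//		_ = r.OnuPonCounters.GetRxBytes()
+//	case *GetValueResponse_RxPower:
+//		_ = r.RxPower.GetRxPower()
+//	default:
+//		// unset or an unhandled value type; consult resp.GetStatus() / resp.GetErrReason()
+//	}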
+
+type SetValueRequest struct {
+	// Types that are valid to be assigned to Request:
+	//	*SetValueRequest_AlarmConfig
+	Request              isSetValueRequest_Request `protobuf_oneof:"request"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *SetValueRequest) Reset()         { *m = SetValueRequest{} }
+func (m *SetValueRequest) String() string { return proto.CompactTextString(m) }
+func (*SetValueRequest) ProtoMessage()    {}
+func (*SetValueRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{25}
+}
+
+func (m *SetValueRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SetValueRequest.Unmarshal(m, b)
+}
+func (m *SetValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SetValueRequest.Marshal(b, m, deterministic)
+}
+func (m *SetValueRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SetValueRequest.Merge(m, src)
+}
+func (m *SetValueRequest) XXX_Size() int {
+	return xxx_messageInfo_SetValueRequest.Size(m)
+}
+func (m *SetValueRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_SetValueRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetValueRequest proto.InternalMessageInfo
+
+type isSetValueRequest_Request interface {
+	isSetValueRequest_Request()
+}
+
+type SetValueRequest_AlarmConfig struct {
+	AlarmConfig *config.AlarmConfig `protobuf:"bytes,1,opt,name=alarm_config,json=alarmConfig,proto3,oneof"`
+}
+
+func (*SetValueRequest_AlarmConfig) isSetValueRequest_Request() {}
+
+func (m *SetValueRequest) GetRequest() isSetValueRequest_Request {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+func (m *SetValueRequest) GetAlarmConfig() *config.AlarmConfig {
+	if x, ok := m.GetRequest().(*SetValueRequest_AlarmConfig); ok {
+		return x.AlarmConfig
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*SetValueRequest) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*SetValueRequest_AlarmConfig)(nil),
+	}
+}
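+
+// Illustrative sketch (not generated): SetValueRequest currently exposes a
+// single oneof member, the alarm configuration message from the config package:
+//
+//	set := &SetValueRequest{
+//		Request: &SetValueRequest_AlarmConfig{AlarmConfig: &config.AlarmConfig{}},
+//	}
+//	_ = set.GetAlarmConfig() // non-nil once the wrapper is assigned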
+
+type SetValueResponse struct {
+	Status               SetValueResponse_Status      `protobuf:"varint,1,opt,name=status,proto3,enum=extension.SetValueResponse_Status" json:"status,omitempty"`
+	ErrReason            SetValueResponse_ErrorReason `protobuf:"varint,2,opt,name=errReason,proto3,enum=extension.SetValueResponse_ErrorReason" json:"errReason,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *SetValueResponse) Reset()         { *m = SetValueResponse{} }
+func (m *SetValueResponse) String() string { return proto.CompactTextString(m) }
+func (*SetValueResponse) ProtoMessage()    {}
+func (*SetValueResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{26}
+}
+
+func (m *SetValueResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SetValueResponse.Unmarshal(m, b)
+}
+func (m *SetValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SetValueResponse.Marshal(b, m, deterministic)
+}
+func (m *SetValueResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SetValueResponse.Merge(m, src)
+}
+func (m *SetValueResponse) XXX_Size() int {
+	return xxx_messageInfo_SetValueResponse.Size(m)
+}
+func (m *SetValueResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_SetValueResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SetValueResponse proto.InternalMessageInfo
+
+func (m *SetValueResponse) GetStatus() SetValueResponse_Status {
+	if m != nil {
+		return m.Status
+	}
+	return SetValueResponse_STATUS_UNDEFINED
+}
+
+func (m *SetValueResponse) GetErrReason() SetValueResponse_ErrorReason {
+	if m != nil {
+		return m.ErrReason
+	}
+	return SetValueResponse_REASON_UNDEFINED
+}
+
+type SingleGetValueRequest struct {
+	TargetId             string           `protobuf:"bytes,1,opt,name=targetId,proto3" json:"targetId,omitempty"`
+	Request              *GetValueRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *SingleGetValueRequest) Reset()         { *m = SingleGetValueRequest{} }
+func (m *SingleGetValueRequest) String() string { return proto.CompactTextString(m) }
+func (*SingleGetValueRequest) ProtoMessage()    {}
+func (*SingleGetValueRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{27}
+}
+
+func (m *SingleGetValueRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SingleGetValueRequest.Unmarshal(m, b)
+}
+func (m *SingleGetValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SingleGetValueRequest.Marshal(b, m, deterministic)
+}
+func (m *SingleGetValueRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SingleGetValueRequest.Merge(m, src)
+}
+func (m *SingleGetValueRequest) XXX_Size() int {
+	return xxx_messageInfo_SingleGetValueRequest.Size(m)
+}
+func (m *SingleGetValueRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_SingleGetValueRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SingleGetValueRequest proto.InternalMessageInfo
+
+func (m *SingleGetValueRequest) GetTargetId() string {
+	if m != nil {
+		return m.TargetId
+	}
+	return ""
+}
+
+func (m *SingleGetValueRequest) GetRequest() *GetValueRequest {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+type SingleGetValueResponse struct {
+	Response             *GetValueResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *SingleGetValueResponse) Reset()         { *m = SingleGetValueResponse{} }
+func (m *SingleGetValueResponse) String() string { return proto.CompactTextString(m) }
+func (*SingleGetValueResponse) ProtoMessage()    {}
+func (*SingleGetValueResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{28}
+}
+
+func (m *SingleGetValueResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SingleGetValueResponse.Unmarshal(m, b)
+}
+func (m *SingleGetValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SingleGetValueResponse.Marshal(b, m, deterministic)
+}
+func (m *SingleGetValueResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SingleGetValueResponse.Merge(m, src)
+}
+func (m *SingleGetValueResponse) XXX_Size() int {
+	return xxx_messageInfo_SingleGetValueResponse.Size(m)
+}
+func (m *SingleGetValueResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_SingleGetValueResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SingleGetValueResponse proto.InternalMessageInfo
+
+func (m *SingleGetValueResponse) GetResponse() *GetValueResponse {
+	if m != nil {
+		return m.Response
+	}
+	return nil
+}
+
+type SingleSetValueRequest struct {
+	TargetId             string           `protobuf:"bytes,1,opt,name=targetId,proto3" json:"targetId,omitempty"`
+	Request              *SetValueRequest `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *SingleSetValueRequest) Reset()         { *m = SingleSetValueRequest{} }
+func (m *SingleSetValueRequest) String() string { return proto.CompactTextString(m) }
+func (*SingleSetValueRequest) ProtoMessage()    {}
+func (*SingleSetValueRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{29}
+}
+
+func (m *SingleSetValueRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SingleSetValueRequest.Unmarshal(m, b)
+}
+func (m *SingleSetValueRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SingleSetValueRequest.Marshal(b, m, deterministic)
+}
+func (m *SingleSetValueRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SingleSetValueRequest.Merge(m, src)
+}
+func (m *SingleSetValueRequest) XXX_Size() int {
+	return xxx_messageInfo_SingleSetValueRequest.Size(m)
+}
+func (m *SingleSetValueRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_SingleSetValueRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SingleSetValueRequest proto.InternalMessageInfo
+
+func (m *SingleSetValueRequest) GetTargetId() string {
+	if m != nil {
+		return m.TargetId
+	}
+	return ""
+}
+
+func (m *SingleSetValueRequest) GetRequest() *SetValueRequest {
+	if m != nil {
+		return m.Request
+	}
+	return nil
+}
+
+type SingleSetValueResponse struct {
+	Response             *SetValueResponse `protobuf:"bytes,1,opt,name=response,proto3" json:"response,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *SingleSetValueResponse) Reset()         { *m = SingleSetValueResponse{} }
+func (m *SingleSetValueResponse) String() string { return proto.CompactTextString(m) }
+func (*SingleSetValueResponse) ProtoMessage()    {}
+func (*SingleSetValueResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7ecf6e9799a9202d, []int{30}
+}
+
+func (m *SingleSetValueResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SingleSetValueResponse.Unmarshal(m, b)
+}
+func (m *SingleSetValueResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SingleSetValueResponse.Marshal(b, m, deterministic)
+}
+func (m *SingleSetValueResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SingleSetValueResponse.Merge(m, src)
+}
+func (m *SingleSetValueResponse) XXX_Size() int {
+	return xxx_messageInfo_SingleSetValueResponse.Size(m)
+}
+func (m *SingleSetValueResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_SingleSetValueResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SingleSetValueResponse proto.InternalMessageInfo
+
+func (m *SingleSetValueResponse) GetResponse() *SetValueResponse {
+	if m != nil {
+		return m.Response
+	}
+	return nil
+}
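+
+// The Single*Value wrappers pair a target device ID with the corresponding
+// request or response message; an illustrative (non-generated) sketch of the
+// Get path, with "olt-1" as a purely hypothetical device ID:
+//
+//	sreq := &SingleGetValueRequest{
+//		TargetId: "olt-1", // hypothetical target identifier
+//		Request: &GetValueRequest{
+//			Request: &GetValueRequest_OnuPonInfo{OnuPonInfo: &GetOnuCountersRequest{}},
+//		},
+//	}
+//	_ = sreq.GetRequest()
+//	// the matching SingleGetValueResponse embeds a GetValueResponse;
+//	// the generated getters are nil-safe, so this is valid even when unset:
+//	var sresp SingleGetValueResponse
+//	_ = sresp.GetResponse().GetStatus()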
+
+func init() {
+	proto.RegisterEnum("extension.ValueType_Type", ValueType_Type_name, ValueType_Type_value)
+	proto.RegisterEnum("extension.GetOnuUniInfoResponse_ConfigurationInd", GetOnuUniInfoResponse_ConfigurationInd_name, GetOnuUniInfoResponse_ConfigurationInd_value)
+	proto.RegisterEnum("extension.GetOnuUniInfoResponse_AdministrativeState", GetOnuUniInfoResponse_AdministrativeState_name, GetOnuUniInfoResponse_AdministrativeState_value)
+	proto.RegisterEnum("extension.GetOnuUniInfoResponse_OperationalState", GetOnuUniInfoResponse_OperationalState_name, GetOnuUniInfoResponse_OperationalState_value)
+	proto.RegisterEnum("extension.GetOltPortCounters_PortType", GetOltPortCounters_PortType_name, GetOltPortCounters_PortType_value)
+	proto.RegisterEnum("extension.GetOnuEthernetBridgePortHistory_Direction", GetOnuEthernetBridgePortHistory_Direction_name, GetOnuEthernetBridgePortHistory_Direction_value)
+	proto.RegisterEnum("extension.GetOmciEthernetFrameExtendedPmResponse_Format", GetOmciEthernetFrameExtendedPmResponse_Format_name, GetOmciEthernetFrameExtendedPmResponse_Format_value)
+	proto.RegisterEnum("extension.GetValueResponse_Status", GetValueResponse_Status_name, GetValueResponse_Status_value)
+	proto.RegisterEnum("extension.GetValueResponse_ErrorReason", GetValueResponse_ErrorReason_name, GetValueResponse_ErrorReason_value)
+	proto.RegisterEnum("extension.SetValueResponse_Status", SetValueResponse_Status_name, SetValueResponse_Status_value)
+	proto.RegisterEnum("extension.SetValueResponse_ErrorReason", SetValueResponse_ErrorReason_name, SetValueResponse_ErrorReason_value)
+	proto.RegisterType((*ValueSet)(nil), "extension.ValueSet")
+	proto.RegisterType((*ValueType)(nil), "extension.ValueType")
+	proto.RegisterType((*ValueSpecifier)(nil), "extension.ValueSpecifier")
+	proto.RegisterType((*ReturnValues)(nil), "extension.ReturnValues")
+	proto.RegisterType((*GetDistanceRequest)(nil), "extension.GetDistanceRequest")
+	proto.RegisterType((*GetDistanceResponse)(nil), "extension.GetDistanceResponse")
+	proto.RegisterType((*GetOnuUniInfoRequest)(nil), "extension.GetOnuUniInfoRequest")
+	proto.RegisterType((*GetOnuUniInfoResponse)(nil), "extension.GetOnuUniInfoResponse")
+	proto.RegisterType((*GetOltPortCounters)(nil), "extension.GetOltPortCounters")
+	proto.RegisterType((*GetOltPortCountersResponse)(nil), "extension.GetOltPortCountersResponse")
+	proto.RegisterType((*GetOnuPonOpticalInfo)(nil), "extension.GetOnuPonOpticalInfo")
+	proto.RegisterType((*GetOnuPonOpticalInfoResponse)(nil), "extension.GetOnuPonOpticalInfoResponse")
+	proto.RegisterType((*GetOnuEthernetBridgePortHistory)(nil), "extension.GetOnuEthernetBridgePortHistory")
+	proto.RegisterType((*GetOnuEthernetBridgePortHistoryResponse)(nil), "extension.GetOnuEthernetBridgePortHistoryResponse")
+	proto.RegisterType((*GetOnuFecHistory)(nil), "extension.GetOnuFecHistory")
+	proto.RegisterType((*GetOnuFecHistoryResponse)(nil), "extension.GetOnuFecHistoryResponse")
+	proto.RegisterType((*GetOnuCountersRequest)(nil), "extension.GetOnuCountersRequest")
+	proto.RegisterType((*GetOmciEthernetFrameExtendedPmRequest)(nil), "extension.GetOmciEthernetFrameExtendedPmRequest")
+	proto.RegisterType((*GetRxPowerRequest)(nil), "extension.GetRxPowerRequest")
+	proto.RegisterType((*GetOnuCountersResponse)(nil), "extension.GetOnuCountersResponse")
+	proto.RegisterType((*OmciEthernetFrameExtendedPm)(nil), "extension.OmciEthernetFrameExtendedPm")
+	proto.RegisterType((*GetOmciEthernetFrameExtendedPmResponse)(nil), "extension.GetOmciEthernetFrameExtendedPmResponse")
+	proto.RegisterType((*GetRxPowerResponse)(nil), "extension.GetRxPowerResponse")
+	proto.RegisterType((*GetValueRequest)(nil), "extension.GetValueRequest")
+	proto.RegisterType((*GetValueResponse)(nil), "extension.GetValueResponse")
+	proto.RegisterType((*SetValueRequest)(nil), "extension.SetValueRequest")
+	proto.RegisterType((*SetValueResponse)(nil), "extension.SetValueResponse")
+	proto.RegisterType((*SingleGetValueRequest)(nil), "extension.SingleGetValueRequest")
+	proto.RegisterType((*SingleGetValueResponse)(nil), "extension.SingleGetValueResponse")
+	proto.RegisterType((*SingleSetValueRequest)(nil), "extension.SingleSetValueRequest")
+	proto.RegisterType((*SingleSetValueResponse)(nil), "extension.SingleSetValueResponse")
+}
+
+func init() { proto.RegisterFile("voltha_protos/extensions.proto", fileDescriptor_7ecf6e9799a9202d) }
+
+var fileDescriptor_7ecf6e9799a9202d = []byte{
+	// 3230 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x5a, 0xcd, 0x6f, 0x1b, 0xc9,
+	0xb1, 0x17, 0x29, 0x89, 0x22, 0x8b, 0x22, 0x45, 0xb5, 0xbe, 0x28, 0xc9, 0x9f, 0xb3, 0x58, 0xdb,
+	0x6f, 0xe1, 0xa5, 0x4d, 0xae, 0xe4, 0xd5, 0xf3, 0xee, 0x03, 0x56, 0x23, 0x8e, 0x44, 0xc2, 0x32,
+	0x49, 0x37, 0x49, 0x7b, 0xfd, 0x80, 0x87, 0xc1, 0x88, 0xd3, 0x92, 0x07, 0x4b, 0xce, 0xf0, 0xcd,
+	0x34, 0xb5, 0x72, 0xae, 0xc9, 0x2d, 0xc9, 0x29, 0x97, 0xfc, 0x0d, 0x01, 0x82, 0x1c, 0x72, 0xc8,
+	0x3d, 0xe7, 0xfc, 0x13, 0x01, 0xf2, 0x17, 0x04, 0x39, 0x07, 0x41, 0x7f, 0xcc, 0x27, 0x29, 0xd9,
+	0xde, 0xe4, 0x62, 0xb3, 0xab, 0x7e, 0xf5, 0xeb, 0x9e, 0xea, 0xaa, 0xea, 0x9a, 0x1e, 0xc1, 0x9d,
+	0x4b, 0x67, 0x48, 0xdf, 0x19, 0xfa, 0xd8, 0x75, 0xa8, 0xe3, 0x3d, 0x21, 0x57, 0x94, 0xd8, 0x9e,
+	0xe5, 0xd8, 0x5e, 0x85, 0x4b, 0x50, 0x2e, 0x90, 0xec, 0x4c, 0x43, 0xf5, 0x81, 0x63, 0x9f, 0x5b,
+	0x17, 0x02, 0xba, 0xb3, 0x7b, 0xe1, 0x38, 0x17, 0x43, 0xf2, 0x84, 0x8f, 0xce, 0x26, 0xe7, 0x4f,
+	0xc8, 0x68, 0x4c, 0xdf, 0x0b, 0xa5, 0xf2, 0x7f, 0x90, 0x7d, 0x6d, 0x0c, 0x27, 0xa4, 0x4b, 0x28,
+	0x2a, 0x42, 0xda, 0x32, 0xcb, 0xa9, 0x7b, 0xa9, 0x47, 0x39, 0x9c, 0xb6, 0x4c, 0x74, 0x00, 0xcb,
+	0xc6, 0xd0, 0x70, 0x47, 0x92, 0xae, 0x9c, 0xbe, 0x97, 0x7a, 0x94, 0xaf, 0xad, 0x55, 0x24, 0xfb,
+	0x21, 0xd3, 0x1d, 0xf1, 0xdf, 0x8d, 0x39, 0x9c, 0x37, 0xc2, 0xa1, 0xba, 0x04, 0x8b, 0x97, 0x8c,
+	0x55, 0x79, 0x0c, 0x39, 0x4e, 0xdf, 0x7b, 0x3f, 0x26, 0xca, 0x5d, 0x58, 0x60, 0xff, 0xa3, 0x1c,
+	0x2c, 0x6a, 0x2f, 0x3b, 0xbd, 0xb7, 0xa5, 0x39, 0xb4, 0x0c, 0xd9, 0x7a, 0xb3, 0xdb, 0x3b, 0x6c,
+	0x1d, 0x69, 0xa5, 0x94, 0xf2, 0x0a, 0x8a, 0x62, 0x31, 0x63, 0x32, 0xb0, 0xce, 0x2d, 0xe2, 0x4e,
+	0x2d, 0xe9, 0x89, 0x24, 0xe6, 0x6b, 0x29, 0xd6, 0xb6, 0x2b, 0x81, 0x1b, 0x2a, 0xc1, 0x3c, 0x15,
+	0xf6, 0x0f, 0x96, 0x0b, 0xa0, 0xb0, 0x8c, 0x09, 0x9d, 0xb8, 0x36, 0x57, 0x7b, 0xa8, 0x04, 0xf3,
+	0x5d, 0x42, 0x39, 0x63, 0x01, 0xb3, 0x9f, 0xe8, 0x1e, 0xe4, 0xfb, 0xb6, 0x37, 0x19, 0x8f, 0x1d,
+	0x97, 0x12, 0x93, 0x13, 0x17, 0x70, 0x54, 0x84, 0xd6, 0x61, 0x51, 0x73, 0x5d, 0xc7, 0x2d, 0xcf,
+	0x73, 0x9d, 0x18, 0xa0, 0x1d, 0xc8, 0xd6, 0x2d, 0x8f, 0x1a, 0xf6, 0x80, 0x94, 0x17, 0xb8, 0x22,
+	0x18, 0x2b, 0xcf, 0x00, 0x9d, 0x10, 0xea, 0x0f, 0x31, 0xf9, 0xff, 0x09, 0xf1, 0xf8, 0x4c, 0x8e,
+	0x3d, 0xa9, 0x93, 0x4b, 0x6b, 0x40, 0x9a, 0xfe, 0x53, 0x45, 0x45, 0x4a, 0x15, 0xd6, 0x62, 0x76,
+	0xde, 0xd8, 0xb1, 0x3d, 0xc2, 0xa6, 0x32, 0xfd, 0xa9, 0xc4, 0xca, 0x83, 0xb1, 0x52, 0x83, 0xf5,
+	0x13, 0x42, 0xdb, 0xf6, 0xa4, 0x6f, 0x5b, 0x4d, 0xfb, 0xdc, 0xf1, 0x27, 0xdb, 0x81, 0xec, 0x84,
+	0x49, 0x4c, 0x72, 0xe5, 0xdb, 0xf8, 0x63, 0xe5, 0xaf, 0x0b, 0xb0, 0x91, 0x30, 0x92, 0x33, 0x75,
+	0x20, 0x6b, 0x98, 0xa3, 0x2e, 0x35, 0xa8, 0x98, 0xa9, 0x58, 0xdb, 0x8b, 0xb8, 0x78, 0xa6, 0x4d,
+	0xe5, 0xd0, 0x1c, 0x59, 0xb6, 0xe5, 0x51, 0xd7, 0xa0, 0xd6, 0x25, 0xe1, 0xb6, 0x38, 0x60, 0x41,
+	0x6d, 0xc8, 0x39, 0x63, 0xe2, 0x0a, 0x4a, 0xb1, 0x6b, 0xd5, 0x0f, 0x52, 0xb6, 0xc7, 0x84, 0xb1,
+	0x39, 0xb6, 0x31, 0x14, 0x7c, 0x21, 0x07, 0x23, 0x14, 0x01, 0xd8, 0xb4, 0x4d, 0xbe, 0x23, 0x1f,
+	0x43, 0x28, 0xe2, 0x72, 0x22, 0x48, 0x9b, 0xb6, 0x89, 0x43, 0x0e, 0xe5, 0xcf, 0x29, 0x28, 0x25,
+	0xf5, 0x08, 0x20, 0xd3, 0x6f, 0xbd, 0x68, 0xbf, 0x69, 0x95, 0xe6, 0x10, 0x82, 0x62, 0x4f, 0x6b,
+	0xe9, 0xea, 0x61, 0x57, 0xd3, 0x7b, 0xfa, 0x71, 0xfd, 0xfb, 0x52, 0x0a, 0x6d, 0x02, 0x6a, 0xf4,
+	0x5b, 0x75, 0xac, 0xd5, 0xa3, 0xf2, 0x34, 0x2a, 0xc3, 0xfa, 0x49, 0xf3, 0xe4, 0x50, 0x6d, 0xf6,
+	0x74, 0xad, 0xd7, 0xd0, 0x70, 0x4b, 0x13, 0x9a, 0x79, 0x66, 0xc1, 0x58, 0x4e, 0xe2, 0xf2, 0x85,
+	0x04, 0x7b, 0xa3, 0xfe, 0x7d, 0x69, 0x71, 0x06, 0x3b, 0x93, 0x67, 0x66, 0xb2, 0x33, 0xcd, 0x92,
+	0x72, 0x02, 0x6b, 0x33, 0xf6, 0x81, 0x11, 0x1d, 0xd6, 0x5f, 0x76, 0x7b, 0x87, 0x3d, 0x4d, 0xef,
+	0xb7, 0xea, 0xda, 0x71, 0xb3, 0xa5, 0xd5, 0x4b, 0x73, 0xec, 0xf1, 0x4e, 0xdb, 0x47, 0x2f, 0xb4,
+	0x7a, 0x29, 0xc5, 0x72, 0xb0, 0xdf, 0x92, 0xa3, 0xb4, 0x72, 0x0c, 0xa5, 0xa4, 0xf7, 0xd1, 0x16,
+	0xac, 0xb5, 0x3b, 0x1a, 0x9e, 0xa6, 0xc9, 0xc3, 0x92, 0xd6, 0x3a, 0x54, 0x4f, 0x7d, 0x9e, 0x7a,
+	0xb3, 0x2b, 0x46, 0x69, 0xe5, 0x4f, 0x29, 0x9e, 0x03, 0xed, 0x21, 0xed, 0x38, 0x2e, 0x3d, 0x72,
+	0x26, 0x36, 0x25, 0xae, 0x87, 0x36, 0x21, 0xc3, 0xb2, 0xaa, 0xe5, 0xc8, 0xa0, 0x94, 0x23, 0xa4,
+	0x42, 0x96, 0xfd, 0x62, 0xa9, 0x2b, 0xa3, 0xe4, 0x41, 0x62, 0x53, 0xe3, 0x44, 0x95, 0x8e, 0x44,
+	0xe3, 0xc0, 0x4e, 0xd1, 0x20, 0xeb, 0x4b, 0x51, 0x09, 0x96, 0xd9, 0x6f, 0xbd, 0xdf, 0x7a, 0xd1,
+	0x12, 0xbb, 0xb8, 0x01, 0xab, 0x5c, 0x12, 0x38, 0xae, 0xd5, 0x6a, 0x96, 0x52, 0x01, 0xb0, 0xd3,
+	0x6e, 0xe9, 0xed, 0xd3, 0x5e, 0x29, 0xad, 0xfc, 0x65, 0x1e, 0x76, 0xa6, 0x27, 0x0c, 0x52, 0xa4,
+	0x0c, 0x4b, 0xf4, 0x4a, 0x7d, 0x4f, 0x89, 0xc7, 0x1f, 0x61, 0x01, 0xfb, 0x43, 0xa6, 0x71, 0xa5,
+	0x26, 0x2d, 0x34, 0x72, 0x88, 0x6e, 0x41, 0x8e, 0x5e, 0x75, 0x8c, 0xc1, 0x0f, 0x84, 0x7a, 0x3c,
+	0x66, 0x17, 0x70, 0x28, 0x60, 0x5a, 0x37, 0xd0, 0x2e, 0x08, 0x6d, 0x20, 0x40, 0x0f, 0xa0, 0x48,
+	0xaf, 0x78, 0xc9, 0xf1, 0x21, 0x8b, 0x1c, 0x92, 0x90, 0x32, 0x9c, 0x1b, 0xc7, 0x65, 0x04, 0xce,
+	0x9d, 0xc2, 0xd1, 0x2b, 0x75, 0x60, 0x78, 0xd4, 0xc7, 0x2d, 0xf9, 0x7c, 0x51, 0xa9, 0xe0, 0x8b,
+	0xe1, 0xb2, 0x3e, 0x5f, 0x12, 0x47, 0xaf, 0xfa, 0x51, 0x5c, 0xce, 0xe7, 0xeb, 0x4f, 0xf1, 0xc5,
+	0x70, 0xe0, 0xf3, 0xf5, 0xa7, 0xf8, 0x5e, 0x46, 0x71, 0x79, 0x9f, 0xef, 0xe5, 0x14, 0x5f, 0x0c,
+	0xb7, 0xec, 0xf3, 0x45, 0xa5, 0x4a, 0xdd, 0x2f, 0x90, 0x1d, 0xc7, 0x6e, 0x8f, 0xa9, 0x35, 0x30,
+	0x86, 0xac, 0x34, 0xa0, 0xc7, 0xb0, 0xc8, 0x0f, 0x42, 0xbe, 0x8b, 0xf9, 0xda, 0x66, 0x45, 0x1c,
+	0x93, 0x15, 0xff, 0x98, 0xac, 0x68, 0x4c, 0x8b, 0x05, 0x48, 0xf9, 0x45, 0x1a, 0x6e, 0xcd, 0xa2,
+	0x09, 0xc2, 0xe2, 0x0b, 0x28, 0x8d, 0x9d, 0x1f, 0x89, 0x7b, 0x4c, 0x88, 0xf9, 0xda, 0x19, 0x52,
+	0xe3, 0x42, 0x54, 0xd0, 0x34, 0x9e, 0x92, 0xa3, 0x1a, 0xac, 0xbb, 0x64, 0x40, 0xac, 0x4b, 0x62,
+	0x4a, 0xaa, 0x0e, 0x83, 0xf0, 0xa8, 0x49, 0xe3, 0x99, 0x3a, 0xf4, 0x0c, 0x36, 0x47, 0xc4, 0xf0,
+	0xa7, 0x3e, 0x35, 0x26, 0xf6, 0xe0, 0x9d, 0xb0, 0x9a, 0xe7, 0x56, 0xd7, 0x68, 0xd9, 0xba, 0x86,
+	0x86, 0x47, 0x5c, 0xd5, 0x32, 0xbc, 0xa3, 0x89, 0xeb, 0x12, 0x9b, 0xf2, 0x18, 0x4b, 0xe3, 0x29,
+	0x39, 0x3b, 0xa0, 0x28, 0x19, 0xf1, 0xec, 0x9f, 0xb8, 0x84, 0xc7, 0x59, 0x1a, 0x47, 0x45, 0xca,
+	0x1f, 0x52, 0x70, 0x57, 0xb8, 0x41, 0xa3, 0xef, 0x88, 0x6b, 0x13, 0xaa, 0xba, 0x96, 0x79, 0x41,
+	0x58, 0xa6, 0x34, 0x2c, 0x8f, 0x3a, 0xee, 0x7b, 0x84, 0x21, 0x67, 0x5a, 0x2e, 0x19, 0xb0, 0x0a,
+	0x72, 0xed, 0x21, 0x72, 0xad, 0x79, 0xa5, 0xee, 0xdb, 0xe2, 0x90, 0x46, 0x39, 0x80, 0x5c, 0x20,
+	0x47, 0x05, 0xc8, 0x45, 0x8b, 0x10, 0xab, 0x5f, 0x9d, 0x6e, 0x0f, 0x6b, 0x87, 0x2f, 0x4b, 0x29,
+	0x54, 0x04, 0xa8, 0xb7, 0xdf, 0xb4, 0xe4, 0x38, 0xad, 0xfc, 0x66, 0x11, 0x1e, 0x7e, 0x60, 0xca,
+	0x60, 0x0f, 0xef, 0x00, 0x98, 0xae, 0x33, 0xd6, 0x2e, 0x89, 0x4d, 0x3d, 0x59, 0xa0, 0x22, 0x12,
+	0x56, 0xbc, 0x9c, 0x01, 0x65, 0xa1, 0x26, 0xba, 0x04, 0x39, 0x62, 0x89, 0x3f, 0x8e, 0x24, 0x77,
+	0x01, 0xfb, 0x43, 0xe6, 0xfd, 0x33, 0xd7, 0x31, 0xcc, 0x68, 0x98, 0x8a, 0x66, 0x61, 0x4a, 0xce,
+	0xb0, 0xa3, 0xc9, 0x90, 0x6d, 0x60, 0x88, 0x5d, 0x14, 0xd8, 0xa4, 0x1c, 0x3d, 0x86, 0xd5, 0x81,
+	0x3b, 0xe0, 0x79, 0x4d, 0xcc, 0x68, 0xbe, 0x17, 0xf0, 0xb4, 0x82, 0x31, 0x4f, 0x6c, 0x93, 0xb8,
+	0x9e, 0xf5, 0x33, 0x12, 0x4d, 0xfa, 0x02, 0x9e, 0x92, 0xa3, 0x47, 0xb0, 0xe2, 0x5c, 0xc6, 0xa1,
+	0x59, 0x0e, 0x4d, 0x8a, 0x19, 0x52, 0x3e, 0xe6, 0xb3, 0x3d, 0xe9, 0x96, 0x9c, 0x40, 0x26, 0xc4,
+	0x2c, 0xde, 0x7d, 0xd1, 0x7e, 0xcf, 0xa9, 0xd6, 0xbe, 0x96, 0x70, 0xe0, 0xf0, 0x99, 0x3a, 0xb4,
+	0x07, 0x1b, 0x52, 0x5e, 0xad, 0x1d, 0xf4, 0x9c, 0xda, 0xfe, 0x7e, 0x5b, 0x18, 0xe5, 0xb9, 0xd1,
+	0x6c, 0x65, 0xc4, 0xaa, 0xb6, 0xff, 0xac, 0xe7, 0xec, 0x57, 0xab, 0x72, 0xaa, 0xe5, 0x98, 0x55,
+	0x5c, 0xc9, 0x72, 0x4b, 0x2a, 0xf6, 0xab, 0xb5, 0x9e, 0x53, 0x7d, 0x5a, 0xfb, 0x4a, 0x9a, 0x15,
+	0xb8, 0xd9, 0x35, 0x5a, 0x74, 0x00, 0x5b, 0xfe, 0x32, 0x9e, 0xd6, 0xf6, 0x7a, 0x4e, 0x75, 0xbf,
+	0x7a, 0x20, 0x0d, 0x8b, 0xdc, 0xf0, 0x3a, 0xb5, 0xf2, 0x1d, 0x94, 0x44, 0x50, 0x1e, 0x93, 0x81,
+	0x9f, 0x37, 0x9f, 0x56, 0x90, 0xfe, 0x9e, 0x82, 0x72, 0x92, 0x22, 0x08, 0xe4, 0x07, 0x50, 0x1c,
+	0x38, 0x2e, 0xcb, 0x17, 0x62, 0x86, 0x47, 0x55, 0x01, 0x27, 0xa4, 0xa8, 0x02, 0x28, 0x90, 0x1c,
+	0x39, 0x26, 0x79, 0xe3, 0xb8, 0xa6, 0x1f, 0xdc, 0x33, 0x34, 0x2c, 0x41, 0xce, 0xc9, 0xa0, 0x4b,
+	0x06, 0x8e, 0x6d, 0xfa, 0xb1, 0x1e, 0x91, 0xf0, 0xda, 0xed, 0x50, 0x63, 0x18, 0x72, 0x89, 0x60,
+	0x4f, 0x48, 0x99, 0xc3, 0x27, 0xb6, 0xe4, 0x37, 0xce, 0x86, 0x24, 0xc4, 0x8b, 0x80, 0xbf, 0x46,
+	0xab, 0x9c, 0xf8, 0x7d, 0x6b, 0x78, 0x2a, 0x8b, 0x6e, 0x77, 0x0b, 0x96, 0x2c, 0x9b, 0x9e, 0xeb,
+	0xf2, 0x65, 0x61, 0x09, 0x67, 0xd8, 0xb0, 0x69, 0xa2, 0x0d, 0xc8, 0x38, 0xf6, 0x84, 0xc9, 0xd3,
+	0x5c, 0xbe, 0xe8, 0xd8, 0x93, 0xa6, 0xa9, 0xfc, 0x3a, 0x05, 0x9f, 0x33, 0xa6, 0xd1, 0xc0, 0xf2,
+	0xcb, 0xc2, 0xb1, 0x6b, 0x8c, 0x88, 0xc6, 0xca, 0x94, 0x49, 0xcc, 0xce, 0xe8, 0xa3, 0x9b, 0x76,
+	0x74, 0x2b, 0xd2, 0x69, 0x73, 0xd7, 0x35, 0xe6, 0xc2, 0x5e, 0x9b, 0xbd, 0x3c, 0xb8, 0xc4, 0x23,
+	0x94, 0x7b, 0x2b, 0x8b, 0xc5, 0x40, 0x2d, 0xc2, 0xb2, 0xe5, 0xe9, 0x13, 0xdb, 0xd2, 0x2d, 0xde,
+	0x91, 0x1f, 0xc1, 0xea, 0x09, 0xa1, 0xf8, 0x8a, 0xd7, 0xec, 0x9f, 0xfa, 0x50, 0x7f, 0x5b, 0x86,
+	0xcd, 0xa4, 0x7b, 0x64, 0x40, 0x6c, 0x27, 0xa8, 0x1a, 0x73, 0x01, 0xd9, 0x56, 0x9c, 0xac, 0x91,
+	0x92, 0x74, 0xe8, 0x21, 0x14, 0xc7, 0x8e, 0x67, 0xb1, 0x66, 0x52, 0x37, 0x5d, 0xeb, 0x5c, 0x3c,
+	0x42, 0xa6, 0x91, 0xc6, 0x05, 0x5f, 0x5e, 0x67, 0x62, 0x06, 0xb4, 0xc9, 0x85, 0x11, 0x01, 0x2e,
+	0x70, 0xe0, 0x3c, 0x2e, 0xf8, 0x72, 0x01, 0x7c, 0x0e, 0x65, 0x93, 0x0c, 0xad, 0x91, 0x45, 0x89,
+	0xab, 0x8f, 0x2c, 0xcf, 0xd3, 0x4d, 0x42, 0xe5, 0x41, 0xb1, 0xc8, 0x4d, 0x16, 0xf0, 0x66, 0x80,
+	0x78, 0x69, 0x79, 0x5e, 0xdd, 0xd7, 0xa3, 0xbb, 0x00, 0x67, 0xd6, 0x58, 0x27, 0xac, 0xb2, 0x89,
+	0x52, 0x97, 0x69, 0x2c, 0xe2, 0xdc, 0x99, 0x35, 0xe6, 0xc5, 0xce, 0x43, 0xb7, 0x81, 0x0d, 0x98,
+	0x4f, 0x65, 0x75, 0xcb, 0x34, 0x32, 0x38, 0x7b, 0x66, 0x8d, 0xfb, 0x4c, 0xc2, 0x2a, 0xc3, 0x39,
+	0x19, 0xe8, 0x41, 0x50, 0xeb, 0xde, 0xfb, 0xd1, 0x99, 0x33, 0x14, 0xd5, 0x2d, 0xd3, 0x58, 0xc2,
+	0x6b, 0xe7, 0x64, 0x70, 0xe4, 0x6b, 0xbb, 0x42, 0xc9, 0x32, 0x5c, 0x58, 0x99, 0xe4, 0x47, 0x16,
+	0x81, 0xa1, 0x3d, 0xaf, 0x75, 0x99, 0x46, 0x16, 0x6f, 0x70, 0x3b, 0xa9, 0x0f, 0x08, 0xd0, 0x77,
+	0xb0, 0x1b, 0xb7, 0x8c, 0x85, 0x34, 0x2f, 0x7d, 0x99, 0x46, 0x0e, 0x6f, 0x47, 0xad, 0xfb, 0x51,
+	0x08, 0xfa, 0x1c, 0x0a, 0x31, 0x06, 0x5e, 0xf9, 0x32, 0x0d, 0xc0, 0xcb, 0x51, 0x1b, 0xf4, 0x14,
+	0xd6, 0xe2, 0x0f, 0x26, 0x3c, 0xb0, 0xcc, 0xc1, 0x79, 0xbc, 0x1a, 0x7d, 0x2c, 0xe1, 0x8a, 0x47,
+	0xb0, 0x72, 0x75, 0x41, 0x46, 0xfa, 0x0f, 0xe4, 0xbd, 0xef, 0xcf, 0x02, 0x47, 0x2f, 0xe3, 0x02,
+	0x53, 0xbc, 0x20, 0xef, 0x43, 0x9f, 0x72, 0xe4, 0xd0, 0xf1, 0x44, 0x49, 0xcb, 0x34, 0x0a, 0x38,
+	0xcb, 0x44, 0xa7, 0x8e, 0xc7, 0x89, 0xdc, 0x2b, 0x7d, 0x3c, 0x74, 0x8c, 0x91, 0x27, 0x98, 0xca,
+	0x2b, 0x1c, 0x54, 0xc4, 0x05, 0xf7, 0xaa, 0xc3, 0xe5, 0xe2, 0x65, 0xf9, 0x4b, 0x40, 0x21, 0xd2,
+	0x76, 0x6c, 0xdd, 0x32, 0x87, 0xa4, 0x5c, 0xe2, 0xe0, 0x15, 0xbc, 0xe2, 0x83, 0x5b, 0x8e, 0xdd,
+	0x34, 0x87, 0x3c, 0x5c, 0xdd, 0x2b, 0xdd, 0x19, 0x0d, 0xac, 0xf2, 0x2a, 0xc7, 0x94, 0x70, 0xc6,
+	0xbd, 0x62, 0xd9, 0xca, 0x54, 0x54, 0xaa, 0x10, 0x57, 0xad, 0xe2, 0x0c, 0x15, 0xaa, 0xe7, 0xb0,
+	0x2d, 0xad, 0x74, 0x59, 0x77, 0xf5, 0x81, 0x3b, 0x90, 0x0b, 0x5b, 0xe3, 0x60, 0x84, 0x37, 0x04,
+	0x8f, 0x3c, 0xc4, 0x8e, 0xe4, 0x59, 0x89, 0x76, 0x21, 0xeb, 0x5e, 0xe9, 0x67, 0xbc, 0x56, 0xae,
+	0x73, 0xe8, 0x5a, 0xd8, 0xbe, 0xdf, 0x05, 0x60, 0xab, 0x97, 0xc7, 0xe1, 0x06, 0x57, 0xaf, 0x47,
+	0x7b, 0xf4, 0x5d, 0xc8, 0x52, 0xdf, 0x7a, 0x93, 0xab, 0x37, 0xc2, 0xd7, 0x82, 0xbb, 0x00, 0x34,
+	0xb4, 0xde, 0xe2, 0xea, 0xcd, 0x68, 0xff, 0xff, 0x19, 0x2c, 0x9f, 0x11, 0x57, 0x77, 0x89, 0xbc,
+	0x82, 0x28, 0x73, 0xc8, 0x16, 0xce, 0x9f, 0xb1, 0x5a, 0x20, 0x2f, 0x21, 0xee, 0x43, 0x7e, 0x38,
+	0x30, 0x2f, 0xfc, 0x0d, 0xdb, 0xe6, 0x98, 0x32, 0x06, 0x26, 0x94, 0xbb, 0xc5, 0x96, 0x69, 0x5a,
+	0x3e, 0x62, 0x87, 0x23, 0xb6, 0x71, 0xce, 0x35, 0x2d, 0x09, 0xb8, 0x03, 0x39, 0x6a, 0x8d, 0x88,
+	0x47, 0x8d, 0xd1, 0xb8, 0xbc, 0xcb, 0xb3, 0x7d, 0x07, 0x87, 0x22, 0x75, 0x19, 0xc0, 0xf2, 0x74,
+	0x59, 0x28, 0xd4, 0x3c, 0xe4, 0x2c, 0x4f, 0x17, 0xb5, 0x41, 0x5d, 0x83, 0x55, 0xcb, 0xd3, 0xe3,
+	0xf5, 0x40, 0x0a, 0xe3, 0xb9, 0xaf, 0xde, 0x86, 0x5d, 0x8b, 0x25, 0xf6, 0xec, 0x3c, 0x57, 0x57,
+	0xa0, 0x60, 0x79, 0x7a, 0x98, 0xca, 0xb2, 0x14, 0x06, 0xa9, 0xab, 0xee, 0x40, 0xd9, 0xf2, 0xf4,
+	0x99, 0xb9, 0xaa, 0xde, 0x82, 0x9d, 0x40, 0x37, 0x95, 0x91, 0xea, 0x3d, 0xb8, 0x33, 0xa5, 0x8d,
+	0x65, 0x9d, 0x8a, 0xa0, 0x94, 0x44, 0xa8, 0x65, 0xd8, 0x9c, 0x9a, 0x4f, 0xac, 0x64, 0x1d, 0x90,
+	0xe5, 0xe9, 0x89, 0x54, 0x91, 0xeb, 0x0d, 0xd2, 0x42, 0xa2, 0x12, 0x79, 0xa0, 0x6e, 0xc1, 0x46,
+	0x4c, 0xea, 0xc7, 0xbc, 0xf4, 0xb1, 0x8c, 0x53, 0x39, 0x92, 0x01, 0xad, 0xde, 0x81, 0x5b, 0xa1,
+	0x6e, 0x3a, 0x86, 0xd5, 0x02, 0xe4, 0x85, 0x9e, 0x47, 0x9a, 0x74, 0x65, 0x18, 0x99, 0x52, 0x4f,
+	0xe3, 0xfa, 0x30, 0xf6, 0xd4, 0x55, 0x58, 0x61, 0xae, 0x8e, 0xc4, 0x9a, 0x5a, 0x82, 0xa2, 0xe5,
+	0xe9, 0x91, 0xc8, 0xf2, 0x59, 0x83, 0x40, 0x92, 0x0f, 0x1c, 0x44, 0x89, 0xf2, 0xab, 0x45, 0xd8,
+	0xbd, 0xe1, 0xe0, 0x44, 0x77, 0x21, 0xcf, 0x7a, 0x66, 0x9d, 0x84, 0x6d, 0x74, 0xe6, 0x86, 0x36,
+	0x3a, 0x13, 0xb4, 0xd1, 0x9b, 0x90, 0x39, 0x67, 0x5c, 0xa2, 0xb3, 0xc8, 0x60, 0x39, 0x42, 0xff,
+	0x15, 0x69, 0xa2, 0x75, 0x89, 0xe0, 0x27, 0x0c, 0x5e, 0x09, 0xe4, 0xc7, 0x01, 0x34, 0xe8, 0x95,
+	0x7d, 0xe8, 0xa2, 0x80, 0x06, 0x72, 0x09, 0x7d, 0x0c, 0x28, 0xf0, 0x2c, 0x31, 0x7d, 0x30, 0x3f,
+	0x58, 0x70, 0x29, 0xec, 0xa1, 0x43, 0xe2, 0xa0, 0x55, 0xf6, 0xb1, 0x4b, 0x82, 0x38, 0x90, 0x4b,
+	0xe8, 0xc3, 0xb0, 0x83, 0xf6, 0x91, 0xfc, 0x8c, 0xc1, 0x45, 0x5f, 0x2c, 0x81, 0x8f, 0xa0, 0x24,
+	0xf4, 0xfa, 0xb3, 0x3d, 0x3d, 0xd2, 0x41, 0x67, 0x70, 0x51, 0xc8, 0x9f, 0xed, 0x05, 0x6d, 0xed,
+	0x96, 0x8f, 0xdc, 0xd7, 0xa9, 0xa3, 0x57, 0x6b, 0x5f, 0xeb, 0x91, 0x1e, 0x3a, 0x83, 0xd7, 0xa4,
+	0x81, 0x68, 0xa1, 0xdb, 0x7e, 0x5b, 0x5b, 0x96, 0x56, 0xd5, 0xda, 0x01, 0x33, 0xab, 0xed, 0xef,
+	0xfb, 0x66, 0xfc, 0x2c, 0xc1, 0xeb, 0x42, 0x9f, 0x68, 0xa2, 0x43, 0xbb, 0xda, 0xfe, 0x33, 0x66,
+	0xb7, 0x5f, 0xad, 0xea, 0x91, 0x3e, 0x3a, 0xb0, 0xf3, 0xdb, 0xe8, 0xb6, 0xdf, 0x0e, 0x6f, 0x4b,
+	0xbb, 0xfd, 0x6a, 0x8d, 0x2f, 0xf3, 0x69, 0xed, 0x2b, 0x3d, 0xd2, 0x49, 0x67, 0xf0, 0x86, 0x00,
+	0x04, 0x8d, 0xb4, 0xb4, 0x7c, 0x0e, 0x3b, 0xfe, 0x4a, 0x9f, 0xd6, 0xf6, 0xb8, 0xe9, 0x7e, 0xf5,
+	0x40, 0x8f, 0xf4, 0xd2, 0x19, 0xbc, 0x29, 0xd7, 0x1a, 0xb4, 0xd2, 0xc2, 0x56, 0xf9, 0x47, 0x1a,
+	0x1e, 0x7c, 0xa8, 0x95, 0x93, 0x5d, 0x90, 0x0a, 0xd9, 0xc9, 0xd8, 0xa3, 0x2e, 0x31, 0x46, 0xb2,
+	0xc9, 0x8e, 0x5e, 0x32, 0xdd, 0xc4, 0x10, 0xd8, 0xa1, 0x63, 0x00, 0xd3, 0xf9, 0xd1, 0x96, 0x2c,
+	0xe9, 0x4f, 0x62, 0x89, 0x58, 0xa2, 0x5f, 0xa6, 0xe0, 0x01, 0x4f, 0x73, 0x22, 0xc1, 0x22, 0x56,
+	0x74, 0x22, 0xe1, 0xfa, 0x78, 0xa4, 0x9f, 0x3b, 0xee, 0xc8, 0xa0, 0xf2, 0x92, 0xf3, 0x20, 0xf1,
+	0x0e, 0xfd, 0xe1, 0xe7, 0xad, 0x1c, 0x73, 0x7b, 0x7c, 0xdf, 0xb9, 0x1e, 0x2b, 0x20, 0xca, 0x53,
+	0xc8, 0x88, 0x5f, 0xfc, 0x3a, 0xb2, 0xd1, 0xc4, 0xbd, 0xb7, 0x7a, 0xef, 0x4d, 0x5b, 0x57, 0x9b,
+	0x3d, 0x71, 0x01, 0xda, 0x6d, 0x7e, 0xdf, 0x7b, 0xab, 0x1f, 0xb7, 0xfb, 0x98, 0xcb, 0x52, 0xca,
+	0x6f, 0xc5, 0xfd, 0x5e, 0xd0, 0xb2, 0x4a, 0x17, 0x7f, 0x62, 0xcf, 0xca, 0x72, 0xde, 0xa3, 0x06,
+	0x9d, 0x88, 0x9c, 0xcf, 0x61, 0x39, 0x62, 0x45, 0xe4, 0xdc, 0xb0, 0x86, 0xba, 0x4b, 0x0c, 0xcf,
+	0xb1, 0x79, 0xba, 0xe7, 0x30, 0x30, 0x11, 0xe6, 0x12, 0xb4, 0xcd, 0x0f, 0x6c, 0x7e, 0xb5, 0xc2,
+	0x33, 0x3c, 0xc5, 0x8e, 0x6b, 0xbe, 0x16, 0xe5, 0xe7, 0x8b, 0xb0, 0x72, 0x42, 0x28, 0xbf, 0xf1,
+	0xf7, 0x7b, 0xe9, 0x6f, 0x12, 0x57, 0xe8, 0xf9, 0xda, 0xed, 0xb8, 0x3f, 0x13, 0x97, 0xf5, 0xac,
+	0x87, 0xf7, 0x0d, 0xd0, 0x37, 0xb0, 0x34, 0x11, 0x17, 0xca, 0x72, 0xc3, 0xef, 0x5e, 0x7f, 0xe1,
+	0xec, 0x5b, 0xfb, 0x16, 0xe8, 0x10, 0xf2, 0x8e, 0xb8, 0x4a, 0xe4, 0x04, 0xf3, 0xb3, 0x26, 0x4f,
+	0xdc, 0x35, 0x36, 0xe6, 0x70, 0xd4, 0x06, 0x35, 0xa1, 0xe8, 0xd8, 0x93, 0xc8, 0xad, 0x13, 0xf7,
+	0xc7, 0xac, 0x65, 0xc4, 0x2f, 0xa7, 0x1a, 0x73, 0x38, 0x61, 0x88, 0x30, 0x14, 0x08, 0x7d, 0x17,
+	0x5e, 0x81, 0x70, 0xdf, 0xe5, 0x6b, 0x5f, 0x7c, 0xfc, 0x05, 0x4d, 0x63, 0x0e, 0xc7, 0x29, 0xd0,
+	0xff, 0xf0, 0xb7, 0x42, 0xa9, 0xe6, 0x15, 0x34, 0x5f, 0xdb, 0x9d, 0x22, 0x0c, 0x5f, 0x53, 0x1b,
+	0x73, 0x38, 0x62, 0x80, 0x54, 0x00, 0x87, 0xaf, 0x9c, 0x3f, 0xd9, 0x12, 0x37, 0xbf, 0x37, 0x65,
+	0x9e, 0x78, 0xe3, 0x63, 0x1c, 0xa1, 0x15, 0x3a, 0x85, 0x25, 0x16, 0x4f, 0x8c, 0x20, 0xcb, 0x09,
+	0x9e, 0x7e, 0x42, 0xb6, 0x04, 0x5b, 0x26, 0x29, 0xd0, 0x01, 0xf8, 0xb1, 0xc4, 0xeb, 0x71, 0xbe,
+	0x76, 0x2b, 0xce, 0x16, 0x7f, 0x4f, 0x63, 0x96, 0x12, 0xae, 0xe6, 0x60, 0xc9, 0x15, 0x52, 0xe5,
+	0x77, 0x59, 0xfe, 0x8e, 0x2f, 0xa3, 0x50, 0xa6, 0xc7, 0xf3, 0x20, 0xdc, 0xc5, 0xc5, 0x98, 0x12,
+	0x27, 0x8e, 0x81, 0x2b, 0x5d, 0x8e, 0x0c, 0x52, 0x42, 0x83, 0x1c, 0x71, 0x5d, 0x11, 0xfe, 0xf2,
+	0x8e, 0xfc, 0xe1, 0x4d, 0xe6, 0xfc, 0x00, 0x13, 0x70, 0x1c, 0x5a, 0xa2, 0x6f, 0x23, 0x99, 0x20,
+	0x82, 0xf1, 0xce, 0x75, 0x99, 0x20, 0x88, 0x62, 0xa9, 0xf0, 0x6d, 0x98, 0x0a, 0x0b, 0xd7, 0xec,
+	0x54, 0xe2, 0xdb, 0x4b, 0x34, 0x17, 0x5e, 0xc0, 0xf2, 0x58, 0xc4, 0x39, 0xb5, 0x89, 0xeb, 0xc9,
+	0xe0, 0xfb, 0xfc, 0xc6, 0x64, 0x88, 0xf0, 0xc4, 0x8c, 0xd1, 0xab, 0xa9, 0xac, 0x10, 0xa1, 0xf7,
+	0xf0, 0x03, 0x59, 0x11, 0x21, 0x4c, 0x66, 0xc7, 0x19, 0xac, 0xc6, 0x42, 0x3b, 0x12, 0x91, 0xb5,
+	0x8f, 0xcf, 0x90, 0xc8, 0x04, 0xd3, 0x74, 0x48, 0x8b, 0x65, 0x8b, 0x88, 0xd6, 0xcf, 0x6e, 0xc8,
+	0x96, 0x08, 0x5b, 0x34, 0x6b, 0x5e, 0xf0, 0xa7, 0xef, 0x38, 0xb6, 0xef, 0x27, 0x19, 0xaa, 0xf7,
+	0x6f, 0xc8, 0x9c, 0xd8, 0x73, 0x47, 0x4c, 0x51, 0x9f, 0x5f, 0x72, 0x04, 0x4c, 0xc0, 0x99, 0xaa,
+	0x9f, 0x7c, 0xe0, 0xf0, 0xba, 0x15, 0xf2, 0xa0, 0xff, 0x0e, 0xf3, 0x28, 0x3f, 0xab, 0xec, 0x25,
+	0x0e, 0x8f, 0x48, 0x22, 0x29, 0x55, 0xc8, 0x88, 0xf0, 0x47, 0xeb, 0x50, 0xea, 0xf6, 0x0e, 0x7b,
+	0xfd, 0x6e, 0xec, 0xcb, 0x53, 0x06, 0xd2, 0xed, 0x17, 0xa5, 0x14, 0xff, 0x96, 0x8c, 0x71, 0x1b,
+	0x97, 0xd2, 0xca, 0xef, 0x53, 0x90, 0x8f, 0xc4, 0x3c, 0x33, 0xc4, 0xda, 0x61, 0xb7, 0xdd, 0x8a,
+	0x19, 0xae, 0x40, 0xbe, 0xdf, 0xea, 0xf6, 0x3b, 0x9d, 0x36, 0xee, 0xf1, 0xcf, 0x56, 0x1b, 0xb0,
+	0xda, 0x6c, 0xbd, 0x3e, 0x3c, 0x6d, 0xd6, 0xf5, 0xba, 0xf6, 0xba, 0x79, 0xa4, 0xe9, 0xcd, 0x7a,
+	0x29, 0x1d, 0x15, 0x33, 0xa8, 0xde, 0x7b, 0xdb, 0xd1, 0x4a, 0xf3, 0x28, 0x0f, 0x4b, 0xbd, 0xe6,
+	0x4b, 0xad, 0xdd, 0xef, 0x95, 0x16, 0xd8, 0x0c, 0x3e, 0x06, 0x6b, 0xaf, 0x04, 0x64, 0x91, 0x9d,
+	0x96, 0xcd, 0x56, 0x4f, 0xc3, 0xad, 0xc3, 0x53, 0x5d, 0xac, 0x2d, 0x23, 0x64, 0xd1, 0x49, 0x4a,
+	0x4b, 0x2a, 0x40, 0xd6, 0x95, 0x4f, 0xae, 0xbc, 0x86, 0x95, 0x6e, 0xe2, 0xc4, 0x4a, 0x7e, 0x7d,
+	0x4f, 0x7d, 0xf4, 0xd7, 0xf7, 0x48, 0x11, 0xfa, 0x67, 0x0a, 0x4a, 0xdd, 0x4f, 0x29, 0x42, 0xdd,
+	0x7f, 0xaf, 0x08, 0x75, 0x3f, 0xae, 0x08, 0xfd, 0x94, 0xed, 0xdd, 0xfb, 0x29, 0xbb, 0xab, 0x58,
+	0xb0, 0xd1, 0xb5, 0xec, 0x8b, 0x21, 0x49, 0x36, 0x04, 0x3b, 0x90, 0xa5, 0x86, 0x7b, 0x41, 0x68,
+	0x70, 0xa9, 0x17, 0x8c, 0xd1, 0x5e, 0xe0, 0x40, 0x79, 0xde, 0xef, 0xcc, 0xac, 0xb3, 0x1c, 0x81,
+	0x03, 0x5f, 0xbf, 0x82, 0xcd, 0xe4, 0x54, 0xd2, 0xe1, 0x5f, 0x87, 0x3b, 0x2d, 0xb7, 0x71, 0xf7,
+	0x86, 0xc2, 0x8d, 0xc3, 0xb0, 0x08, 0x56, 0xdf, 0xfd, 0x4f, 0xad, 0xbe, 0xfb, 0xc1, 0xd5, 0x77,
+	0x3f, 0x6d, 0xf5, 0xdd, 0x6b, 0x57, 0x5f, 0xfb, 0x63, 0x0a, 0x72, 0x9a, 0x0f, 0x44, 0x18, 0xf2,
+	0x27, 0x84, 0x6a, 0x57, 0x02, 0x8e, 0xa2, 0xe7, 0xc6, 0xcc, 0x1d, 0xda, 0xb9, 0x7f, 0x03, 0x42,
+	0x2e, 0x0d, 0x43, 0xbe, 0x7b, 0x23, 0x67, 0xf7, 0x83, 0x9c, 0xc9, 0xf5, 0xab, 0x18, 0x6e, 0x3b,
+	0xee, 0x45, 0xc5, 0x19, 0x13, 0x7b, 0xe0, 0xb8, 0x66, 0x45, 0xfc, 0x75, 0x4d, 0x68, 0xf7, 0xbf,
+	0xd5, 0x0b, 0x8b, 0xbe, 0x9b, 0x9c, 0x55, 0x06, 0xce, 0xe8, 0x89, 0x8f, 0x7a, 0x22, 0x50, 0x5f,
+	0xca, 0xbf, 0xc1, 0xb9, 0xdc, 0x7f, 0x72, 0xe1, 0x84, 0x7f, 0xb4, 0x73, 0x96, 0xe1, 0xf2, 0xaf,
+	0xfe, 0x15, 0x00, 0x00, 0xff, 0xff, 0x86, 0xb9, 0xfc, 0x29, 0xd6, 0x23, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// ExtensionClient is the client API for Extension service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type ExtensionClient interface {
+	// Get a single attribute
+	GetExtValue(ctx context.Context, in *SingleGetValueRequest, opts ...grpc.CallOption) (*SingleGetValueResponse, error)
+	// Set a single attribute
+	SetExtValue(ctx context.Context, in *SingleSetValueRequest, opts ...grpc.CallOption) (*SingleSetValueResponse, error)
+}
+
+type extensionClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewExtensionClient(cc *grpc.ClientConn) ExtensionClient {
+	return &extensionClient{cc}
+}
+
+func (c *extensionClient) GetExtValue(ctx context.Context, in *SingleGetValueRequest, opts ...grpc.CallOption) (*SingleGetValueResponse, error) {
+	out := new(SingleGetValueResponse)
+	err := c.cc.Invoke(ctx, "/extension.Extension/GetExtValue", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *extensionClient) SetExtValue(ctx context.Context, in *SingleSetValueRequest, opts ...grpc.CallOption) (*SingleSetValueResponse, error) {
+	out := new(SingleSetValueResponse)
+	err := c.cc.Invoke(ctx, "/extension.Extension/SetExtValue", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// ExtensionServer is the server API for Extension service.
+type ExtensionServer interface {
+	// Get a single attribute
+	GetExtValue(context.Context, *SingleGetValueRequest) (*SingleGetValueResponse, error)
+	// Set a single attribute
+	SetExtValue(context.Context, *SingleSetValueRequest) (*SingleSetValueResponse, error)
+}
+
+// UnimplementedExtensionServer can be embedded to have forward compatible implementations.
+type UnimplementedExtensionServer struct {
+}
+
+func (*UnimplementedExtensionServer) GetExtValue(ctx context.Context, req *SingleGetValueRequest) (*SingleGetValueResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetExtValue not implemented")
+}
+func (*UnimplementedExtensionServer) SetExtValue(ctx context.Context, req *SingleSetValueRequest) (*SingleSetValueResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SetExtValue not implemented")
+}
+
+func RegisterExtensionServer(s *grpc.Server, srv ExtensionServer) {
+	s.RegisterService(&_Extension_serviceDesc, srv)
+}
+
+func _Extension_GetExtValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(SingleGetValueRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ExtensionServer).GetExtValue(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/extension.Extension/GetExtValue",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ExtensionServer).GetExtValue(ctx, req.(*SingleGetValueRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _Extension_SetExtValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(SingleSetValueRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(ExtensionServer).SetExtValue(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/extension.Extension/SetExtValue",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(ExtensionServer).SetExtValue(ctx, req.(*SingleSetValueRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _Extension_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "extension.Extension",
+	HandlerType: (*ExtensionServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetExtValue",
+			Handler:    _Extension_GetExtValue_Handler,
+		},
+		{
+			MethodName: "SetExtValue",
+			Handler:    _Extension_SetExtValue_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "voltha_protos/extensions.proto",
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/health/health.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/health/health.pb.go
new file mode 100644
index 0000000..8b26126
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/health/health.pb.go
@@ -0,0 +1,208 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/health.proto
+
+package health
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Health states
+type HealthStatus_HealthState int32
+
+const (
+	HealthStatus_HEALTHY    HealthStatus_HealthState = 0
+	HealthStatus_OVERLOADED HealthStatus_HealthState = 1
+	HealthStatus_DYING      HealthStatus_HealthState = 2
+)
+
+var HealthStatus_HealthState_name = map[int32]string{
+	0: "HEALTHY",
+	1: "OVERLOADED",
+	2: "DYING",
+}
+
+var HealthStatus_HealthState_value = map[string]int32{
+	"HEALTHY":    0,
+	"OVERLOADED": 1,
+	"DYING":      2,
+}
+
+func (x HealthStatus_HealthState) String() string {
+	return proto.EnumName(HealthStatus_HealthState_name, int32(x))
+}
+
+func (HealthStatus_HealthState) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_dd1fc2b2d96d69b8, []int{0, 0}
+}
+
+// Encode health status of a Voltha instance
+type HealthStatus struct {
+	// Current state of health of this Voltha instance
+	State                HealthStatus_HealthState `protobuf:"varint,1,opt,name=state,proto3,enum=health.HealthStatus_HealthState" json:"state,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                 `json:"-"`
+	XXX_unrecognized     []byte                   `json:"-"`
+	XXX_sizecache        int32                    `json:"-"`
+}
+
+func (m *HealthStatus) Reset()         { *m = HealthStatus{} }
+func (m *HealthStatus) String() string { return proto.CompactTextString(m) }
+func (*HealthStatus) ProtoMessage()    {}
+func (*HealthStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_dd1fc2b2d96d69b8, []int{0}
+}
+
+func (m *HealthStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_HealthStatus.Unmarshal(m, b)
+}
+func (m *HealthStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_HealthStatus.Marshal(b, m, deterministic)
+}
+func (m *HealthStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_HealthStatus.Merge(m, src)
+}
+func (m *HealthStatus) XXX_Size() int {
+	return xxx_messageInfo_HealthStatus.Size(m)
+}
+func (m *HealthStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_HealthStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_HealthStatus proto.InternalMessageInfo
+
+func (m *HealthStatus) GetState() HealthStatus_HealthState {
+	if m != nil {
+		return m.State
+	}
+	return HealthStatus_HEALTHY
+}
+
+func init() {
+	proto.RegisterEnum("health.HealthStatus_HealthState", HealthStatus_HealthState_name, HealthStatus_HealthState_value)
+	proto.RegisterType((*HealthStatus)(nil), "health.HealthStatus")
+}
+
+func init() { proto.RegisterFile("voltha_protos/health.proto", fileDescriptor_dd1fc2b2d96d69b8) }
+
+var fileDescriptor_dd1fc2b2d96d69b8 = []byte{
+	// 287 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2a, 0xcb, 0xcf, 0x29,
+	0xc9, 0x48, 0x8c, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x2f, 0xd6, 0xcf, 0x48, 0x4d, 0xcc, 0x29, 0xc9,
+	0xd0, 0x03, 0xf3, 0x84, 0xd8, 0x20, 0x3c, 0x29, 0x99, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, 0xfd,
+	0xc4, 0x82, 0x4c, 0xfd, 0xc4, 0xbc, 0xbc, 0xfc, 0x92, 0xc4, 0x92, 0xcc, 0xfc, 0xbc, 0x62, 0x88,
+	0x2a, 0x29, 0x69, 0xa8, 0x2c, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x9a, 0x5b, 0x50, 0x52, 0x09,
+	0x91, 0x54, 0xaa, 0xe5, 0xe2, 0xf1, 0x00, 0x1b, 0x12, 0x5c, 0x92, 0x58, 0x52, 0x5a, 0x2c, 0x64,
+	0xc6, 0xc5, 0x5a, 0x5c, 0x92, 0x58, 0x92, 0x2a, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x67, 0xa4, 0xa0,
+	0x07, 0xb5, 0x10, 0x59, 0x11, 0x12, 0x27, 0x35, 0x08, 0xa2, 0x5c, 0xc9, 0x94, 0x8b, 0x1b, 0x49,
+	0x54, 0x88, 0x9b, 0x8b, 0xdd, 0xc3, 0xd5, 0xd1, 0x27, 0xc4, 0x23, 0x52, 0x80, 0x41, 0x88, 0x8f,
+	0x8b, 0xcb, 0x3f, 0xcc, 0x35, 0xc8, 0xc7, 0xdf, 0xd1, 0xc5, 0xd5, 0x45, 0x80, 0x51, 0x88, 0x93,
+	0x8b, 0xd5, 0x25, 0xd2, 0xd3, 0xcf, 0x5d, 0x80, 0xc9, 0x28, 0x91, 0x8b, 0x17, 0xaa, 0x2d, 0xb5,
+	0xa8, 0x2c, 0x33, 0x39, 0x55, 0x28, 0x80, 0x8b, 0xdf, 0x3d, 0xb5, 0x04, 0xc5, 0x49, 0x62, 0x7a,
+	0x10, 0x0f, 0xe8, 0xc1, 0x3c, 0xa0, 0xe7, 0x0a, 0xf2, 0x80, 0x94, 0x08, 0x36, 0xb7, 0x29, 0xf1,
+	0x37, 0x5d, 0x7e, 0x32, 0x99, 0x89, 0x53, 0x88, 0x1d, 0x1a, 0x54, 0x4e, 0x3e, 0x5c, 0x52, 0xf9,
+	0x45, 0xe9, 0x7a, 0xf9, 0x05, 0xa9, 0x79, 0xc9, 0xf9, 0x45, 0x29, 0x7a, 0x90, 0xf0, 0x84, 0xea,
+	0x8d, 0xd2, 0x4b, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x87, 0x29, 0xd1,
+	0x87, 0x28, 0xd1, 0x85, 0x06, 0x79, 0x99, 0xa9, 0x7e, 0x7a, 0x3e, 0xd4, 0xb4, 0x24, 0x36, 0xb0,
+	0xa0, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x83, 0x1f, 0x28, 0x85, 0x97, 0x01, 0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// HealthServiceClient is the client API for HealthService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type HealthServiceClient interface {
+	// Return current health status of a Voltha instance
+	GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*HealthStatus, error)
+}
+
+type healthServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewHealthServiceClient(cc *grpc.ClientConn) HealthServiceClient {
+	return &healthServiceClient{cc}
+}
+
+func (c *healthServiceClient) GetHealthStatus(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*HealthStatus, error) {
+	out := new(HealthStatus)
+	err := c.cc.Invoke(ctx, "/health.HealthService/GetHealthStatus", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// HealthServiceServer is the server API for HealthService service.
+type HealthServiceServer interface {
+	// Return current health status of a Voltha instance
+	GetHealthStatus(context.Context, *empty.Empty) (*HealthStatus, error)
+}
+
+// UnimplementedHealthServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedHealthServiceServer struct {
+}
+
+func (*UnimplementedHealthServiceServer) GetHealthStatus(ctx context.Context, req *empty.Empty) (*HealthStatus, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetHealthStatus not implemented")
+}
+
+func RegisterHealthServiceServer(s *grpc.Server, srv HealthServiceServer) {
+	s.RegisterService(&_HealthService_serviceDesc, srv)
+}
+
+func _HealthService_GetHealthStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(HealthServiceServer).GetHealthStatus(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/health.HealthService/GetHealthStatus",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(HealthServiceServer).GetHealthStatus(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _HealthService_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "health.HealthService",
+	HandlerType: (*HealthServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetHealthStatus",
+			Handler:    _HealthService_GetHealthStatus_Handler,
+		},
+	},
+	Streams:  []grpc.StreamDesc{},
+	Metadata: "voltha_protos/health.proto",
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_alarm_db.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_alarm_db.pb.go
new file mode 100644
index 0000000..b585bbb
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_alarm_db.pb.go
@@ -0,0 +1,512 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/omci_alarm_db.proto
+
+package omci
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type AlarmOpenOmciEventType_OpenOmciEventType int32
+
+const (
+	AlarmOpenOmciEventType_state_change AlarmOpenOmciEventType_OpenOmciEventType = 0
+)
+
+var AlarmOpenOmciEventType_OpenOmciEventType_name = map[int32]string{
+	0: "state_change",
+}
+
+var AlarmOpenOmciEventType_OpenOmciEventType_value = map[string]int32{
+	"state_change": 0,
+}
+
+func (x AlarmOpenOmciEventType_OpenOmciEventType) String() string {
+	return proto.EnumName(AlarmOpenOmciEventType_OpenOmciEventType_name, int32(x))
+}
+
+func (AlarmOpenOmciEventType_OpenOmciEventType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{6, 0}
+}
+
+type AlarmAttributeData struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Value                string   `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AlarmAttributeData) Reset()         { *m = AlarmAttributeData{} }
+func (m *AlarmAttributeData) String() string { return proto.CompactTextString(m) }
+func (*AlarmAttributeData) ProtoMessage()    {}
+func (*AlarmAttributeData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{0}
+}
+
+func (m *AlarmAttributeData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmAttributeData.Unmarshal(m, b)
+}
+func (m *AlarmAttributeData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmAttributeData.Marshal(b, m, deterministic)
+}
+func (m *AlarmAttributeData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmAttributeData.Merge(m, src)
+}
+func (m *AlarmAttributeData) XXX_Size() int {
+	return xxx_messageInfo_AlarmAttributeData.Size(m)
+}
+func (m *AlarmAttributeData) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmAttributeData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmAttributeData proto.InternalMessageInfo
+
+func (m *AlarmAttributeData) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *AlarmAttributeData) GetValue() string {
+	if m != nil {
+		return m.Value
+	}
+	return ""
+}
+
+type AlarmInstanceData struct {
+	InstanceId           uint32                `protobuf:"varint,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
+	Created              string                `protobuf:"bytes,2,opt,name=created,proto3" json:"created,omitempty"`
+	Modified             string                `protobuf:"bytes,3,opt,name=modified,proto3" json:"modified,omitempty"`
+	Attributes           []*AlarmAttributeData `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *AlarmInstanceData) Reset()         { *m = AlarmInstanceData{} }
+func (m *AlarmInstanceData) String() string { return proto.CompactTextString(m) }
+func (*AlarmInstanceData) ProtoMessage()    {}
+func (*AlarmInstanceData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{1}
+}
+
+func (m *AlarmInstanceData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmInstanceData.Unmarshal(m, b)
+}
+func (m *AlarmInstanceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmInstanceData.Marshal(b, m, deterministic)
+}
+func (m *AlarmInstanceData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmInstanceData.Merge(m, src)
+}
+func (m *AlarmInstanceData) XXX_Size() int {
+	return xxx_messageInfo_AlarmInstanceData.Size(m)
+}
+func (m *AlarmInstanceData) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmInstanceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmInstanceData proto.InternalMessageInfo
+
+func (m *AlarmInstanceData) GetInstanceId() uint32 {
+	if m != nil {
+		return m.InstanceId
+	}
+	return 0
+}
+
+func (m *AlarmInstanceData) GetCreated() string {
+	if m != nil {
+		return m.Created
+	}
+	return ""
+}
+
+func (m *AlarmInstanceData) GetModified() string {
+	if m != nil {
+		return m.Modified
+	}
+	return ""
+}
+
+func (m *AlarmInstanceData) GetAttributes() []*AlarmAttributeData {
+	if m != nil {
+		return m.Attributes
+	}
+	return nil
+}
+
+type AlarmClassData struct {
+	ClassId              uint32               `protobuf:"varint,1,opt,name=class_id,json=classId,proto3" json:"class_id,omitempty"`
+	Instances            []*AlarmInstanceData `protobuf:"bytes,2,rep,name=instances,proto3" json:"instances,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *AlarmClassData) Reset()         { *m = AlarmClassData{} }
+func (m *AlarmClassData) String() string { return proto.CompactTextString(m) }
+func (*AlarmClassData) ProtoMessage()    {}
+func (*AlarmClassData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{2}
+}
+
+func (m *AlarmClassData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmClassData.Unmarshal(m, b)
+}
+func (m *AlarmClassData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmClassData.Marshal(b, m, deterministic)
+}
+func (m *AlarmClassData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmClassData.Merge(m, src)
+}
+func (m *AlarmClassData) XXX_Size() int {
+	return xxx_messageInfo_AlarmClassData.Size(m)
+}
+func (m *AlarmClassData) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmClassData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmClassData proto.InternalMessageInfo
+
+func (m *AlarmClassData) GetClassId() uint32 {
+	if m != nil {
+		return m.ClassId
+	}
+	return 0
+}
+
+func (m *AlarmClassData) GetInstances() []*AlarmInstanceData {
+	if m != nil {
+		return m.Instances
+	}
+	return nil
+}
+
+type AlarmManagedEntity struct {
+	ClassId              uint32   `protobuf:"varint,1,opt,name=class_id,json=classId,proto3" json:"class_id,omitempty"`
+	Name                 string   `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AlarmManagedEntity) Reset()         { *m = AlarmManagedEntity{} }
+func (m *AlarmManagedEntity) String() string { return proto.CompactTextString(m) }
+func (*AlarmManagedEntity) ProtoMessage()    {}
+func (*AlarmManagedEntity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{3}
+}
+
+func (m *AlarmManagedEntity) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmManagedEntity.Unmarshal(m, b)
+}
+func (m *AlarmManagedEntity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmManagedEntity.Marshal(b, m, deterministic)
+}
+func (m *AlarmManagedEntity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmManagedEntity.Merge(m, src)
+}
+func (m *AlarmManagedEntity) XXX_Size() int {
+	return xxx_messageInfo_AlarmManagedEntity.Size(m)
+}
+func (m *AlarmManagedEntity) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmManagedEntity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmManagedEntity proto.InternalMessageInfo
+
+func (m *AlarmManagedEntity) GetClassId() uint32 {
+	if m != nil {
+		return m.ClassId
+	}
+	return 0
+}
+
+func (m *AlarmManagedEntity) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type AlarmMessageType struct {
+	MessageType          uint32   `protobuf:"varint,1,opt,name=message_type,json=messageType,proto3" json:"message_type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AlarmMessageType) Reset()         { *m = AlarmMessageType{} }
+func (m *AlarmMessageType) String() string { return proto.CompactTextString(m) }
+func (*AlarmMessageType) ProtoMessage()    {}
+func (*AlarmMessageType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{4}
+}
+
+func (m *AlarmMessageType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmMessageType.Unmarshal(m, b)
+}
+func (m *AlarmMessageType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmMessageType.Marshal(b, m, deterministic)
+}
+func (m *AlarmMessageType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmMessageType.Merge(m, src)
+}
+func (m *AlarmMessageType) XXX_Size() int {
+	return xxx_messageInfo_AlarmMessageType.Size(m)
+}
+func (m *AlarmMessageType) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmMessageType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmMessageType proto.InternalMessageInfo
+
+func (m *AlarmMessageType) GetMessageType() uint32 {
+	if m != nil {
+		return m.MessageType
+	}
+	return 0
+}
+
+type AlarmDeviceData struct {
+	DeviceId             string                `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	Created              string                `protobuf:"bytes,2,opt,name=created,proto3" json:"created,omitempty"`
+	LastAlarmSequence    uint32                `protobuf:"varint,3,opt,name=last_alarm_sequence,json=lastAlarmSequence,proto3" json:"last_alarm_sequence,omitempty"`
+	LastSyncTime         string                `protobuf:"bytes,4,opt,name=last_sync_time,json=lastSyncTime,proto3" json:"last_sync_time,omitempty"`
+	Version              uint32                `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"`
+	Classes              []*AlarmClassData     `protobuf:"bytes,6,rep,name=classes,proto3" json:"classes,omitempty"`
+	ManagedEntities      []*AlarmManagedEntity `protobuf:"bytes,7,rep,name=managed_entities,json=managedEntities,proto3" json:"managed_entities,omitempty"`
+	MessageTypes         []*AlarmMessageType   `protobuf:"bytes,8,rep,name=message_types,json=messageTypes,proto3" json:"message_types,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *AlarmDeviceData) Reset()         { *m = AlarmDeviceData{} }
+func (m *AlarmDeviceData) String() string { return proto.CompactTextString(m) }
+func (*AlarmDeviceData) ProtoMessage()    {}
+func (*AlarmDeviceData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{5}
+}
+
+func (m *AlarmDeviceData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmDeviceData.Unmarshal(m, b)
+}
+func (m *AlarmDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmDeviceData.Marshal(b, m, deterministic)
+}
+func (m *AlarmDeviceData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmDeviceData.Merge(m, src)
+}
+func (m *AlarmDeviceData) XXX_Size() int {
+	return xxx_messageInfo_AlarmDeviceData.Size(m)
+}
+func (m *AlarmDeviceData) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmDeviceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmDeviceData proto.InternalMessageInfo
+
+func (m *AlarmDeviceData) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *AlarmDeviceData) GetCreated() string {
+	if m != nil {
+		return m.Created
+	}
+	return ""
+}
+
+func (m *AlarmDeviceData) GetLastAlarmSequence() uint32 {
+	if m != nil {
+		return m.LastAlarmSequence
+	}
+	return 0
+}
+
+func (m *AlarmDeviceData) GetLastSyncTime() string {
+	if m != nil {
+		return m.LastSyncTime
+	}
+	return ""
+}
+
+func (m *AlarmDeviceData) GetVersion() uint32 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *AlarmDeviceData) GetClasses() []*AlarmClassData {
+	if m != nil {
+		return m.Classes
+	}
+	return nil
+}
+
+func (m *AlarmDeviceData) GetManagedEntities() []*AlarmManagedEntity {
+	if m != nil {
+		return m.ManagedEntities
+	}
+	return nil
+}
+
+func (m *AlarmDeviceData) GetMessageTypes() []*AlarmMessageType {
+	if m != nil {
+		return m.MessageTypes
+	}
+	return nil
+}
+
+type AlarmOpenOmciEventType struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AlarmOpenOmciEventType) Reset()         { *m = AlarmOpenOmciEventType{} }
+func (m *AlarmOpenOmciEventType) String() string { return proto.CompactTextString(m) }
+func (*AlarmOpenOmciEventType) ProtoMessage()    {}
+func (*AlarmOpenOmciEventType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{6}
+}
+
+func (m *AlarmOpenOmciEventType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmOpenOmciEventType.Unmarshal(m, b)
+}
+func (m *AlarmOpenOmciEventType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmOpenOmciEventType.Marshal(b, m, deterministic)
+}
+func (m *AlarmOpenOmciEventType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmOpenOmciEventType.Merge(m, src)
+}
+func (m *AlarmOpenOmciEventType) XXX_Size() int {
+	return xxx_messageInfo_AlarmOpenOmciEventType.Size(m)
+}
+func (m *AlarmOpenOmciEventType) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmOpenOmciEventType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmOpenOmciEventType proto.InternalMessageInfo
+
+type AlarmOpenOmciEvent struct {
+	Type                 AlarmOpenOmciEventType_OpenOmciEventType `protobuf:"varint,1,opt,name=type,proto3,enum=omci.AlarmOpenOmciEventType_OpenOmciEventType" json:"type,omitempty"`
+	Data                 string                                   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                                 `json:"-"`
+	XXX_unrecognized     []byte                                   `json:"-"`
+	XXX_sizecache        int32                                    `json:"-"`
+}
+
+func (m *AlarmOpenOmciEvent) Reset()         { *m = AlarmOpenOmciEvent{} }
+func (m *AlarmOpenOmciEvent) String() string { return proto.CompactTextString(m) }
+func (*AlarmOpenOmciEvent) ProtoMessage()    {}
+func (*AlarmOpenOmciEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_8d41f1e38aadb08d, []int{7}
+}
+
+func (m *AlarmOpenOmciEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AlarmOpenOmciEvent.Unmarshal(m, b)
+}
+func (m *AlarmOpenOmciEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AlarmOpenOmciEvent.Marshal(b, m, deterministic)
+}
+func (m *AlarmOpenOmciEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AlarmOpenOmciEvent.Merge(m, src)
+}
+func (m *AlarmOpenOmciEvent) XXX_Size() int {
+	return xxx_messageInfo_AlarmOpenOmciEvent.Size(m)
+}
+func (m *AlarmOpenOmciEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_AlarmOpenOmciEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AlarmOpenOmciEvent proto.InternalMessageInfo
+
+func (m *AlarmOpenOmciEvent) GetType() AlarmOpenOmciEventType_OpenOmciEventType {
+	if m != nil {
+		return m.Type
+	}
+	return AlarmOpenOmciEventType_state_change
+}
+
+func (m *AlarmOpenOmciEvent) GetData() string {
+	if m != nil {
+		return m.Data
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterEnum("omci.AlarmOpenOmciEventType_OpenOmciEventType", AlarmOpenOmciEventType_OpenOmciEventType_name, AlarmOpenOmciEventType_OpenOmciEventType_value)
+	proto.RegisterType((*AlarmAttributeData)(nil), "omci.AlarmAttributeData")
+	proto.RegisterType((*AlarmInstanceData)(nil), "omci.AlarmInstanceData")
+	proto.RegisterType((*AlarmClassData)(nil), "omci.AlarmClassData")
+	proto.RegisterType((*AlarmManagedEntity)(nil), "omci.AlarmManagedEntity")
+	proto.RegisterType((*AlarmMessageType)(nil), "omci.AlarmMessageType")
+	proto.RegisterType((*AlarmDeviceData)(nil), "omci.AlarmDeviceData")
+	proto.RegisterType((*AlarmOpenOmciEventType)(nil), "omci.AlarmOpenOmciEventType")
+	proto.RegisterType((*AlarmOpenOmciEvent)(nil), "omci.AlarmOpenOmciEvent")
+}
+
+func init() { proto.RegisterFile("voltha_protos/omci_alarm_db.proto", fileDescriptor_8d41f1e38aadb08d) }
+
+var fileDescriptor_8d41f1e38aadb08d = []byte{
+	// 557 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xc1, 0x6a, 0xdb, 0x40,
+	0x10, 0x6d, 0x1c, 0x27, 0xb1, 0x27, 0xb6, 0x63, 0x6f, 0x43, 0xba, 0x6d, 0x0f, 0x4d, 0x44, 0x0b,
+	0x39, 0xb4, 0x32, 0xa4, 0x18, 0x0a, 0x85, 0x96, 0xc4, 0x09, 0xd4, 0x87, 0x12, 0x50, 0x72, 0xea,
+	0x45, 0xac, 0xa5, 0xa9, 0xbc, 0xa0, 0xdd, 0x75, 0xb5, 0x6b, 0x81, 0xff, 0xa7, 0x5f, 0xd7, 0xaf,
+	0x28, 0x1a, 0x49, 0xb6, 0x82, 0x21, 0x37, 0xbd, 0xa7, 0xb7, 0x6f, 0x66, 0xdf, 0x0c, 0x0b, 0x17,
+	0xb9, 0x49, 0xdd, 0x42, 0x84, 0xcb, 0xcc, 0x38, 0x63, 0xc7, 0x46, 0x45, 0x32, 0x14, 0xa9, 0xc8,
+	0x54, 0x18, 0xcf, 0x7d, 0x22, 0x59, 0xbb, 0x20, 0xbd, 0x6f, 0xc0, 0xae, 0x0b, 0xfe, 0xda, 0xb9,
+	0x4c, 0xce, 0x57, 0x0e, 0x6f, 0x85, 0x13, 0x8c, 0x41, 0x5b, 0x0b, 0x85, 0x7c, 0xef, 0x7c, 0xef,
+	0xb2, 0x1b, 0xd0, 0x37, 0x3b, 0x85, 0x83, 0x5c, 0xa4, 0x2b, 0xe4, 0x2d, 0x22, 0x4b, 0xe0, 0xfd,
+	0xdd, 0x83, 0x11, 0x19, 0xcc, 0xb4, 0x75, 0x42, 0x47, 0xe5, 0xf9, 0x77, 0x70, 0x2c, 0x2b, 0x1c,
+	0xca, 0x98, 0x6c, 0xfa, 0x01, 0xd4, 0xd4, 0x2c, 0x66, 0x1c, 0x8e, 0xa2, 0x0c, 0x85, 0xc3, 0xb8,
+	0xb2, 0xab, 0x21, 0x7b, 0x03, 0x1d, 0x65, 0x62, 0xf9, 0x5b, 0x62, 0xcc, 0xf7, 0xe9, 0xd7, 0x06,
+	0xb3, 0x2f, 0x00, 0xa2, 0xee, 0xd3, 0xf2, 0xf6, 0xf9, 0xfe, 0xe5, 0xf1, 0x15, 0xf7, 0x8b, 0x7b,
+	0xf8, 0xbb, 0x97, 0x08, 0x1a, 0x5a, 0x6f, 0x0e, 0x03, 0x52, 0x4c, 0x53, 0x61, 0x2d, 0xb5, 0xf8,
+	0x1a, 0x3a, 0x51, 0x01, 0xb6, 0xfd, 0x1d, 0x11, 0x9e, 0xc5, 0x6c, 0x02, 0xdd, 0xba, 0x55, 0xcb,
+	0x5b, 0x54, 0xe5, 0x55, 0xa3, 0x4a, 0xf3, 0xa6, 0xc1, 0x56, 0xe9, 0x4d, 0xab, 0x28, 0x7f, 0x0a,
+	0x2d, 0x12, 0x8c, 0xef, 0xb4, 0x93, 0x6e, 0xfd, 0x5c, 0x9d, 0x3a, 0xe5, 0xd6, 0x36, 0x65, 0x6f,
+	0x02, 0xc3, 0xd2, 0x04, 0xad, 0x15, 0x09, 0x3e, 0xae, 0x97, 0xc8, 0x2e, 0xa0, 0xa7, 0x4a, 0x18,
+	0xba, 0xf5, 0x12, 0x2b, 0x9b, 0x63, 0xb5, 0x95, 0x78, 0xff, 0x5a, 0x70, 0x42, 0xe7, 0x6e, 0x31,
+	0x97, 0xd5, 0x10, 0xde, 0x42, 0x37, 0x26, 0x54, 0x97, 0xee, 0x06, 0x9d, 0x92, 0x78, 0x76, 0x00,
+	0x3e, 0xbc, 0x4c, 0x85, 0x75, 0xd5, 0xba, 0x58, 0xfc, 0xb3, 0x42, 0x1d, 0x21, 0xcd, 0xa2, 0x1f,
+	0x8c, 0x8a, 0x5f, 0x54, 0xe8, 0xa1, 0xfa, 0xc1, 0xde, 0xc3, 0x80, 0xf4, 0x76, 0xad, 0xa3, 0xd0,
+	0x49, 0x85, 0xbc, 0x4d, 0x86, 0xbd, 0x82, 0x7d, 0x58, 0xeb, 0xe8, 0x51, 0x2a, 0x2c, 0xea, 0xe5,
+	0x98, 0x59, 0x69, 0x34, 0x3f, 0x28, 0x53, 0xa8, 0x20, 0xf3, 0xa1, 0x0c, 0x04, 0x2d, 0x3f, 0xa4,
+	0xac, 0x4f, 0x1b, 0x59, 0x6f, 0xe6, 0x15, 0xd4, 0x22, 0x36, 0x85, 0xa1, 0x2a, 0x13, 0x0e, 0xb1,
+	0x88, 0x58, 0xa2, 0xe5, 0x47, 0x3b, 0xab, 0xf0, 0x64, 0x08, 0xc1, 0x89, 0x6a, 0x40, 0x89, 0x96,
+	0x7d, 0x85, 0x7e, 0x33, 0x52, 0xcb, 0x3b, 0xe4, 0x70, 0xd6, 0x74, 0xd8, 0xc6, 0x1b, 0xf4, 0x1a,
+	0x59, 0x5b, 0xef, 0x3b, 0x9c, 0x91, 0xe2, 0x7e, 0x89, 0xfa, 0x5e, 0x45, 0xf2, 0x2e, 0x47, 0xed,
+	0x68, 0x0c, 0x1f, 0x60, 0xb4, 0x43, 0xb2, 0x21, 0xf4, 0xac, 0x13, 0x0e, 0xc3, 0x68, 0x21, 0x74,
+	0x82, 0xc3, 0x17, 0x5e, 0x5a, 0x6d, 0xca, 0x13, 0x2d, 0xbb, 0x81, 0xf6, 0x66, 0xbc, 0x83, 0x2b,
+	0xbf, 0xd1, 0xca, 0x8e, 0xa7, 0xbf, 0xc3, 0x04, 0x74, 0xb6, 0x58, 0xa9, 0x58, 0x38, 0x51, 0xaf,
+	0x54, 0xf1, 0x7d, 0xf3, 0x03, 0xb8, 0xc9, 0x12, 0xdf, 0x2c, 0x51, 0x47, 0x26, 0x8b, 0xfd, 0xf2,
+	0x69, 0x20, 0xfb, 0x5f, 0x1f, 0x13, 0xe9, 0x16, 0xab, 0xb9, 0x1f, 0x19, 0x35, 0xae, 0x05, 0xe3,
+	0x52, 0xf0, 0xa9, 0x7a, 0x3b, 0xf2, 0xc9, 0x38, 0x31, 0xf4, 0x82, 0xcc, 0x0f, 0x89, 0xfa, 0xfc,
+	0x3f, 0x00, 0x00, 0xff, 0xff, 0x2f, 0xc0, 0x0c, 0x6c, 0x5e, 0x04, 0x00, 0x00,
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_mib_db.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_mib_db.pb.go
new file mode 100644
index 0000000..05b146a
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_mib_db.pb.go
@@ -0,0 +1,512 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/omci_mib_db.proto
+
+package omci
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type OpenOmciEventType_Types int32
+
+const (
+	OpenOmciEventType_state_change OpenOmciEventType_Types = 0
+)
+
+var OpenOmciEventType_Types_name = map[int32]string{
+	0: "state_change",
+}
+
+var OpenOmciEventType_Types_value = map[string]int32{
+	"state_change": 0,
+}
+
+func (x OpenOmciEventType_Types) String() string {
+	return proto.EnumName(OpenOmciEventType_Types_name, int32(x))
+}
+
+func (OpenOmciEventType_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{6, 0}
+}
+
+type MibAttributeData struct {
+	Name                 string   `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Value                string   `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MibAttributeData) Reset()         { *m = MibAttributeData{} }
+func (m *MibAttributeData) String() string { return proto.CompactTextString(m) }
+func (*MibAttributeData) ProtoMessage()    {}
+func (*MibAttributeData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{0}
+}
+
+func (m *MibAttributeData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MibAttributeData.Unmarshal(m, b)
+}
+func (m *MibAttributeData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MibAttributeData.Marshal(b, m, deterministic)
+}
+func (m *MibAttributeData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MibAttributeData.Merge(m, src)
+}
+func (m *MibAttributeData) XXX_Size() int {
+	return xxx_messageInfo_MibAttributeData.Size(m)
+}
+func (m *MibAttributeData) XXX_DiscardUnknown() {
+	xxx_messageInfo_MibAttributeData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MibAttributeData proto.InternalMessageInfo
+
+func (m *MibAttributeData) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *MibAttributeData) GetValue() string {
+	if m != nil {
+		return m.Value
+	}
+	return ""
+}
+
+type MibInstanceData struct {
+	InstanceId           uint32              `protobuf:"varint,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
+	Created              string              `protobuf:"bytes,2,opt,name=created,proto3" json:"created,omitempty"`
+	Modified             string              `protobuf:"bytes,3,opt,name=modified,proto3" json:"modified,omitempty"`
+	Attributes           []*MibAttributeData `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *MibInstanceData) Reset()         { *m = MibInstanceData{} }
+func (m *MibInstanceData) String() string { return proto.CompactTextString(m) }
+func (*MibInstanceData) ProtoMessage()    {}
+func (*MibInstanceData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{1}
+}
+
+func (m *MibInstanceData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MibInstanceData.Unmarshal(m, b)
+}
+func (m *MibInstanceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MibInstanceData.Marshal(b, m, deterministic)
+}
+func (m *MibInstanceData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MibInstanceData.Merge(m, src)
+}
+func (m *MibInstanceData) XXX_Size() int {
+	return xxx_messageInfo_MibInstanceData.Size(m)
+}
+func (m *MibInstanceData) XXX_DiscardUnknown() {
+	xxx_messageInfo_MibInstanceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MibInstanceData proto.InternalMessageInfo
+
+func (m *MibInstanceData) GetInstanceId() uint32 {
+	if m != nil {
+		return m.InstanceId
+	}
+	return 0
+}
+
+func (m *MibInstanceData) GetCreated() string {
+	if m != nil {
+		return m.Created
+	}
+	return ""
+}
+
+func (m *MibInstanceData) GetModified() string {
+	if m != nil {
+		return m.Modified
+	}
+	return ""
+}
+
+func (m *MibInstanceData) GetAttributes() []*MibAttributeData {
+	if m != nil {
+		return m.Attributes
+	}
+	return nil
+}
+
+type MibClassData struct {
+	ClassId              uint32             `protobuf:"varint,1,opt,name=class_id,json=classId,proto3" json:"class_id,omitempty"`
+	Instances            []*MibInstanceData `protobuf:"bytes,2,rep,name=instances,proto3" json:"instances,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *MibClassData) Reset()         { *m = MibClassData{} }
+func (m *MibClassData) String() string { return proto.CompactTextString(m) }
+func (*MibClassData) ProtoMessage()    {}
+func (*MibClassData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{2}
+}
+
+func (m *MibClassData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MibClassData.Unmarshal(m, b)
+}
+func (m *MibClassData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MibClassData.Marshal(b, m, deterministic)
+}
+func (m *MibClassData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MibClassData.Merge(m, src)
+}
+func (m *MibClassData) XXX_Size() int {
+	return xxx_messageInfo_MibClassData.Size(m)
+}
+func (m *MibClassData) XXX_DiscardUnknown() {
+	xxx_messageInfo_MibClassData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MibClassData proto.InternalMessageInfo
+
+func (m *MibClassData) GetClassId() uint32 {
+	if m != nil {
+		return m.ClassId
+	}
+	return 0
+}
+
+func (m *MibClassData) GetInstances() []*MibInstanceData {
+	if m != nil {
+		return m.Instances
+	}
+	return nil
+}
+
+type ManagedEntity struct {
+	ClassId              uint32   `protobuf:"varint,1,opt,name=class_id,json=classId,proto3" json:"class_id,omitempty"`
+	Name                 string   `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ManagedEntity) Reset()         { *m = ManagedEntity{} }
+func (m *ManagedEntity) String() string { return proto.CompactTextString(m) }
+func (*ManagedEntity) ProtoMessage()    {}
+func (*ManagedEntity) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{3}
+}
+
+func (m *ManagedEntity) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ManagedEntity.Unmarshal(m, b)
+}
+func (m *ManagedEntity) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ManagedEntity.Marshal(b, m, deterministic)
+}
+func (m *ManagedEntity) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ManagedEntity.Merge(m, src)
+}
+func (m *ManagedEntity) XXX_Size() int {
+	return xxx_messageInfo_ManagedEntity.Size(m)
+}
+func (m *ManagedEntity) XXX_DiscardUnknown() {
+	xxx_messageInfo_ManagedEntity.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ManagedEntity proto.InternalMessageInfo
+
+func (m *ManagedEntity) GetClassId() uint32 {
+	if m != nil {
+		return m.ClassId
+	}
+	return 0
+}
+
+func (m *ManagedEntity) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+type MessageType struct {
+	MessageType          uint32   `protobuf:"varint,1,opt,name=message_type,json=messageType,proto3" json:"message_type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *MessageType) Reset()         { *m = MessageType{} }
+func (m *MessageType) String() string { return proto.CompactTextString(m) }
+func (*MessageType) ProtoMessage()    {}
+func (*MessageType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{4}
+}
+
+func (m *MessageType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MessageType.Unmarshal(m, b)
+}
+func (m *MessageType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MessageType.Marshal(b, m, deterministic)
+}
+func (m *MessageType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MessageType.Merge(m, src)
+}
+func (m *MessageType) XXX_Size() int {
+	return xxx_messageInfo_MessageType.Size(m)
+}
+func (m *MessageType) XXX_DiscardUnknown() {
+	xxx_messageInfo_MessageType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MessageType proto.InternalMessageInfo
+
+func (m *MessageType) GetMessageType() uint32 {
+	if m != nil {
+		return m.MessageType
+	}
+	return 0
+}
+
+type MibDeviceData struct {
+	DeviceId             string           `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	Created              string           `protobuf:"bytes,2,opt,name=created,proto3" json:"created,omitempty"`
+	LastSyncTime         string           `protobuf:"bytes,3,opt,name=last_sync_time,json=lastSyncTime,proto3" json:"last_sync_time,omitempty"`
+	MibDataSync          uint32           `protobuf:"varint,4,opt,name=mib_data_sync,json=mibDataSync,proto3" json:"mib_data_sync,omitempty"`
+	Version              uint32           `protobuf:"varint,5,opt,name=version,proto3" json:"version,omitempty"`
+	Classes              []*MibClassData  `protobuf:"bytes,6,rep,name=classes,proto3" json:"classes,omitempty"`
+	ManagedEntities      []*ManagedEntity `protobuf:"bytes,7,rep,name=managed_entities,json=managedEntities,proto3" json:"managed_entities,omitempty"`
+	MessageTypes         []*MessageType   `protobuf:"bytes,8,rep,name=message_types,json=messageTypes,proto3" json:"message_types,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *MibDeviceData) Reset()         { *m = MibDeviceData{} }
+func (m *MibDeviceData) String() string { return proto.CompactTextString(m) }
+func (*MibDeviceData) ProtoMessage()    {}
+func (*MibDeviceData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{5}
+}
+
+func (m *MibDeviceData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MibDeviceData.Unmarshal(m, b)
+}
+func (m *MibDeviceData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MibDeviceData.Marshal(b, m, deterministic)
+}
+func (m *MibDeviceData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MibDeviceData.Merge(m, src)
+}
+func (m *MibDeviceData) XXX_Size() int {
+	return xxx_messageInfo_MibDeviceData.Size(m)
+}
+func (m *MibDeviceData) XXX_DiscardUnknown() {
+	xxx_messageInfo_MibDeviceData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MibDeviceData proto.InternalMessageInfo
+
+func (m *MibDeviceData) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *MibDeviceData) GetCreated() string {
+	if m != nil {
+		return m.Created
+	}
+	return ""
+}
+
+func (m *MibDeviceData) GetLastSyncTime() string {
+	if m != nil {
+		return m.LastSyncTime
+	}
+	return ""
+}
+
+func (m *MibDeviceData) GetMibDataSync() uint32 {
+	if m != nil {
+		return m.MibDataSync
+	}
+	return 0
+}
+
+func (m *MibDeviceData) GetVersion() uint32 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *MibDeviceData) GetClasses() []*MibClassData {
+	if m != nil {
+		return m.Classes
+	}
+	return nil
+}
+
+func (m *MibDeviceData) GetManagedEntities() []*ManagedEntity {
+	if m != nil {
+		return m.ManagedEntities
+	}
+	return nil
+}
+
+func (m *MibDeviceData) GetMessageTypes() []*MessageType {
+	if m != nil {
+		return m.MessageTypes
+	}
+	return nil
+}
+
+type OpenOmciEventType struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OpenOmciEventType) Reset()         { *m = OpenOmciEventType{} }
+func (m *OpenOmciEventType) String() string { return proto.CompactTextString(m) }
+func (*OpenOmciEventType) ProtoMessage()    {}
+func (*OpenOmciEventType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{6}
+}
+
+func (m *OpenOmciEventType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OpenOmciEventType.Unmarshal(m, b)
+}
+func (m *OpenOmciEventType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OpenOmciEventType.Marshal(b, m, deterministic)
+}
+func (m *OpenOmciEventType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OpenOmciEventType.Merge(m, src)
+}
+func (m *OpenOmciEventType) XXX_Size() int {
+	return xxx_messageInfo_OpenOmciEventType.Size(m)
+}
+func (m *OpenOmciEventType) XXX_DiscardUnknown() {
+	xxx_messageInfo_OpenOmciEventType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OpenOmciEventType proto.InternalMessageInfo
+
+type OpenOmciEvent struct {
+	Type                 OpenOmciEventType_Types `protobuf:"varint,1,opt,name=type,proto3,enum=omci.OpenOmciEventType_Types" json:"type,omitempty"`
+	Data                 string                  `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                `json:"-"`
+	XXX_unrecognized     []byte                  `json:"-"`
+	XXX_sizecache        int32                   `json:"-"`
+}
+
+func (m *OpenOmciEvent) Reset()         { *m = OpenOmciEvent{} }
+func (m *OpenOmciEvent) String() string { return proto.CompactTextString(m) }
+func (*OpenOmciEvent) ProtoMessage()    {}
+func (*OpenOmciEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_4fa402a2df36dcc1, []int{7}
+}
+
+func (m *OpenOmciEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OpenOmciEvent.Unmarshal(m, b)
+}
+func (m *OpenOmciEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OpenOmciEvent.Marshal(b, m, deterministic)
+}
+func (m *OpenOmciEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OpenOmciEvent.Merge(m, src)
+}
+func (m *OpenOmciEvent) XXX_Size() int {
+	return xxx_messageInfo_OpenOmciEvent.Size(m)
+}
+func (m *OpenOmciEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_OpenOmciEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OpenOmciEvent proto.InternalMessageInfo
+
+func (m *OpenOmciEvent) GetType() OpenOmciEventType_Types {
+	if m != nil {
+		return m.Type
+	}
+	return OpenOmciEventType_state_change
+}
+
+func (m *OpenOmciEvent) GetData() string {
+	if m != nil {
+		return m.Data
+	}
+	return ""
+}
+
+func init() {
+	proto.RegisterEnum("omci.OpenOmciEventType_Types", OpenOmciEventType_Types_name, OpenOmciEventType_Types_value)
+	proto.RegisterType((*MibAttributeData)(nil), "omci.MibAttributeData")
+	proto.RegisterType((*MibInstanceData)(nil), "omci.MibInstanceData")
+	proto.RegisterType((*MibClassData)(nil), "omci.MibClassData")
+	proto.RegisterType((*ManagedEntity)(nil), "omci.ManagedEntity")
+	proto.RegisterType((*MessageType)(nil), "omci.MessageType")
+	proto.RegisterType((*MibDeviceData)(nil), "omci.MibDeviceData")
+	proto.RegisterType((*OpenOmciEventType)(nil), "omci.OpenOmciEventType")
+	proto.RegisterType((*OpenOmciEvent)(nil), "omci.OpenOmciEvent")
+}
+
+func init() { proto.RegisterFile("voltha_protos/omci_mib_db.proto", fileDescriptor_4fa402a2df36dcc1) }
+
+var fileDescriptor_4fa402a2df36dcc1 = []byte{
+	// 548 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x93, 0x4d, 0x6f, 0xd3, 0x30,
+	0x18, 0xc7, 0x69, 0xd7, 0xae, 0xed, 0xd3, 0x66, 0xeb, 0xcc, 0x8b, 0x3c, 0x10, 0xda, 0x88, 0x38,
+	0xec, 0x30, 0x52, 0xd8, 0xc4, 0x4e, 0x68, 0x12, 0xb0, 0x49, 0xf4, 0x10, 0x4d, 0x0a, 0x13, 0x07,
+	0x0e, 0x44, 0x4e, 0xf2, 0x90, 0x5a, 0xaa, 0xed, 0x2a, 0x76, 0x23, 0xf5, 0xdb, 0xf0, 0x91, 0xf8,
+	0x48, 0xc8, 0x4e, 0xd2, 0x06, 0x90, 0x76, 0xcb, 0xff, 0xc9, 0xff, 0x79, 0xfb, 0xd9, 0x86, 0x93,
+	0x52, 0x2d, 0xcd, 0x82, 0xc5, 0xab, 0x42, 0x19, 0xa5, 0x67, 0x4a, 0xa4, 0x3c, 0x16, 0x3c, 0x89,
+	0xb3, 0x24, 0x70, 0x21, 0xd2, 0xb3, 0x21, 0xff, 0x03, 0x4c, 0x43, 0x9e, 0x7c, 0x34, 0xa6, 0xe0,
+	0xc9, 0xda, 0xe0, 0x0d, 0x33, 0x8c, 0x10, 0xe8, 0x49, 0x26, 0x90, 0x76, 0x4e, 0x3b, 0x67, 0xa3,
+	0xc8, 0x7d, 0x93, 0x27, 0xd0, 0x2f, 0xd9, 0x72, 0x8d, 0xb4, 0xeb, 0x82, 0x95, 0xf0, 0x7f, 0x75,
+	0xe0, 0x30, 0xe4, 0xc9, 0x5c, 0x6a, 0xc3, 0x64, 0x5a, 0x65, 0x9f, 0xc0, 0x98, 0xd7, 0x3a, 0xe6,
+	0x99, 0x2b, 0xe2, 0x45, 0xd0, 0x84, 0xe6, 0x19, 0xa1, 0x30, 0x48, 0x0b, 0x64, 0x06, 0xb3, 0xba,
+	0x58, 0x23, 0xc9, 0x73, 0x18, 0x0a, 0x95, 0xf1, 0x9f, 0x1c, 0x33, 0xba, 0xe7, 0x7e, 0x6d, 0x35,
+	0xb9, 0x02, 0x60, 0xcd, 0x94, 0x9a, 0xf6, 0x4e, 0xf7, 0xce, 0xc6, 0x17, 0xcf, 0x02, 0xbb, 0x43,
+	0xf0, 0xef, 0x02, 0x51, 0xcb, 0xe9, 0xff, 0x80, 0x49, 0xc8, 0x93, 0xcf, 0x4b, 0xa6, 0xb5, 0x1b,
+	0xef, 0x18, 0x86, 0xa9, 0x15, 0xbb, 0xd9, 0x06, 0x4e, 0xcf, 0x33, 0x72, 0x09, 0xa3, 0x66, 0x4c,
+	0x4d, 0xbb, 0xae, 0xc3, 0xd3, 0x6d, 0x87, 0xf6, 0x8e, 0xd1, 0xce, 0xe7, 0x5f, 0x83, 0x17, 0x32,
+	0xc9, 0x72, 0xcc, 0x6e, 0xa5, 0xe1, 0x66, 0xf3, 0x50, 0x83, 0x06, 0x6c, 0x77, 0x07, 0xd6, 0x7f,
+	0x0b, 0xe3, 0x10, 0xb5, 0x66, 0x39, 0xde, 0x6f, 0x56, 0x48, 0x5e, 0xc1, 0x44, 0x54, 0x32, 0x36,
+	0x9b, 0x15, 0xd6, 0x15, 0xc6, 0x62, 0x67, 0xf1, 0x7f, 0x77, 0xc1, 0x0b, 0x79, 0x72, 0x83, 0x25,
+	0xaf, 0x91, 0xbf, 0x80, 0x51, 0xe6, 0x54, 0xd3, 0x73, 0x14, 0x0d, 0xab, 0xc0, 0x83, 0xb8, 0x5f,
+	0xc3, 0xc1, 0x92, 0x69, 0x13, 0xeb, 0x8d, 0x4c, 0x63, 0xc3, 0x05, 0xd6, 0xd0, 0x27, 0x36, 0xfa,
+	0x75, 0x23, 0xd3, 0x7b, 0x2e, 0x90, 0xf8, 0xe0, 0xb9, 0x7b, 0xc3, 0x0c, 0x73, 0x4e, 0xda, 0xab,
+	0x47, 0xe2, 0x89, 0x6d, 0x6e, 0x7d, 0xb6, 0x47, 0x89, 0x85, 0xe6, 0x4a, 0xd2, 0x7e, 0xb5, 0x72,
+	0x2d, 0xc9, 0x39, 0x54, 0xdb, 0xa3, 0xa6, 0xfb, 0x8e, 0x28, 0xd9, 0x12, 0xdd, 0x9e, 0x49, 0xd4,
+	0x58, 0xc8, 0x35, 0x4c, 0x45, 0x05, 0x33, 0x46, 0x4b, 0x93, 0xa3, 0xa6, 0x03, 0x97, 0xf6, 0xb8,
+	0x4e, 0x6b, 0xa3, 0x8e, 0x0e, 0x45, 0x4b, 0x72, 0xd4, 0xe4, 0x0a, 0xbc, 0x36, 0x3d, 0x4d, 0x87,
+	0x2e, 0xf9, 0xa8, 0x4e, 0xde, 0x41, 0x8c, 0x26, 0x2d, 0xa2, 0xda, 0x0f, 0xe0, 0xe8, 0x6e, 0x85,
+	0xf2, 0x4e, 0xa4, 0xfc, 0xb6, 0x44, 0x69, 0x1c, 0xe7, 0x63, 0xe8, 0xbb, 0xbf, 0x64, 0x0a, 0x13,
+	0x6d, 0x98, 0xc1, 0x38, 0x5d, 0x30, 0x99, 0xe3, 0xf4, 0x91, 0xff, 0x0d, 0xbc, 0xbf, 0xfc, 0xe4,
+	0x1d, 0xf4, 0xb6, 0xc7, 0x75, 0x70, 0xf1, 0xb2, 0xea, 0xf7, 0x5f, 0xc9, 0xc0, 0xd5, 0x8b, 0x9c,
+	0xd5, 0x5e, 0x06, 0xcb, 0xb4, 0xb9, 0x0c, 0xf6, 0xfb, 0xd3, 0x17, 0xa0, 0xaa, 0xc8, 0x03, 0xb5,
+	0x42, 0x99, 0xaa, 0x22, 0x0b, 0xaa, 0x37, 0xec, 0xaa, 0x7d, 0x3f, 0xcf, 0xb9, 0x59, 0xac, 0x93,
+	0x20, 0x55, 0x62, 0xd6, 0x18, 0x66, 0x95, 0xe1, 0x4d, 0xfd, 0xc8, 0xcb, 0xf7, 0xb3, 0x5c, 0xb9,
+	0xa7, 0x9e, 0xec, 0xbb, 0xd0, 0xe5, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc6, 0x6b, 0x99, 0x13,
+	0x07, 0x04, 0x00, 0x00,
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_test.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_test.pb.go
new file mode 100644
index 0000000..6d32ae0
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/omci/omci_test.pb.go
@@ -0,0 +1,159 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/omci_test.proto
+
+package omci
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type TestResponse_TestResponseResult int32
+
+const (
+	TestResponse_SUCCESS TestResponse_TestResponseResult = 0
+	TestResponse_FAILURE TestResponse_TestResponseResult = 1
+)
+
+var TestResponse_TestResponseResult_name = map[int32]string{
+	0: "SUCCESS",
+	1: "FAILURE",
+}
+
+var TestResponse_TestResponseResult_value = map[string]int32{
+	"SUCCESS": 0,
+	"FAILURE": 1,
+}
+
+func (x TestResponse_TestResponseResult) String() string {
+	return proto.EnumName(TestResponse_TestResponseResult_name, int32(x))
+}
+
+func (TestResponse_TestResponseResult) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_146dc5f97baf9397, []int{1, 0}
+}
+
+type OmciTestRequest struct {
+	Id                   string   `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Uuid                 string   `protobuf:"bytes,2,opt,name=uuid,proto3" json:"uuid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OmciTestRequest) Reset()         { *m = OmciTestRequest{} }
+func (m *OmciTestRequest) String() string { return proto.CompactTextString(m) }
+func (*OmciTestRequest) ProtoMessage()    {}
+func (*OmciTestRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_146dc5f97baf9397, []int{0}
+}
+
+func (m *OmciTestRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OmciTestRequest.Unmarshal(m, b)
+}
+func (m *OmciTestRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OmciTestRequest.Marshal(b, m, deterministic)
+}
+func (m *OmciTestRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OmciTestRequest.Merge(m, src)
+}
+func (m *OmciTestRequest) XXX_Size() int {
+	return xxx_messageInfo_OmciTestRequest.Size(m)
+}
+func (m *OmciTestRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OmciTestRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OmciTestRequest proto.InternalMessageInfo
+
+func (m *OmciTestRequest) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *OmciTestRequest) GetUuid() string {
+	if m != nil {
+		return m.Uuid
+	}
+	return ""
+}
+
+type TestResponse struct {
+	Result               TestResponse_TestResponseResult `protobuf:"varint,1,opt,name=result,proto3,enum=omci.TestResponse_TestResponseResult" json:"result,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
+	XXX_unrecognized     []byte                          `json:"-"`
+	XXX_sizecache        int32                           `json:"-"`
+}
+
+func (m *TestResponse) Reset()         { *m = TestResponse{} }
+func (m *TestResponse) String() string { return proto.CompactTextString(m) }
+func (*TestResponse) ProtoMessage()    {}
+func (*TestResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_146dc5f97baf9397, []int{1}
+}
+
+func (m *TestResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_TestResponse.Unmarshal(m, b)
+}
+func (m *TestResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_TestResponse.Marshal(b, m, deterministic)
+}
+func (m *TestResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_TestResponse.Merge(m, src)
+}
+func (m *TestResponse) XXX_Size() int {
+	return xxx_messageInfo_TestResponse.Size(m)
+}
+func (m *TestResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_TestResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_TestResponse proto.InternalMessageInfo
+
+func (m *TestResponse) GetResult() TestResponse_TestResponseResult {
+	if m != nil {
+		return m.Result
+	}
+	return TestResponse_SUCCESS
+}
+
+func init() {
+	proto.RegisterEnum("omci.TestResponse_TestResponseResult", TestResponse_TestResponseResult_name, TestResponse_TestResponseResult_value)
+	proto.RegisterType((*OmciTestRequest)(nil), "omci.OmciTestRequest")
+	proto.RegisterType((*TestResponse)(nil), "omci.TestResponse")
+}
+
+func init() { proto.RegisterFile("voltha_protos/omci_test.proto", fileDescriptor_146dc5f97baf9397) }
+
+var fileDescriptor_146dc5f97baf9397 = []byte{
+	// 230 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x2d, 0xcb, 0xcf, 0x29,
+	0xc9, 0x48, 0x8c, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x2f, 0xd6, 0xcf, 0xcf, 0x4d, 0xce, 0x8c, 0x2f,
+	0x49, 0x2d, 0x2e, 0xd1, 0x03, 0x0b, 0x08, 0xb1, 0x80, 0x04, 0x94, 0x4c, 0xb9, 0xf8, 0xfd, 0x73,
+	0x93, 0x33, 0x43, 0x52, 0x8b, 0x4b, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0xf8, 0xb8,
+	0x98, 0x32, 0x53, 0x24, 0x18, 0x15, 0x18, 0x35, 0x38, 0x83, 0x98, 0x32, 0x53, 0x84, 0x84, 0xb8,
+	0x58, 0x4a, 0x4b, 0x33, 0x53, 0x24, 0x98, 0xc0, 0x22, 0x60, 0xb6, 0x52, 0x2d, 0x17, 0x0f, 0x44,
+	0x4b, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x90, 0x2d, 0x17, 0x5b, 0x51, 0x6a, 0x71, 0x69, 0x4e,
+	0x09, 0x58, 0x1f, 0x9f, 0x91, 0xaa, 0x1e, 0xc8, 0x74, 0x3d, 0x64, 0x35, 0x28, 0x9c, 0x20, 0xb0,
+	0xe2, 0x20, 0xa8, 0x26, 0x25, 0x3d, 0x2e, 0x21, 0x4c, 0x59, 0x21, 0x6e, 0x2e, 0xf6, 0xe0, 0x50,
+	0x67, 0x67, 0xd7, 0xe0, 0x60, 0x01, 0x06, 0x10, 0xc7, 0xcd, 0xd1, 0xd3, 0x27, 0x34, 0xc8, 0x55,
+	0x80, 0xd1, 0xc9, 0x83, 0x4b, 0x22, 0xbf, 0x28, 0x5d, 0x2f, 0xbf, 0x20, 0x35, 0x2f, 0x39, 0xbf,
+	0x28, 0x45, 0x0f, 0xe2, 0x53, 0xb0, 0x9d, 0x51, 0x3a, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a,
+	0xc9, 0xf9, 0xb9, 0xfa, 0x30, 0x05, 0xfa, 0x10, 0x05, 0xba, 0xd0, 0xa0, 0x28, 0x33, 0xd5, 0x4f,
+	0xcf, 0x07, 0x07, 0x48, 0x12, 0x1b, 0x58, 0xc8, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x85, 0xc9,
+	0x07, 0x50, 0x2d, 0x01, 0x00, 0x00,
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/openflow_13/openflow_13.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/openflow_13/openflow_13.pb.go
new file mode 100644
index 0000000..18505f0
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/openflow_13/openflow_13.pb.go
@@ -0,0 +1,9995 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/openflow_13.proto
+
+package openflow_13
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// Port numbering. Ports are numbered starting from 1.
+type OfpPortNo int32
+
+const (
+	OfpPortNo_OFPP_INVALID OfpPortNo = 0
+	// Maximum number of physical and logical switch ports.
+	OfpPortNo_OFPP_MAX OfpPortNo = 2147483392
+	// Reserved OpenFlow Port (fake output "ports").
+	OfpPortNo_OFPP_IN_PORT    OfpPortNo = 2147483640
+	OfpPortNo_OFPP_TABLE      OfpPortNo = 2147483641
+	OfpPortNo_OFPP_NORMAL     OfpPortNo = 2147483642
+	OfpPortNo_OFPP_FLOOD      OfpPortNo = 2147483643
+	OfpPortNo_OFPP_ALL        OfpPortNo = 2147483644
+	OfpPortNo_OFPP_CONTROLLER OfpPortNo = 2147483645
+	OfpPortNo_OFPP_LOCAL      OfpPortNo = 2147483646
+	OfpPortNo_OFPP_ANY        OfpPortNo = 2147483647
+)
+
+var OfpPortNo_name = map[int32]string{
+	0:          "OFPP_INVALID",
+	2147483392: "OFPP_MAX",
+	2147483640: "OFPP_IN_PORT",
+	2147483641: "OFPP_TABLE",
+	2147483642: "OFPP_NORMAL",
+	2147483643: "OFPP_FLOOD",
+	2147483644: "OFPP_ALL",
+	2147483645: "OFPP_CONTROLLER",
+	2147483646: "OFPP_LOCAL",
+	2147483647: "OFPP_ANY",
+}
+
+var OfpPortNo_value = map[string]int32{
+	"OFPP_INVALID":    0,
+	"OFPP_MAX":        2147483392,
+	"OFPP_IN_PORT":    2147483640,
+	"OFPP_TABLE":      2147483641,
+	"OFPP_NORMAL":     2147483642,
+	"OFPP_FLOOD":      2147483643,
+	"OFPP_ALL":        2147483644,
+	"OFPP_CONTROLLER": 2147483645,
+	"OFPP_LOCAL":      2147483646,
+	"OFPP_ANY":        2147483647,
+}
+
+func (x OfpPortNo) String() string {
+	return proto.EnumName(OfpPortNo_name, int32(x))
+}
+
+func (OfpPortNo) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{0}
+}
+
+type OfpType int32
+
+const (
+	// Immutable messages.
+	OfpType_OFPT_HELLO        OfpType = 0
+	OfpType_OFPT_ERROR        OfpType = 1
+	OfpType_OFPT_ECHO_REQUEST OfpType = 2
+	OfpType_OFPT_ECHO_REPLY   OfpType = 3
+	OfpType_OFPT_EXPERIMENTER OfpType = 4
+	// Switch configuration messages.
+	OfpType_OFPT_FEATURES_REQUEST   OfpType = 5
+	OfpType_OFPT_FEATURES_REPLY     OfpType = 6
+	OfpType_OFPT_GET_CONFIG_REQUEST OfpType = 7
+	OfpType_OFPT_GET_CONFIG_REPLY   OfpType = 8
+	OfpType_OFPT_SET_CONFIG         OfpType = 9
+	// Asynchronous messages.
+	OfpType_OFPT_PACKET_IN    OfpType = 10
+	OfpType_OFPT_FLOW_REMOVED OfpType = 11
+	OfpType_OFPT_PORT_STATUS  OfpType = 12
+	// Controller command messages.
+	OfpType_OFPT_PACKET_OUT OfpType = 13
+	OfpType_OFPT_FLOW_MOD   OfpType = 14
+	OfpType_OFPT_GROUP_MOD  OfpType = 15
+	OfpType_OFPT_PORT_MOD   OfpType = 16
+	OfpType_OFPT_TABLE_MOD  OfpType = 17
+	// Multipart messages.
+	OfpType_OFPT_MULTIPART_REQUEST OfpType = 18
+	OfpType_OFPT_MULTIPART_REPLY   OfpType = 19
+	// Barrier messages.
+	OfpType_OFPT_BARRIER_REQUEST OfpType = 20
+	OfpType_OFPT_BARRIER_REPLY   OfpType = 21
+	// Queue Configuration messages.
+	OfpType_OFPT_QUEUE_GET_CONFIG_REQUEST OfpType = 22
+	OfpType_OFPT_QUEUE_GET_CONFIG_REPLY   OfpType = 23
+	// Controller role change request messages.
+	OfpType_OFPT_ROLE_REQUEST OfpType = 24
+	OfpType_OFPT_ROLE_REPLY   OfpType = 25
+	// Asynchronous message configuration.
+	OfpType_OFPT_GET_ASYNC_REQUEST OfpType = 26
+	OfpType_OFPT_GET_ASYNC_REPLY   OfpType = 27
+	OfpType_OFPT_SET_ASYNC         OfpType = 28
+	// Meters and rate limiters configuration messages.
+	OfpType_OFPT_METER_MOD OfpType = 29
+)
+
+var OfpType_name = map[int32]string{
+	0:  "OFPT_HELLO",
+	1:  "OFPT_ERROR",
+	2:  "OFPT_ECHO_REQUEST",
+	3:  "OFPT_ECHO_REPLY",
+	4:  "OFPT_EXPERIMENTER",
+	5:  "OFPT_FEATURES_REQUEST",
+	6:  "OFPT_FEATURES_REPLY",
+	7:  "OFPT_GET_CONFIG_REQUEST",
+	8:  "OFPT_GET_CONFIG_REPLY",
+	9:  "OFPT_SET_CONFIG",
+	10: "OFPT_PACKET_IN",
+	11: "OFPT_FLOW_REMOVED",
+	12: "OFPT_PORT_STATUS",
+	13: "OFPT_PACKET_OUT",
+	14: "OFPT_FLOW_MOD",
+	15: "OFPT_GROUP_MOD",
+	16: "OFPT_PORT_MOD",
+	17: "OFPT_TABLE_MOD",
+	18: "OFPT_MULTIPART_REQUEST",
+	19: "OFPT_MULTIPART_REPLY",
+	20: "OFPT_BARRIER_REQUEST",
+	21: "OFPT_BARRIER_REPLY",
+	22: "OFPT_QUEUE_GET_CONFIG_REQUEST",
+	23: "OFPT_QUEUE_GET_CONFIG_REPLY",
+	24: "OFPT_ROLE_REQUEST",
+	25: "OFPT_ROLE_REPLY",
+	26: "OFPT_GET_ASYNC_REQUEST",
+	27: "OFPT_GET_ASYNC_REPLY",
+	28: "OFPT_SET_ASYNC",
+	29: "OFPT_METER_MOD",
+}
+
+var OfpType_value = map[string]int32{
+	"OFPT_HELLO":                    0,
+	"OFPT_ERROR":                    1,
+	"OFPT_ECHO_REQUEST":             2,
+	"OFPT_ECHO_REPLY":               3,
+	"OFPT_EXPERIMENTER":             4,
+	"OFPT_FEATURES_REQUEST":         5,
+	"OFPT_FEATURES_REPLY":           6,
+	"OFPT_GET_CONFIG_REQUEST":       7,
+	"OFPT_GET_CONFIG_REPLY":         8,
+	"OFPT_SET_CONFIG":               9,
+	"OFPT_PACKET_IN":                10,
+	"OFPT_FLOW_REMOVED":             11,
+	"OFPT_PORT_STATUS":              12,
+	"OFPT_PACKET_OUT":               13,
+	"OFPT_FLOW_MOD":                 14,
+	"OFPT_GROUP_MOD":                15,
+	"OFPT_PORT_MOD":                 16,
+	"OFPT_TABLE_MOD":                17,
+	"OFPT_MULTIPART_REQUEST":        18,
+	"OFPT_MULTIPART_REPLY":          19,
+	"OFPT_BARRIER_REQUEST":          20,
+	"OFPT_BARRIER_REPLY":            21,
+	"OFPT_QUEUE_GET_CONFIG_REQUEST": 22,
+	"OFPT_QUEUE_GET_CONFIG_REPLY":   23,
+	"OFPT_ROLE_REQUEST":             24,
+	"OFPT_ROLE_REPLY":               25,
+	"OFPT_GET_ASYNC_REQUEST":        26,
+	"OFPT_GET_ASYNC_REPLY":          27,
+	"OFPT_SET_ASYNC":                28,
+	"OFPT_METER_MOD":                29,
+}
+
+func (x OfpType) String() string {
+	return proto.EnumName(OfpType_name, int32(x))
+}
+
+func (OfpType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{1}
+}
+
+// Hello elements types.
+type OfpHelloElemType int32
+
+const (
+	OfpHelloElemType_OFPHET_INVALID       OfpHelloElemType = 0
+	OfpHelloElemType_OFPHET_VERSIONBITMAP OfpHelloElemType = 1
+)
+
+var OfpHelloElemType_name = map[int32]string{
+	0: "OFPHET_INVALID",
+	1: "OFPHET_VERSIONBITMAP",
+}
+
+var OfpHelloElemType_value = map[string]int32{
+	"OFPHET_INVALID":       0,
+	"OFPHET_VERSIONBITMAP": 1,
+}
+
+func (x OfpHelloElemType) String() string {
+	return proto.EnumName(OfpHelloElemType_name, int32(x))
+}
+
+func (OfpHelloElemType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{2}
+}
+
+type OfpConfigFlags int32
+
+const (
+	// Handling of IP fragments.
+	OfpConfigFlags_OFPC_FRAG_NORMAL OfpConfigFlags = 0
+	OfpConfigFlags_OFPC_FRAG_DROP   OfpConfigFlags = 1
+	OfpConfigFlags_OFPC_FRAG_REASM  OfpConfigFlags = 2
+	OfpConfigFlags_OFPC_FRAG_MASK   OfpConfigFlags = 3
+)
+
+var OfpConfigFlags_name = map[int32]string{
+	0: "OFPC_FRAG_NORMAL",
+	1: "OFPC_FRAG_DROP",
+	2: "OFPC_FRAG_REASM",
+	3: "OFPC_FRAG_MASK",
+}
+
+var OfpConfigFlags_value = map[string]int32{
+	"OFPC_FRAG_NORMAL": 0,
+	"OFPC_FRAG_DROP":   1,
+	"OFPC_FRAG_REASM":  2,
+	"OFPC_FRAG_MASK":   3,
+}
+
+func (x OfpConfigFlags) String() string {
+	return proto.EnumName(OfpConfigFlags_name, int32(x))
+}
+
+func (OfpConfigFlags) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{3}
+}
+
+// Flags to configure the table. Reserved for future use.
+type OfpTableConfig int32
+
+const (
+	OfpTableConfig_OFPTC_INVALID         OfpTableConfig = 0
+	OfpTableConfig_OFPTC_DEPRECATED_MASK OfpTableConfig = 3
+)
+
+var OfpTableConfig_name = map[int32]string{
+	0: "OFPTC_INVALID",
+	3: "OFPTC_DEPRECATED_MASK",
+}
+
+var OfpTableConfig_value = map[string]int32{
+	"OFPTC_INVALID":         0,
+	"OFPTC_DEPRECATED_MASK": 3,
+}
+
+func (x OfpTableConfig) String() string {
+	return proto.EnumName(OfpTableConfig_name, int32(x))
+}
+
+func (OfpTableConfig) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{4}
+}
+
+// Table numbering. Tables can use any number up to OFPT_MAX.
+type OfpTable int32
+
+const (
+	OfpTable_OFPTT_INVALID OfpTable = 0
+	// Last usable table number.
+	OfpTable_OFPTT_MAX OfpTable = 254
+	// Fake tables.
+	OfpTable_OFPTT_ALL OfpTable = 255
+)
+
+var OfpTable_name = map[int32]string{
+	0:   "OFPTT_INVALID",
+	254: "OFPTT_MAX",
+	255: "OFPTT_ALL",
+}
+
+var OfpTable_value = map[string]int32{
+	"OFPTT_INVALID": 0,
+	"OFPTT_MAX":     254,
+	"OFPTT_ALL":     255,
+}
+
+func (x OfpTable) String() string {
+	return proto.EnumName(OfpTable_name, int32(x))
+}
+
+func (OfpTable) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{5}
+}
+
+// Capabilities supported by the datapath.
+type OfpCapabilities int32
+
+const (
+	OfpCapabilities_OFPC_INVALID      OfpCapabilities = 0
+	OfpCapabilities_OFPC_FLOW_STATS   OfpCapabilities = 1
+	OfpCapabilities_OFPC_TABLE_STATS  OfpCapabilities = 2
+	OfpCapabilities_OFPC_PORT_STATS   OfpCapabilities = 4
+	OfpCapabilities_OFPC_GROUP_STATS  OfpCapabilities = 8
+	OfpCapabilities_OFPC_IP_REASM     OfpCapabilities = 32
+	OfpCapabilities_OFPC_QUEUE_STATS  OfpCapabilities = 64
+	OfpCapabilities_OFPC_PORT_BLOCKED OfpCapabilities = 256
+)
+
+var OfpCapabilities_name = map[int32]string{
+	0:   "OFPC_INVALID",
+	1:   "OFPC_FLOW_STATS",
+	2:   "OFPC_TABLE_STATS",
+	4:   "OFPC_PORT_STATS",
+	8:   "OFPC_GROUP_STATS",
+	32:  "OFPC_IP_REASM",
+	64:  "OFPC_QUEUE_STATS",
+	256: "OFPC_PORT_BLOCKED",
+}
+
+var OfpCapabilities_value = map[string]int32{
+	"OFPC_INVALID":      0,
+	"OFPC_FLOW_STATS":   1,
+	"OFPC_TABLE_STATS":  2,
+	"OFPC_PORT_STATS":   4,
+	"OFPC_GROUP_STATS":  8,
+	"OFPC_IP_REASM":     32,
+	"OFPC_QUEUE_STATS":  64,
+	"OFPC_PORT_BLOCKED": 256,
+}
+
+func (x OfpCapabilities) String() string {
+	return proto.EnumName(OfpCapabilities_name, int32(x))
+}
+
+func (OfpCapabilities) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{6}
+}
+
+// Flags to indicate behavior of the physical port.  These flags are
+// used in ofp_port to describe the current configuration.  They are
+// used in the ofp_port_mod message to configure the port's behavior.
+type OfpPortConfig int32
+
+const (
+	OfpPortConfig_OFPPC_INVALID      OfpPortConfig = 0
+	OfpPortConfig_OFPPC_PORT_DOWN    OfpPortConfig = 1
+	OfpPortConfig_OFPPC_NO_RECV      OfpPortConfig = 4
+	OfpPortConfig_OFPPC_NO_FWD       OfpPortConfig = 32
+	OfpPortConfig_OFPPC_NO_PACKET_IN OfpPortConfig = 64
+)
+
+var OfpPortConfig_name = map[int32]string{
+	0:  "OFPPC_INVALID",
+	1:  "OFPPC_PORT_DOWN",
+	4:  "OFPPC_NO_RECV",
+	32: "OFPPC_NO_FWD",
+	64: "OFPPC_NO_PACKET_IN",
+}
+
+var OfpPortConfig_value = map[string]int32{
+	"OFPPC_INVALID":      0,
+	"OFPPC_PORT_DOWN":    1,
+	"OFPPC_NO_RECV":      4,
+	"OFPPC_NO_FWD":       32,
+	"OFPPC_NO_PACKET_IN": 64,
+}
+
+func (x OfpPortConfig) String() string {
+	return proto.EnumName(OfpPortConfig_name, int32(x))
+}
+
+func (OfpPortConfig) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{7}
+}
+
+// Current state of the physical port.  These are not configurable from
+// the controller.
+type OfpPortState int32
+
+const (
+	OfpPortState_OFPPS_INVALID   OfpPortState = 0
+	OfpPortState_OFPPS_LINK_DOWN OfpPortState = 1
+	OfpPortState_OFPPS_BLOCKED   OfpPortState = 2
+	OfpPortState_OFPPS_LIVE      OfpPortState = 4
+)
+
+var OfpPortState_name = map[int32]string{
+	0: "OFPPS_INVALID",
+	1: "OFPPS_LINK_DOWN",
+	2: "OFPPS_BLOCKED",
+	4: "OFPPS_LIVE",
+}
+
+var OfpPortState_value = map[string]int32{
+	"OFPPS_INVALID":   0,
+	"OFPPS_LINK_DOWN": 1,
+	"OFPPS_BLOCKED":   2,
+	"OFPPS_LIVE":      4,
+}
+
+func (x OfpPortState) String() string {
+	return proto.EnumName(OfpPortState_name, int32(x))
+}
+
+func (OfpPortState) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{8}
+}
+
+// Features of ports available in a datapath.
+type OfpPortFeatures int32
+
+const (
+	OfpPortFeatures_OFPPF_INVALID    OfpPortFeatures = 0
+	OfpPortFeatures_OFPPF_10MB_HD    OfpPortFeatures = 1
+	OfpPortFeatures_OFPPF_10MB_FD    OfpPortFeatures = 2
+	OfpPortFeatures_OFPPF_100MB_HD   OfpPortFeatures = 4
+	OfpPortFeatures_OFPPF_100MB_FD   OfpPortFeatures = 8
+	OfpPortFeatures_OFPPF_1GB_HD     OfpPortFeatures = 16
+	OfpPortFeatures_OFPPF_1GB_FD     OfpPortFeatures = 32
+	OfpPortFeatures_OFPPF_10GB_FD    OfpPortFeatures = 64
+	OfpPortFeatures_OFPPF_40GB_FD    OfpPortFeatures = 128
+	OfpPortFeatures_OFPPF_100GB_FD   OfpPortFeatures = 256
+	OfpPortFeatures_OFPPF_1TB_FD     OfpPortFeatures = 512
+	OfpPortFeatures_OFPPF_OTHER      OfpPortFeatures = 1024
+	OfpPortFeatures_OFPPF_COPPER     OfpPortFeatures = 2048
+	OfpPortFeatures_OFPPF_FIBER      OfpPortFeatures = 4096
+	OfpPortFeatures_OFPPF_AUTONEG    OfpPortFeatures = 8192
+	OfpPortFeatures_OFPPF_PAUSE      OfpPortFeatures = 16384
+	OfpPortFeatures_OFPPF_PAUSE_ASYM OfpPortFeatures = 32768
+)
+
+var OfpPortFeatures_name = map[int32]string{
+	0:     "OFPPF_INVALID",
+	1:     "OFPPF_10MB_HD",
+	2:     "OFPPF_10MB_FD",
+	4:     "OFPPF_100MB_HD",
+	8:     "OFPPF_100MB_FD",
+	16:    "OFPPF_1GB_HD",
+	32:    "OFPPF_1GB_FD",
+	64:    "OFPPF_10GB_FD",
+	128:   "OFPPF_40GB_FD",
+	256:   "OFPPF_100GB_FD",
+	512:   "OFPPF_1TB_FD",
+	1024:  "OFPPF_OTHER",
+	2048:  "OFPPF_COPPER",
+	4096:  "OFPPF_FIBER",
+	8192:  "OFPPF_AUTONEG",
+	16384: "OFPPF_PAUSE",
+	32768: "OFPPF_PAUSE_ASYM",
+}
+
+var OfpPortFeatures_value = map[string]int32{
+	"OFPPF_INVALID":    0,
+	"OFPPF_10MB_HD":    1,
+	"OFPPF_10MB_FD":    2,
+	"OFPPF_100MB_HD":   4,
+	"OFPPF_100MB_FD":   8,
+	"OFPPF_1GB_HD":     16,
+	"OFPPF_1GB_FD":     32,
+	"OFPPF_10GB_FD":    64,
+	"OFPPF_40GB_FD":    128,
+	"OFPPF_100GB_FD":   256,
+	"OFPPF_1TB_FD":     512,
+	"OFPPF_OTHER":      1024,
+	"OFPPF_COPPER":     2048,
+	"OFPPF_FIBER":      4096,
+	"OFPPF_AUTONEG":    8192,
+	"OFPPF_PAUSE":      16384,
+	"OFPPF_PAUSE_ASYM": 32768,
+}
+
+func (x OfpPortFeatures) String() string {
+	return proto.EnumName(OfpPortFeatures_name, int32(x))
+}
+
+func (OfpPortFeatures) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{9}
+}
+
+// What changed about the physical port
+type OfpPortReason int32
+
+const (
+	OfpPortReason_OFPPR_ADD    OfpPortReason = 0
+	OfpPortReason_OFPPR_DELETE OfpPortReason = 1
+	OfpPortReason_OFPPR_MODIFY OfpPortReason = 2
+)
+
+var OfpPortReason_name = map[int32]string{
+	0: "OFPPR_ADD",
+	1: "OFPPR_DELETE",
+	2: "OFPPR_MODIFY",
+}
+
+var OfpPortReason_value = map[string]int32{
+	"OFPPR_ADD":    0,
+	"OFPPR_DELETE": 1,
+	"OFPPR_MODIFY": 2,
+}
+
+func (x OfpPortReason) String() string {
+	return proto.EnumName(OfpPortReason_name, int32(x))
+}
+
+func (OfpPortReason) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{10}
+}
+
+// What changed about the physical device
+type OfpDeviceConnection int32
+
+const (
+	OfpDeviceConnection_OFPDEV_CONNECTED    OfpDeviceConnection = 0
+	OfpDeviceConnection_OFPDEV_DISCONNECTED OfpDeviceConnection = 1
+)
+
+var OfpDeviceConnection_name = map[int32]string{
+	0: "OFPDEV_CONNECTED",
+	1: "OFPDEV_DISCONNECTED",
+}
+
+var OfpDeviceConnection_value = map[string]int32{
+	"OFPDEV_CONNECTED":    0,
+	"OFPDEV_DISCONNECTED": 1,
+}
+
+func (x OfpDeviceConnection) String() string {
+	return proto.EnumName(OfpDeviceConnection_name, int32(x))
+}
+
+func (OfpDeviceConnection) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{11}
+}
+
+// The match type indicates the match structure (set of fields that compose the
+// match) in use. The match type is placed in the type field at the beginning
+// of all match structures. The "OpenFlow Extensible Match" type corresponds
+// to OXM TLV format described below and must be supported by all OpenFlow
+// switches. Extensions that define other match types may be published on the
+// ONF wiki. Support for extensions is optional.
+type OfpMatchType int32
+
+const (
+	OfpMatchType_OFPMT_STANDARD OfpMatchType = 0
+	OfpMatchType_OFPMT_OXM      OfpMatchType = 1
+)
+
+var OfpMatchType_name = map[int32]string{
+	0: "OFPMT_STANDARD",
+	1: "OFPMT_OXM",
+}
+
+var OfpMatchType_value = map[string]int32{
+	"OFPMT_STANDARD": 0,
+	"OFPMT_OXM":      1,
+}
+
+func (x OfpMatchType) String() string {
+	return proto.EnumName(OfpMatchType_name, int32(x))
+}
+
+func (OfpMatchType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{12}
+}
+
+// OXM Class IDs.
+// The high order bit differentiate reserved classes from member classes.
+// Classes 0x0000 to 0x7FFF are member classes, allocated by ONF.
+// Classes 0x8000 to 0xFFFE are reserved classes, reserved for standardisation.
+type OfpOxmClass int32
+
+const (
+	OfpOxmClass_OFPXMC_NXM_0          OfpOxmClass = 0
+	OfpOxmClass_OFPXMC_NXM_1          OfpOxmClass = 1
+	OfpOxmClass_OFPXMC_OPENFLOW_BASIC OfpOxmClass = 32768
+	OfpOxmClass_OFPXMC_EXPERIMENTER   OfpOxmClass = 65535
+)
+
+var OfpOxmClass_name = map[int32]string{
+	0:     "OFPXMC_NXM_0",
+	1:     "OFPXMC_NXM_1",
+	32768: "OFPXMC_OPENFLOW_BASIC",
+	65535: "OFPXMC_EXPERIMENTER",
+}
+
+var OfpOxmClass_value = map[string]int32{
+	"OFPXMC_NXM_0":          0,
+	"OFPXMC_NXM_1":          1,
+	"OFPXMC_OPENFLOW_BASIC": 32768,
+	"OFPXMC_EXPERIMENTER":   65535,
+}
+
+func (x OfpOxmClass) String() string {
+	return proto.EnumName(OfpOxmClass_name, int32(x))
+}
+
+func (OfpOxmClass) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{13}
+}
+
+// OXM Flow field types for OpenFlow basic class.
+type OxmOfbFieldTypes int32
+
+const (
+	OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT        OxmOfbFieldTypes = 0
+	OxmOfbFieldTypes_OFPXMT_OFB_IN_PHY_PORT    OxmOfbFieldTypes = 1
+	OxmOfbFieldTypes_OFPXMT_OFB_METADATA       OxmOfbFieldTypes = 2
+	OxmOfbFieldTypes_OFPXMT_OFB_ETH_DST        OxmOfbFieldTypes = 3
+	OxmOfbFieldTypes_OFPXMT_OFB_ETH_SRC        OxmOfbFieldTypes = 4
+	OxmOfbFieldTypes_OFPXMT_OFB_ETH_TYPE       OxmOfbFieldTypes = 5
+	OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID       OxmOfbFieldTypes = 6
+	OxmOfbFieldTypes_OFPXMT_OFB_VLAN_PCP       OxmOfbFieldTypes = 7
+	OxmOfbFieldTypes_OFPXMT_OFB_IP_DSCP        OxmOfbFieldTypes = 8
+	OxmOfbFieldTypes_OFPXMT_OFB_IP_ECN         OxmOfbFieldTypes = 9
+	OxmOfbFieldTypes_OFPXMT_OFB_IP_PROTO       OxmOfbFieldTypes = 10
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV4_SRC       OxmOfbFieldTypes = 11
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV4_DST       OxmOfbFieldTypes = 12
+	OxmOfbFieldTypes_OFPXMT_OFB_TCP_SRC        OxmOfbFieldTypes = 13
+	OxmOfbFieldTypes_OFPXMT_OFB_TCP_DST        OxmOfbFieldTypes = 14
+	OxmOfbFieldTypes_OFPXMT_OFB_UDP_SRC        OxmOfbFieldTypes = 15
+	OxmOfbFieldTypes_OFPXMT_OFB_UDP_DST        OxmOfbFieldTypes = 16
+	OxmOfbFieldTypes_OFPXMT_OFB_SCTP_SRC       OxmOfbFieldTypes = 17
+	OxmOfbFieldTypes_OFPXMT_OFB_SCTP_DST       OxmOfbFieldTypes = 18
+	OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_TYPE    OxmOfbFieldTypes = 19
+	OxmOfbFieldTypes_OFPXMT_OFB_ICMPV4_CODE    OxmOfbFieldTypes = 20
+	OxmOfbFieldTypes_OFPXMT_OFB_ARP_OP         OxmOfbFieldTypes = 21
+	OxmOfbFieldTypes_OFPXMT_OFB_ARP_SPA        OxmOfbFieldTypes = 22
+	OxmOfbFieldTypes_OFPXMT_OFB_ARP_TPA        OxmOfbFieldTypes = 23
+	OxmOfbFieldTypes_OFPXMT_OFB_ARP_SHA        OxmOfbFieldTypes = 24
+	OxmOfbFieldTypes_OFPXMT_OFB_ARP_THA        OxmOfbFieldTypes = 25
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_SRC       OxmOfbFieldTypes = 26
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_DST       OxmOfbFieldTypes = 27
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_FLABEL    OxmOfbFieldTypes = 28
+	OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_TYPE    OxmOfbFieldTypes = 29
+	OxmOfbFieldTypes_OFPXMT_OFB_ICMPV6_CODE    OxmOfbFieldTypes = 30
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TARGET OxmOfbFieldTypes = 31
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_SLL    OxmOfbFieldTypes = 32
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_ND_TLL    OxmOfbFieldTypes = 33
+	OxmOfbFieldTypes_OFPXMT_OFB_MPLS_LABEL     OxmOfbFieldTypes = 34
+	OxmOfbFieldTypes_OFPXMT_OFB_MPLS_TC        OxmOfbFieldTypes = 35
+	OxmOfbFieldTypes_OFPXMT_OFB_MPLS_BOS       OxmOfbFieldTypes = 36
+	OxmOfbFieldTypes_OFPXMT_OFB_PBB_ISID       OxmOfbFieldTypes = 37
+	OxmOfbFieldTypes_OFPXMT_OFB_TUNNEL_ID      OxmOfbFieldTypes = 38
+	OxmOfbFieldTypes_OFPXMT_OFB_IPV6_EXTHDR    OxmOfbFieldTypes = 39
+)
+
+var OxmOfbFieldTypes_name = map[int32]string{
+	0:  "OFPXMT_OFB_IN_PORT",
+	1:  "OFPXMT_OFB_IN_PHY_PORT",
+	2:  "OFPXMT_OFB_METADATA",
+	3:  "OFPXMT_OFB_ETH_DST",
+	4:  "OFPXMT_OFB_ETH_SRC",
+	5:  "OFPXMT_OFB_ETH_TYPE",
+	6:  "OFPXMT_OFB_VLAN_VID",
+	7:  "OFPXMT_OFB_VLAN_PCP",
+	8:  "OFPXMT_OFB_IP_DSCP",
+	9:  "OFPXMT_OFB_IP_ECN",
+	10: "OFPXMT_OFB_IP_PROTO",
+	11: "OFPXMT_OFB_IPV4_SRC",
+	12: "OFPXMT_OFB_IPV4_DST",
+	13: "OFPXMT_OFB_TCP_SRC",
+	14: "OFPXMT_OFB_TCP_DST",
+	15: "OFPXMT_OFB_UDP_SRC",
+	16: "OFPXMT_OFB_UDP_DST",
+	17: "OFPXMT_OFB_SCTP_SRC",
+	18: "OFPXMT_OFB_SCTP_DST",
+	19: "OFPXMT_OFB_ICMPV4_TYPE",
+	20: "OFPXMT_OFB_ICMPV4_CODE",
+	21: "OFPXMT_OFB_ARP_OP",
+	22: "OFPXMT_OFB_ARP_SPA",
+	23: "OFPXMT_OFB_ARP_TPA",
+	24: "OFPXMT_OFB_ARP_SHA",
+	25: "OFPXMT_OFB_ARP_THA",
+	26: "OFPXMT_OFB_IPV6_SRC",
+	27: "OFPXMT_OFB_IPV6_DST",
+	28: "OFPXMT_OFB_IPV6_FLABEL",
+	29: "OFPXMT_OFB_ICMPV6_TYPE",
+	30: "OFPXMT_OFB_ICMPV6_CODE",
+	31: "OFPXMT_OFB_IPV6_ND_TARGET",
+	32: "OFPXMT_OFB_IPV6_ND_SLL",
+	33: "OFPXMT_OFB_IPV6_ND_TLL",
+	34: "OFPXMT_OFB_MPLS_LABEL",
+	35: "OFPXMT_OFB_MPLS_TC",
+	36: "OFPXMT_OFB_MPLS_BOS",
+	37: "OFPXMT_OFB_PBB_ISID",
+	38: "OFPXMT_OFB_TUNNEL_ID",
+	39: "OFPXMT_OFB_IPV6_EXTHDR",
+}
+
+var OxmOfbFieldTypes_value = map[string]int32{
+	"OFPXMT_OFB_IN_PORT":        0,
+	"OFPXMT_OFB_IN_PHY_PORT":    1,
+	"OFPXMT_OFB_METADATA":       2,
+	"OFPXMT_OFB_ETH_DST":        3,
+	"OFPXMT_OFB_ETH_SRC":        4,
+	"OFPXMT_OFB_ETH_TYPE":       5,
+	"OFPXMT_OFB_VLAN_VID":       6,
+	"OFPXMT_OFB_VLAN_PCP":       7,
+	"OFPXMT_OFB_IP_DSCP":        8,
+	"OFPXMT_OFB_IP_ECN":         9,
+	"OFPXMT_OFB_IP_PROTO":       10,
+	"OFPXMT_OFB_IPV4_SRC":       11,
+	"OFPXMT_OFB_IPV4_DST":       12,
+	"OFPXMT_OFB_TCP_SRC":        13,
+	"OFPXMT_OFB_TCP_DST":        14,
+	"OFPXMT_OFB_UDP_SRC":        15,
+	"OFPXMT_OFB_UDP_DST":        16,
+	"OFPXMT_OFB_SCTP_SRC":       17,
+	"OFPXMT_OFB_SCTP_DST":       18,
+	"OFPXMT_OFB_ICMPV4_TYPE":    19,
+	"OFPXMT_OFB_ICMPV4_CODE":    20,
+	"OFPXMT_OFB_ARP_OP":         21,
+	"OFPXMT_OFB_ARP_SPA":        22,
+	"OFPXMT_OFB_ARP_TPA":        23,
+	"OFPXMT_OFB_ARP_SHA":        24,
+	"OFPXMT_OFB_ARP_THA":        25,
+	"OFPXMT_OFB_IPV6_SRC":       26,
+	"OFPXMT_OFB_IPV6_DST":       27,
+	"OFPXMT_OFB_IPV6_FLABEL":    28,
+	"OFPXMT_OFB_ICMPV6_TYPE":    29,
+	"OFPXMT_OFB_ICMPV6_CODE":    30,
+	"OFPXMT_OFB_IPV6_ND_TARGET": 31,
+	"OFPXMT_OFB_IPV6_ND_SLL":    32,
+	"OFPXMT_OFB_IPV6_ND_TLL":    33,
+	"OFPXMT_OFB_MPLS_LABEL":     34,
+	"OFPXMT_OFB_MPLS_TC":        35,
+	"OFPXMT_OFB_MPLS_BOS":       36,
+	"OFPXMT_OFB_PBB_ISID":       37,
+	"OFPXMT_OFB_TUNNEL_ID":      38,
+	"OFPXMT_OFB_IPV6_EXTHDR":    39,
+}
+
+func (x OxmOfbFieldTypes) String() string {
+	return proto.EnumName(OxmOfbFieldTypes_name, int32(x))
+}
+
+func (OxmOfbFieldTypes) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{14}
+}
+
+// The VLAN id is 12-bits, so we can use the entire 16 bits to indicate
+// special conditions.
+type OfpVlanId int32
+
+const (
+	OfpVlanId_OFPVID_NONE    OfpVlanId = 0
+	OfpVlanId_OFPVID_PRESENT OfpVlanId = 4096
+)
+
+var OfpVlanId_name = map[int32]string{
+	0:    "OFPVID_NONE",
+	4096: "OFPVID_PRESENT",
+}
+
+var OfpVlanId_value = map[string]int32{
+	"OFPVID_NONE":    0,
+	"OFPVID_PRESENT": 4096,
+}
+
+func (x OfpVlanId) String() string {
+	return proto.EnumName(OfpVlanId_name, int32(x))
+}
+
+func (OfpVlanId) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{15}
+}
+
+// Bit definitions for IPv6 Extension Header pseudo-field.
+type OfpIpv6ExthdrFlags int32
+
+const (
+	OfpIpv6ExthdrFlags_OFPIEH_INVALID OfpIpv6ExthdrFlags = 0
+	OfpIpv6ExthdrFlags_OFPIEH_NONEXT  OfpIpv6ExthdrFlags = 1
+	OfpIpv6ExthdrFlags_OFPIEH_ESP     OfpIpv6ExthdrFlags = 2
+	OfpIpv6ExthdrFlags_OFPIEH_AUTH    OfpIpv6ExthdrFlags = 4
+	OfpIpv6ExthdrFlags_OFPIEH_DEST    OfpIpv6ExthdrFlags = 8
+	OfpIpv6ExthdrFlags_OFPIEH_FRAG    OfpIpv6ExthdrFlags = 16
+	OfpIpv6ExthdrFlags_OFPIEH_ROUTER  OfpIpv6ExthdrFlags = 32
+	OfpIpv6ExthdrFlags_OFPIEH_HOP     OfpIpv6ExthdrFlags = 64
+	OfpIpv6ExthdrFlags_OFPIEH_UNREP   OfpIpv6ExthdrFlags = 128
+	OfpIpv6ExthdrFlags_OFPIEH_UNSEQ   OfpIpv6ExthdrFlags = 256
+)
+
+var OfpIpv6ExthdrFlags_name = map[int32]string{
+	0:   "OFPIEH_INVALID",
+	1:   "OFPIEH_NONEXT",
+	2:   "OFPIEH_ESP",
+	4:   "OFPIEH_AUTH",
+	8:   "OFPIEH_DEST",
+	16:  "OFPIEH_FRAG",
+	32:  "OFPIEH_ROUTER",
+	64:  "OFPIEH_HOP",
+	128: "OFPIEH_UNREP",
+	256: "OFPIEH_UNSEQ",
+}
+
+var OfpIpv6ExthdrFlags_value = map[string]int32{
+	"OFPIEH_INVALID": 0,
+	"OFPIEH_NONEXT":  1,
+	"OFPIEH_ESP":     2,
+	"OFPIEH_AUTH":    4,
+	"OFPIEH_DEST":    8,
+	"OFPIEH_FRAG":    16,
+	"OFPIEH_ROUTER":  32,
+	"OFPIEH_HOP":     64,
+	"OFPIEH_UNREP":   128,
+	"OFPIEH_UNSEQ":   256,
+}
+
+func (x OfpIpv6ExthdrFlags) String() string {
+	return proto.EnumName(OfpIpv6ExthdrFlags_name, int32(x))
+}
+
+func (OfpIpv6ExthdrFlags) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{16}
+}
+
+type OfpActionType int32
+
+const (
+	OfpActionType_OFPAT_OUTPUT       OfpActionType = 0
+	OfpActionType_OFPAT_COPY_TTL_OUT OfpActionType = 11
+	OfpActionType_OFPAT_COPY_TTL_IN  OfpActionType = 12
+	OfpActionType_OFPAT_SET_MPLS_TTL OfpActionType = 15
+	OfpActionType_OFPAT_DEC_MPLS_TTL OfpActionType = 16
+	OfpActionType_OFPAT_PUSH_VLAN    OfpActionType = 17
+	OfpActionType_OFPAT_POP_VLAN     OfpActionType = 18
+	OfpActionType_OFPAT_PUSH_MPLS    OfpActionType = 19
+	OfpActionType_OFPAT_POP_MPLS     OfpActionType = 20
+	OfpActionType_OFPAT_SET_QUEUE    OfpActionType = 21
+	OfpActionType_OFPAT_GROUP        OfpActionType = 22
+	OfpActionType_OFPAT_SET_NW_TTL   OfpActionType = 23
+	OfpActionType_OFPAT_DEC_NW_TTL   OfpActionType = 24
+	OfpActionType_OFPAT_SET_FIELD    OfpActionType = 25
+	OfpActionType_OFPAT_PUSH_PBB     OfpActionType = 26
+	OfpActionType_OFPAT_POP_PBB      OfpActionType = 27
+	OfpActionType_OFPAT_EXPERIMENTER OfpActionType = 65535
+)
+
+var OfpActionType_name = map[int32]string{
+	0:     "OFPAT_OUTPUT",
+	11:    "OFPAT_COPY_TTL_OUT",
+	12:    "OFPAT_COPY_TTL_IN",
+	15:    "OFPAT_SET_MPLS_TTL",
+	16:    "OFPAT_DEC_MPLS_TTL",
+	17:    "OFPAT_PUSH_VLAN",
+	18:    "OFPAT_POP_VLAN",
+	19:    "OFPAT_PUSH_MPLS",
+	20:    "OFPAT_POP_MPLS",
+	21:    "OFPAT_SET_QUEUE",
+	22:    "OFPAT_GROUP",
+	23:    "OFPAT_SET_NW_TTL",
+	24:    "OFPAT_DEC_NW_TTL",
+	25:    "OFPAT_SET_FIELD",
+	26:    "OFPAT_PUSH_PBB",
+	27:    "OFPAT_POP_PBB",
+	65535: "OFPAT_EXPERIMENTER",
+}
+
+var OfpActionType_value = map[string]int32{
+	"OFPAT_OUTPUT":       0,
+	"OFPAT_COPY_TTL_OUT": 11,
+	"OFPAT_COPY_TTL_IN":  12,
+	"OFPAT_SET_MPLS_TTL": 15,
+	"OFPAT_DEC_MPLS_TTL": 16,
+	"OFPAT_PUSH_VLAN":    17,
+	"OFPAT_POP_VLAN":     18,
+	"OFPAT_PUSH_MPLS":    19,
+	"OFPAT_POP_MPLS":     20,
+	"OFPAT_SET_QUEUE":    21,
+	"OFPAT_GROUP":        22,
+	"OFPAT_SET_NW_TTL":   23,
+	"OFPAT_DEC_NW_TTL":   24,
+	"OFPAT_SET_FIELD":    25,
+	"OFPAT_PUSH_PBB":     26,
+	"OFPAT_POP_PBB":      27,
+	"OFPAT_EXPERIMENTER": 65535,
+}
+
+func (x OfpActionType) String() string {
+	return proto.EnumName(OfpActionType_name, int32(x))
+}
+
+func (OfpActionType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{17}
+}
+
+type OfpControllerMaxLen int32
+
+const (
+	OfpControllerMaxLen_OFPCML_INVALID   OfpControllerMaxLen = 0
+	OfpControllerMaxLen_OFPCML_MAX       OfpControllerMaxLen = 65509
+	OfpControllerMaxLen_OFPCML_NO_BUFFER OfpControllerMaxLen = 65535
+)
+
+var OfpControllerMaxLen_name = map[int32]string{
+	0:     "OFPCML_INVALID",
+	65509: "OFPCML_MAX",
+	65535: "OFPCML_NO_BUFFER",
+}
+
+var OfpControllerMaxLen_value = map[string]int32{
+	"OFPCML_INVALID":   0,
+	"OFPCML_MAX":       65509,
+	"OFPCML_NO_BUFFER": 65535,
+}
+
+func (x OfpControllerMaxLen) String() string {
+	return proto.EnumName(OfpControllerMaxLen_name, int32(x))
+}
+
+func (OfpControllerMaxLen) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{18}
+}
+
+type OfpInstructionType int32
+
+const (
+	OfpInstructionType_OFPIT_INVALID        OfpInstructionType = 0
+	OfpInstructionType_OFPIT_GOTO_TABLE     OfpInstructionType = 1
+	OfpInstructionType_OFPIT_WRITE_METADATA OfpInstructionType = 2
+	OfpInstructionType_OFPIT_WRITE_ACTIONS  OfpInstructionType = 3
+	OfpInstructionType_OFPIT_APPLY_ACTIONS  OfpInstructionType = 4
+	OfpInstructionType_OFPIT_CLEAR_ACTIONS  OfpInstructionType = 5
+	OfpInstructionType_OFPIT_METER          OfpInstructionType = 6
+	OfpInstructionType_OFPIT_EXPERIMENTER   OfpInstructionType = 65535
+)
+
+var OfpInstructionType_name = map[int32]string{
+	0:     "OFPIT_INVALID",
+	1:     "OFPIT_GOTO_TABLE",
+	2:     "OFPIT_WRITE_METADATA",
+	3:     "OFPIT_WRITE_ACTIONS",
+	4:     "OFPIT_APPLY_ACTIONS",
+	5:     "OFPIT_CLEAR_ACTIONS",
+	6:     "OFPIT_METER",
+	65535: "OFPIT_EXPERIMENTER",
+}
+
+var OfpInstructionType_value = map[string]int32{
+	"OFPIT_INVALID":        0,
+	"OFPIT_GOTO_TABLE":     1,
+	"OFPIT_WRITE_METADATA": 2,
+	"OFPIT_WRITE_ACTIONS":  3,
+	"OFPIT_APPLY_ACTIONS":  4,
+	"OFPIT_CLEAR_ACTIONS":  5,
+	"OFPIT_METER":          6,
+	"OFPIT_EXPERIMENTER":   65535,
+}
+
+func (x OfpInstructionType) String() string {
+	return proto.EnumName(OfpInstructionType_name, int32(x))
+}
+
+func (OfpInstructionType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{19}
+}
+
+type OfpFlowModCommand int32
+
+const (
+	OfpFlowModCommand_OFPFC_ADD           OfpFlowModCommand = 0
+	OfpFlowModCommand_OFPFC_MODIFY        OfpFlowModCommand = 1
+	OfpFlowModCommand_OFPFC_MODIFY_STRICT OfpFlowModCommand = 2
+	OfpFlowModCommand_OFPFC_DELETE        OfpFlowModCommand = 3
+	OfpFlowModCommand_OFPFC_DELETE_STRICT OfpFlowModCommand = 4
+)
+
+var OfpFlowModCommand_name = map[int32]string{
+	0: "OFPFC_ADD",
+	1: "OFPFC_MODIFY",
+	2: "OFPFC_MODIFY_STRICT",
+	3: "OFPFC_DELETE",
+	4: "OFPFC_DELETE_STRICT",
+}
+
+var OfpFlowModCommand_value = map[string]int32{
+	"OFPFC_ADD":           0,
+	"OFPFC_MODIFY":        1,
+	"OFPFC_MODIFY_STRICT": 2,
+	"OFPFC_DELETE":        3,
+	"OFPFC_DELETE_STRICT": 4,
+}
+
+func (x OfpFlowModCommand) String() string {
+	return proto.EnumName(OfpFlowModCommand_name, int32(x))
+}
+
+func (OfpFlowModCommand) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{20}
+}
+
+type OfpFlowModFlags int32
+
+const (
+	OfpFlowModFlags_OFPFF_INVALID       OfpFlowModFlags = 0
+	OfpFlowModFlags_OFPFF_SEND_FLOW_REM OfpFlowModFlags = 1
+	OfpFlowModFlags_OFPFF_CHECK_OVERLAP OfpFlowModFlags = 2
+	OfpFlowModFlags_OFPFF_RESET_COUNTS  OfpFlowModFlags = 4
+	OfpFlowModFlags_OFPFF_NO_PKT_COUNTS OfpFlowModFlags = 8
+	OfpFlowModFlags_OFPFF_NO_BYT_COUNTS OfpFlowModFlags = 16
+)
+
+var OfpFlowModFlags_name = map[int32]string{
+	0:  "OFPFF_INVALID",
+	1:  "OFPFF_SEND_FLOW_REM",
+	2:  "OFPFF_CHECK_OVERLAP",
+	4:  "OFPFF_RESET_COUNTS",
+	8:  "OFPFF_NO_PKT_COUNTS",
+	16: "OFPFF_NO_BYT_COUNTS",
+}
+
+var OfpFlowModFlags_value = map[string]int32{
+	"OFPFF_INVALID":       0,
+	"OFPFF_SEND_FLOW_REM": 1,
+	"OFPFF_CHECK_OVERLAP": 2,
+	"OFPFF_RESET_COUNTS":  4,
+	"OFPFF_NO_PKT_COUNTS": 8,
+	"OFPFF_NO_BYT_COUNTS": 16,
+}
+
+func (x OfpFlowModFlags) String() string {
+	return proto.EnumName(OfpFlowModFlags_name, int32(x))
+}
+
+func (OfpFlowModFlags) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{21}
+}
+
+// Group numbering. Groups can use any number up to OFPG_MAX.
+type OfpGroup int32
+
+const (
+	OfpGroup_OFPG_INVALID OfpGroup = 0
+	// Last usable group number.
+	OfpGroup_OFPG_MAX OfpGroup = 2147483392
+	// Fake groups.
+	OfpGroup_OFPG_ALL OfpGroup = 2147483644
+	OfpGroup_OFPG_ANY OfpGroup = 2147483647
+)
+
+var OfpGroup_name = map[int32]string{
+	0:          "OFPG_INVALID",
+	2147483392: "OFPG_MAX",
+	2147483644: "OFPG_ALL",
+	2147483647: "OFPG_ANY",
+}
+
+var OfpGroup_value = map[string]int32{
+	"OFPG_INVALID": 0,
+	"OFPG_MAX":     2147483392,
+	"OFPG_ALL":     2147483644,
+	"OFPG_ANY":     2147483647,
+}
+
+func (x OfpGroup) String() string {
+	return proto.EnumName(OfpGroup_name, int32(x))
+}
+
+func (OfpGroup) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{22}
+}
+
+// Group commands
+type OfpGroupModCommand int32
+
+const (
+	OfpGroupModCommand_OFPGC_ADD    OfpGroupModCommand = 0
+	OfpGroupModCommand_OFPGC_MODIFY OfpGroupModCommand = 1
+	OfpGroupModCommand_OFPGC_DELETE OfpGroupModCommand = 2
+)
+
+var OfpGroupModCommand_name = map[int32]string{
+	0: "OFPGC_ADD",
+	1: "OFPGC_MODIFY",
+	2: "OFPGC_DELETE",
+}
+
+var OfpGroupModCommand_value = map[string]int32{
+	"OFPGC_ADD":    0,
+	"OFPGC_MODIFY": 1,
+	"OFPGC_DELETE": 2,
+}
+
+func (x OfpGroupModCommand) String() string {
+	return proto.EnumName(OfpGroupModCommand_name, int32(x))
+}
+
+func (OfpGroupModCommand) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{23}
+}
+
+// Group types.  Values in the range [128; 255] are reserved for experimental
+// use.
+type OfpGroupType int32
+
+const (
+	OfpGroupType_OFPGT_ALL      OfpGroupType = 0
+	OfpGroupType_OFPGT_SELECT   OfpGroupType = 1
+	OfpGroupType_OFPGT_INDIRECT OfpGroupType = 2
+	OfpGroupType_OFPGT_FF       OfpGroupType = 3
+)
+
+var OfpGroupType_name = map[int32]string{
+	0: "OFPGT_ALL",
+	1: "OFPGT_SELECT",
+	2: "OFPGT_INDIRECT",
+	3: "OFPGT_FF",
+}
+
+var OfpGroupType_value = map[string]int32{
+	"OFPGT_ALL":      0,
+	"OFPGT_SELECT":   1,
+	"OFPGT_INDIRECT": 2,
+	"OFPGT_FF":       3,
+}
+
+func (x OfpGroupType) String() string {
+	return proto.EnumName(OfpGroupType_name, int32(x))
+}
+
+func (OfpGroupType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{24}
+}
+
+// Why is this packet being sent to the controller?
+type OfpPacketInReason int32
+
+const (
+	OfpPacketInReason_OFPR_NO_MATCH    OfpPacketInReason = 0
+	OfpPacketInReason_OFPR_ACTION      OfpPacketInReason = 1
+	OfpPacketInReason_OFPR_INVALID_TTL OfpPacketInReason = 2
+)
+
+var OfpPacketInReason_name = map[int32]string{
+	0: "OFPR_NO_MATCH",
+	1: "OFPR_ACTION",
+	2: "OFPR_INVALID_TTL",
+}
+
+var OfpPacketInReason_value = map[string]int32{
+	"OFPR_NO_MATCH":    0,
+	"OFPR_ACTION":      1,
+	"OFPR_INVALID_TTL": 2,
+}
+
+func (x OfpPacketInReason) String() string {
+	return proto.EnumName(OfpPacketInReason_name, int32(x))
+}
+
+func (OfpPacketInReason) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{25}
+}
+
+// Why was this flow removed?
+type OfpFlowRemovedReason int32
+
+const (
+	OfpFlowRemovedReason_OFPRR_IDLE_TIMEOUT OfpFlowRemovedReason = 0
+	OfpFlowRemovedReason_OFPRR_HARD_TIMEOUT OfpFlowRemovedReason = 1
+	OfpFlowRemovedReason_OFPRR_DELETE       OfpFlowRemovedReason = 2
+	OfpFlowRemovedReason_OFPRR_GROUP_DELETE OfpFlowRemovedReason = 3
+	OfpFlowRemovedReason_OFPRR_METER_DELETE OfpFlowRemovedReason = 4
+)
+
+var OfpFlowRemovedReason_name = map[int32]string{
+	0: "OFPRR_IDLE_TIMEOUT",
+	1: "OFPRR_HARD_TIMEOUT",
+	2: "OFPRR_DELETE",
+	3: "OFPRR_GROUP_DELETE",
+	4: "OFPRR_METER_DELETE",
+}
+
+var OfpFlowRemovedReason_value = map[string]int32{
+	"OFPRR_IDLE_TIMEOUT": 0,
+	"OFPRR_HARD_TIMEOUT": 1,
+	"OFPRR_DELETE":       2,
+	"OFPRR_GROUP_DELETE": 3,
+	"OFPRR_METER_DELETE": 4,
+}
+
+func (x OfpFlowRemovedReason) String() string {
+	return proto.EnumName(OfpFlowRemovedReason_name, int32(x))
+}
+
+func (OfpFlowRemovedReason) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{26}
+}
+
+// Meter numbering. Flow meters can use any number up to OFPM_MAX.
+type OfpMeter int32
+
+const (
+	OfpMeter_OFPM_ZERO OfpMeter = 0
+	// Last usable meter.
+	OfpMeter_OFPM_MAX OfpMeter = 2147418112
+	// Virtual meters.
+	OfpMeter_OFPM_SLOWPATH   OfpMeter = 2147483645
+	OfpMeter_OFPM_CONTROLLER OfpMeter = 2147483646
+	OfpMeter_OFPM_ALL        OfpMeter = 2147483647
+)
+
+var OfpMeter_name = map[int32]string{
+	0:          "OFPM_ZERO",
+	2147418112: "OFPM_MAX",
+	2147483645: "OFPM_SLOWPATH",
+	2147483646: "OFPM_CONTROLLER",
+	2147483647: "OFPM_ALL",
+}
+
+var OfpMeter_value = map[string]int32{
+	"OFPM_ZERO":       0,
+	"OFPM_MAX":        2147418112,
+	"OFPM_SLOWPATH":   2147483645,
+	"OFPM_CONTROLLER": 2147483646,
+	"OFPM_ALL":        2147483647,
+}
+
+func (x OfpMeter) String() string {
+	return proto.EnumName(OfpMeter_name, int32(x))
+}
+
+func (OfpMeter) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{27}
+}
+
+// Meter band types
+type OfpMeterBandType int32
+
+const (
+	OfpMeterBandType_OFPMBT_INVALID      OfpMeterBandType = 0
+	OfpMeterBandType_OFPMBT_DROP         OfpMeterBandType = 1
+	OfpMeterBandType_OFPMBT_DSCP_REMARK  OfpMeterBandType = 2
+	OfpMeterBandType_OFPMBT_EXPERIMENTER OfpMeterBandType = 65535
+)
+
+var OfpMeterBandType_name = map[int32]string{
+	0:     "OFPMBT_INVALID",
+	1:     "OFPMBT_DROP",
+	2:     "OFPMBT_DSCP_REMARK",
+	65535: "OFPMBT_EXPERIMENTER",
+}
+
+var OfpMeterBandType_value = map[string]int32{
+	"OFPMBT_INVALID":      0,
+	"OFPMBT_DROP":         1,
+	"OFPMBT_DSCP_REMARK":  2,
+	"OFPMBT_EXPERIMENTER": 65535,
+}
+
+func (x OfpMeterBandType) String() string {
+	return proto.EnumName(OfpMeterBandType_name, int32(x))
+}
+
+func (OfpMeterBandType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{28}
+}
+
+// Meter commands
+type OfpMeterModCommand int32
+
+const (
+	OfpMeterModCommand_OFPMC_ADD    OfpMeterModCommand = 0
+	OfpMeterModCommand_OFPMC_MODIFY OfpMeterModCommand = 1
+	OfpMeterModCommand_OFPMC_DELETE OfpMeterModCommand = 2
+)
+
+var OfpMeterModCommand_name = map[int32]string{
+	0: "OFPMC_ADD",
+	1: "OFPMC_MODIFY",
+	2: "OFPMC_DELETE",
+}
+
+var OfpMeterModCommand_value = map[string]int32{
+	"OFPMC_ADD":    0,
+	"OFPMC_MODIFY": 1,
+	"OFPMC_DELETE": 2,
+}
+
+func (x OfpMeterModCommand) String() string {
+	return proto.EnumName(OfpMeterModCommand_name, int32(x))
+}
+
+func (OfpMeterModCommand) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{29}
+}
+
+// Meter configuration flags
+type OfpMeterFlags int32
+
+const (
+	OfpMeterFlags_OFPMF_INVALID OfpMeterFlags = 0
+	OfpMeterFlags_OFPMF_KBPS    OfpMeterFlags = 1
+	OfpMeterFlags_OFPMF_PKTPS   OfpMeterFlags = 2
+	OfpMeterFlags_OFPMF_BURST   OfpMeterFlags = 4
+	OfpMeterFlags_OFPMF_STATS   OfpMeterFlags = 8
+)
+
+var OfpMeterFlags_name = map[int32]string{
+	0: "OFPMF_INVALID",
+	1: "OFPMF_KBPS",
+	2: "OFPMF_PKTPS",
+	4: "OFPMF_BURST",
+	8: "OFPMF_STATS",
+}
+
+var OfpMeterFlags_value = map[string]int32{
+	"OFPMF_INVALID": 0,
+	"OFPMF_KBPS":    1,
+	"OFPMF_PKTPS":   2,
+	"OFPMF_BURST":   4,
+	"OFPMF_STATS":   8,
+}
+
+func (x OfpMeterFlags) String() string {
+	return proto.EnumName(OfpMeterFlags_name, int32(x))
+}
+
+func (OfpMeterFlags) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{30}
+}
+
+// Values for 'type' in ofp_error_message.  These values are immutable: they
+// will not change in future versions of the protocol (although new values may
+// be added).
+type OfpErrorType int32
+
+const (
+	OfpErrorType_OFPET_HELLO_FAILED          OfpErrorType = 0
+	OfpErrorType_OFPET_BAD_REQUEST           OfpErrorType = 1
+	OfpErrorType_OFPET_BAD_ACTION            OfpErrorType = 2
+	OfpErrorType_OFPET_BAD_INSTRUCTION       OfpErrorType = 3
+	OfpErrorType_OFPET_BAD_MATCH             OfpErrorType = 4
+	OfpErrorType_OFPET_FLOW_MOD_FAILED       OfpErrorType = 5
+	OfpErrorType_OFPET_GROUP_MOD_FAILED      OfpErrorType = 6
+	OfpErrorType_OFPET_PORT_MOD_FAILED       OfpErrorType = 7
+	OfpErrorType_OFPET_TABLE_MOD_FAILED      OfpErrorType = 8
+	OfpErrorType_OFPET_QUEUE_OP_FAILED       OfpErrorType = 9
+	OfpErrorType_OFPET_SWITCH_CONFIG_FAILED  OfpErrorType = 10
+	OfpErrorType_OFPET_ROLE_REQUEST_FAILED   OfpErrorType = 11
+	OfpErrorType_OFPET_METER_MOD_FAILED      OfpErrorType = 12
+	OfpErrorType_OFPET_TABLE_FEATURES_FAILED OfpErrorType = 13
+	OfpErrorType_OFPET_EXPERIMENTER          OfpErrorType = 65535
+)
+
+var OfpErrorType_name = map[int32]string{
+	0:     "OFPET_HELLO_FAILED",
+	1:     "OFPET_BAD_REQUEST",
+	2:     "OFPET_BAD_ACTION",
+	3:     "OFPET_BAD_INSTRUCTION",
+	4:     "OFPET_BAD_MATCH",
+	5:     "OFPET_FLOW_MOD_FAILED",
+	6:     "OFPET_GROUP_MOD_FAILED",
+	7:     "OFPET_PORT_MOD_FAILED",
+	8:     "OFPET_TABLE_MOD_FAILED",
+	9:     "OFPET_QUEUE_OP_FAILED",
+	10:    "OFPET_SWITCH_CONFIG_FAILED",
+	11:    "OFPET_ROLE_REQUEST_FAILED",
+	12:    "OFPET_METER_MOD_FAILED",
+	13:    "OFPET_TABLE_FEATURES_FAILED",
+	65535: "OFPET_EXPERIMENTER",
+}
+
+var OfpErrorType_value = map[string]int32{
+	"OFPET_HELLO_FAILED":          0,
+	"OFPET_BAD_REQUEST":           1,
+	"OFPET_BAD_ACTION":            2,
+	"OFPET_BAD_INSTRUCTION":       3,
+	"OFPET_BAD_MATCH":             4,
+	"OFPET_FLOW_MOD_FAILED":       5,
+	"OFPET_GROUP_MOD_FAILED":      6,
+	"OFPET_PORT_MOD_FAILED":       7,
+	"OFPET_TABLE_MOD_FAILED":      8,
+	"OFPET_QUEUE_OP_FAILED":       9,
+	"OFPET_SWITCH_CONFIG_FAILED":  10,
+	"OFPET_ROLE_REQUEST_FAILED":   11,
+	"OFPET_METER_MOD_FAILED":      12,
+	"OFPET_TABLE_FEATURES_FAILED": 13,
+	"OFPET_EXPERIMENTER":          65535,
+}
+
+func (x OfpErrorType) String() string {
+	return proto.EnumName(OfpErrorType_name, int32(x))
+}
+
+func (OfpErrorType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{31}
+}
+
+// ofp_error_msg 'code' values for OFPET_HELLO_FAILED.  'data' contains an
+// ASCII text string that may give failure details.
+type OfpHelloFailedCode int32
+
+const (
+	OfpHelloFailedCode_OFPHFC_INCOMPATIBLE OfpHelloFailedCode = 0
+	OfpHelloFailedCode_OFPHFC_EPERM        OfpHelloFailedCode = 1
+)
+
+var OfpHelloFailedCode_name = map[int32]string{
+	0: "OFPHFC_INCOMPATIBLE",
+	1: "OFPHFC_EPERM",
+}
+
+var OfpHelloFailedCode_value = map[string]int32{
+	"OFPHFC_INCOMPATIBLE": 0,
+	"OFPHFC_EPERM":        1,
+}
+
+func (x OfpHelloFailedCode) String() string {
+	return proto.EnumName(OfpHelloFailedCode_name, int32(x))
+}
+
+func (OfpHelloFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{32}
+}
+
+// ofp_error_msg 'code' values for OFPET_BAD_REQUEST.  'data' contains at least
+// the first 64 bytes of the failed request.
+type OfpBadRequestCode int32
+
+const (
+	OfpBadRequestCode_OFPBRC_BAD_VERSION               OfpBadRequestCode = 0
+	OfpBadRequestCode_OFPBRC_BAD_TYPE                  OfpBadRequestCode = 1
+	OfpBadRequestCode_OFPBRC_BAD_MULTIPART             OfpBadRequestCode = 2
+	OfpBadRequestCode_OFPBRC_BAD_EXPERIMENTER          OfpBadRequestCode = 3
+	OfpBadRequestCode_OFPBRC_BAD_EXP_TYPE              OfpBadRequestCode = 4
+	OfpBadRequestCode_OFPBRC_EPERM                     OfpBadRequestCode = 5
+	OfpBadRequestCode_OFPBRC_BAD_LEN                   OfpBadRequestCode = 6
+	OfpBadRequestCode_OFPBRC_BUFFER_EMPTY              OfpBadRequestCode = 7
+	OfpBadRequestCode_OFPBRC_BUFFER_UNKNOWN            OfpBadRequestCode = 8
+	OfpBadRequestCode_OFPBRC_BAD_TABLE_ID              OfpBadRequestCode = 9
+	OfpBadRequestCode_OFPBRC_IS_SLAVE                  OfpBadRequestCode = 10
+	OfpBadRequestCode_OFPBRC_BAD_PORT                  OfpBadRequestCode = 11
+	OfpBadRequestCode_OFPBRC_BAD_PACKET                OfpBadRequestCode = 12
+	OfpBadRequestCode_OFPBRC_MULTIPART_BUFFER_OVERFLOW OfpBadRequestCode = 13
+)
+
+var OfpBadRequestCode_name = map[int32]string{
+	0:  "OFPBRC_BAD_VERSION",
+	1:  "OFPBRC_BAD_TYPE",
+	2:  "OFPBRC_BAD_MULTIPART",
+	3:  "OFPBRC_BAD_EXPERIMENTER",
+	4:  "OFPBRC_BAD_EXP_TYPE",
+	5:  "OFPBRC_EPERM",
+	6:  "OFPBRC_BAD_LEN",
+	7:  "OFPBRC_BUFFER_EMPTY",
+	8:  "OFPBRC_BUFFER_UNKNOWN",
+	9:  "OFPBRC_BAD_TABLE_ID",
+	10: "OFPBRC_IS_SLAVE",
+	11: "OFPBRC_BAD_PORT",
+	12: "OFPBRC_BAD_PACKET",
+	13: "OFPBRC_MULTIPART_BUFFER_OVERFLOW",
+}
+
+var OfpBadRequestCode_value = map[string]int32{
+	"OFPBRC_BAD_VERSION":               0,
+	"OFPBRC_BAD_TYPE":                  1,
+	"OFPBRC_BAD_MULTIPART":             2,
+	"OFPBRC_BAD_EXPERIMENTER":          3,
+	"OFPBRC_BAD_EXP_TYPE":              4,
+	"OFPBRC_EPERM":                     5,
+	"OFPBRC_BAD_LEN":                   6,
+	"OFPBRC_BUFFER_EMPTY":              7,
+	"OFPBRC_BUFFER_UNKNOWN":            8,
+	"OFPBRC_BAD_TABLE_ID":              9,
+	"OFPBRC_IS_SLAVE":                  10,
+	"OFPBRC_BAD_PORT":                  11,
+	"OFPBRC_BAD_PACKET":                12,
+	"OFPBRC_MULTIPART_BUFFER_OVERFLOW": 13,
+}
+
+func (x OfpBadRequestCode) String() string {
+	return proto.EnumName(OfpBadRequestCode_name, int32(x))
+}
+
+func (OfpBadRequestCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{33}
+}
+
+// ofp_error_msg 'code' values for OFPET_BAD_ACTION.  'data' contains at least
+// the first 64 bytes of the failed request.
+type OfpBadActionCode int32
+
+const (
+	OfpBadActionCode_OFPBAC_BAD_TYPE           OfpBadActionCode = 0
+	OfpBadActionCode_OFPBAC_BAD_LEN            OfpBadActionCode = 1
+	OfpBadActionCode_OFPBAC_BAD_EXPERIMENTER   OfpBadActionCode = 2
+	OfpBadActionCode_OFPBAC_BAD_EXP_TYPE       OfpBadActionCode = 3
+	OfpBadActionCode_OFPBAC_BAD_OUT_PORT       OfpBadActionCode = 4
+	OfpBadActionCode_OFPBAC_BAD_ARGUMENT       OfpBadActionCode = 5
+	OfpBadActionCode_OFPBAC_EPERM              OfpBadActionCode = 6
+	OfpBadActionCode_OFPBAC_TOO_MANY           OfpBadActionCode = 7
+	OfpBadActionCode_OFPBAC_BAD_QUEUE          OfpBadActionCode = 8
+	OfpBadActionCode_OFPBAC_BAD_OUT_GROUP      OfpBadActionCode = 9
+	OfpBadActionCode_OFPBAC_MATCH_INCONSISTENT OfpBadActionCode = 10
+	OfpBadActionCode_OFPBAC_UNSUPPORTED_ORDER  OfpBadActionCode = 11
+	OfpBadActionCode_OFPBAC_BAD_TAG            OfpBadActionCode = 12
+	OfpBadActionCode_OFPBAC_BAD_SET_TYPE       OfpBadActionCode = 13
+	OfpBadActionCode_OFPBAC_BAD_SET_LEN        OfpBadActionCode = 14
+	OfpBadActionCode_OFPBAC_BAD_SET_ARGUMENT   OfpBadActionCode = 15
+)
+
+var OfpBadActionCode_name = map[int32]string{
+	0:  "OFPBAC_BAD_TYPE",
+	1:  "OFPBAC_BAD_LEN",
+	2:  "OFPBAC_BAD_EXPERIMENTER",
+	3:  "OFPBAC_BAD_EXP_TYPE",
+	4:  "OFPBAC_BAD_OUT_PORT",
+	5:  "OFPBAC_BAD_ARGUMENT",
+	6:  "OFPBAC_EPERM",
+	7:  "OFPBAC_TOO_MANY",
+	8:  "OFPBAC_BAD_QUEUE",
+	9:  "OFPBAC_BAD_OUT_GROUP",
+	10: "OFPBAC_MATCH_INCONSISTENT",
+	11: "OFPBAC_UNSUPPORTED_ORDER",
+	12: "OFPBAC_BAD_TAG",
+	13: "OFPBAC_BAD_SET_TYPE",
+	14: "OFPBAC_BAD_SET_LEN",
+	15: "OFPBAC_BAD_SET_ARGUMENT",
+}
+
+var OfpBadActionCode_value = map[string]int32{
+	"OFPBAC_BAD_TYPE":           0,
+	"OFPBAC_BAD_LEN":            1,
+	"OFPBAC_BAD_EXPERIMENTER":   2,
+	"OFPBAC_BAD_EXP_TYPE":       3,
+	"OFPBAC_BAD_OUT_PORT":       4,
+	"OFPBAC_BAD_ARGUMENT":       5,
+	"OFPBAC_EPERM":              6,
+	"OFPBAC_TOO_MANY":           7,
+	"OFPBAC_BAD_QUEUE":          8,
+	"OFPBAC_BAD_OUT_GROUP":      9,
+	"OFPBAC_MATCH_INCONSISTENT": 10,
+	"OFPBAC_UNSUPPORTED_ORDER":  11,
+	"OFPBAC_BAD_TAG":            12,
+	"OFPBAC_BAD_SET_TYPE":       13,
+	"OFPBAC_BAD_SET_LEN":        14,
+	"OFPBAC_BAD_SET_ARGUMENT":   15,
+}
+
+func (x OfpBadActionCode) String() string {
+	return proto.EnumName(OfpBadActionCode_name, int32(x))
+}
+
+func (OfpBadActionCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{34}
+}
+
+// ofp_error_msg 'code' values for OFPET_BAD_INSTRUCTION.  'data' contains at
+// least the first 64 bytes of the failed request.
+type OfpBadInstructionCode int32
+
+const (
+	OfpBadInstructionCode_OFPBIC_UNKNOWN_INST        OfpBadInstructionCode = 0
+	OfpBadInstructionCode_OFPBIC_UNSUP_INST          OfpBadInstructionCode = 1
+	OfpBadInstructionCode_OFPBIC_BAD_TABLE_ID        OfpBadInstructionCode = 2
+	OfpBadInstructionCode_OFPBIC_UNSUP_METADATA      OfpBadInstructionCode = 3
+	OfpBadInstructionCode_OFPBIC_UNSUP_METADATA_MASK OfpBadInstructionCode = 4
+	OfpBadInstructionCode_OFPBIC_BAD_EXPERIMENTER    OfpBadInstructionCode = 5
+	OfpBadInstructionCode_OFPBIC_BAD_EXP_TYPE        OfpBadInstructionCode = 6
+	OfpBadInstructionCode_OFPBIC_BAD_LEN             OfpBadInstructionCode = 7
+	OfpBadInstructionCode_OFPBIC_EPERM               OfpBadInstructionCode = 8
+)
+
+var OfpBadInstructionCode_name = map[int32]string{
+	0: "OFPBIC_UNKNOWN_INST",
+	1: "OFPBIC_UNSUP_INST",
+	2: "OFPBIC_BAD_TABLE_ID",
+	3: "OFPBIC_UNSUP_METADATA",
+	4: "OFPBIC_UNSUP_METADATA_MASK",
+	5: "OFPBIC_BAD_EXPERIMENTER",
+	6: "OFPBIC_BAD_EXP_TYPE",
+	7: "OFPBIC_BAD_LEN",
+	8: "OFPBIC_EPERM",
+}
+
+var OfpBadInstructionCode_value = map[string]int32{
+	"OFPBIC_UNKNOWN_INST":        0,
+	"OFPBIC_UNSUP_INST":          1,
+	"OFPBIC_BAD_TABLE_ID":        2,
+	"OFPBIC_UNSUP_METADATA":      3,
+	"OFPBIC_UNSUP_METADATA_MASK": 4,
+	"OFPBIC_BAD_EXPERIMENTER":    5,
+	"OFPBIC_BAD_EXP_TYPE":        6,
+	"OFPBIC_BAD_LEN":             7,
+	"OFPBIC_EPERM":               8,
+}
+
+func (x OfpBadInstructionCode) String() string {
+	return proto.EnumName(OfpBadInstructionCode_name, int32(x))
+}
+
+func (OfpBadInstructionCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{35}
+}
+
+// ofp_error_msg 'code' values for OFPET_BAD_MATCH.  'data' contains at least
+// the first 64 bytes of the failed request.
+type OfpBadMatchCode int32
+
+const (
+	OfpBadMatchCode_OFPBMC_BAD_TYPE         OfpBadMatchCode = 0
+	OfpBadMatchCode_OFPBMC_BAD_LEN          OfpBadMatchCode = 1
+	OfpBadMatchCode_OFPBMC_BAD_TAG          OfpBadMatchCode = 2
+	OfpBadMatchCode_OFPBMC_BAD_DL_ADDR_MASK OfpBadMatchCode = 3
+	OfpBadMatchCode_OFPBMC_BAD_NW_ADDR_MASK OfpBadMatchCode = 4
+	OfpBadMatchCode_OFPBMC_BAD_WILDCARDS    OfpBadMatchCode = 5
+	OfpBadMatchCode_OFPBMC_BAD_FIELD        OfpBadMatchCode = 6
+	OfpBadMatchCode_OFPBMC_BAD_VALUE        OfpBadMatchCode = 7
+	OfpBadMatchCode_OFPBMC_BAD_MASK         OfpBadMatchCode = 8
+	OfpBadMatchCode_OFPBMC_BAD_PREREQ       OfpBadMatchCode = 9
+	OfpBadMatchCode_OFPBMC_DUP_FIELD        OfpBadMatchCode = 10
+	OfpBadMatchCode_OFPBMC_EPERM            OfpBadMatchCode = 11
+)
+
+var OfpBadMatchCode_name = map[int32]string{
+	0:  "OFPBMC_BAD_TYPE",
+	1:  "OFPBMC_BAD_LEN",
+	2:  "OFPBMC_BAD_TAG",
+	3:  "OFPBMC_BAD_DL_ADDR_MASK",
+	4:  "OFPBMC_BAD_NW_ADDR_MASK",
+	5:  "OFPBMC_BAD_WILDCARDS",
+	6:  "OFPBMC_BAD_FIELD",
+	7:  "OFPBMC_BAD_VALUE",
+	8:  "OFPBMC_BAD_MASK",
+	9:  "OFPBMC_BAD_PREREQ",
+	10: "OFPBMC_DUP_FIELD",
+	11: "OFPBMC_EPERM",
+}
+
+var OfpBadMatchCode_value = map[string]int32{
+	"OFPBMC_BAD_TYPE":         0,
+	"OFPBMC_BAD_LEN":          1,
+	"OFPBMC_BAD_TAG":          2,
+	"OFPBMC_BAD_DL_ADDR_MASK": 3,
+	"OFPBMC_BAD_NW_ADDR_MASK": 4,
+	"OFPBMC_BAD_WILDCARDS":    5,
+	"OFPBMC_BAD_FIELD":        6,
+	"OFPBMC_BAD_VALUE":        7,
+	"OFPBMC_BAD_MASK":         8,
+	"OFPBMC_BAD_PREREQ":       9,
+	"OFPBMC_DUP_FIELD":        10,
+	"OFPBMC_EPERM":            11,
+}
+
+func (x OfpBadMatchCode) String() string {
+	return proto.EnumName(OfpBadMatchCode_name, int32(x))
+}
+
+func (OfpBadMatchCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{36}
+}
+
+// ofp_error_msg 'code' values for OFPET_FLOW_MOD_FAILED.  'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpFlowModFailedCode int32
+
+const (
+	OfpFlowModFailedCode_OFPFMFC_UNKNOWN      OfpFlowModFailedCode = 0
+	OfpFlowModFailedCode_OFPFMFC_TABLE_FULL   OfpFlowModFailedCode = 1
+	OfpFlowModFailedCode_OFPFMFC_BAD_TABLE_ID OfpFlowModFailedCode = 2
+	OfpFlowModFailedCode_OFPFMFC_OVERLAP      OfpFlowModFailedCode = 3
+	OfpFlowModFailedCode_OFPFMFC_EPERM        OfpFlowModFailedCode = 4
+	OfpFlowModFailedCode_OFPFMFC_BAD_TIMEOUT  OfpFlowModFailedCode = 5
+	OfpFlowModFailedCode_OFPFMFC_BAD_COMMAND  OfpFlowModFailedCode = 6
+	OfpFlowModFailedCode_OFPFMFC_BAD_FLAGS    OfpFlowModFailedCode = 7
+)
+
+var OfpFlowModFailedCode_name = map[int32]string{
+	0: "OFPFMFC_UNKNOWN",
+	1: "OFPFMFC_TABLE_FULL",
+	2: "OFPFMFC_BAD_TABLE_ID",
+	3: "OFPFMFC_OVERLAP",
+	4: "OFPFMFC_EPERM",
+	5: "OFPFMFC_BAD_TIMEOUT",
+	6: "OFPFMFC_BAD_COMMAND",
+	7: "OFPFMFC_BAD_FLAGS",
+}
+
+var OfpFlowModFailedCode_value = map[string]int32{
+	"OFPFMFC_UNKNOWN":      0,
+	"OFPFMFC_TABLE_FULL":   1,
+	"OFPFMFC_BAD_TABLE_ID": 2,
+	"OFPFMFC_OVERLAP":      3,
+	"OFPFMFC_EPERM":        4,
+	"OFPFMFC_BAD_TIMEOUT":  5,
+	"OFPFMFC_BAD_COMMAND":  6,
+	"OFPFMFC_BAD_FLAGS":    7,
+}
+
+func (x OfpFlowModFailedCode) String() string {
+	return proto.EnumName(OfpFlowModFailedCode_name, int32(x))
+}
+
+func (OfpFlowModFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{37}
+}
+
+// ofp_error_msg 'code' values for OFPET_GROUP_MOD_FAILED.  'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpGroupModFailedCode int32
+
+const (
+	OfpGroupModFailedCode_OFPGMFC_GROUP_EXISTS         OfpGroupModFailedCode = 0
+	OfpGroupModFailedCode_OFPGMFC_INVALID_GROUP        OfpGroupModFailedCode = 1
+	OfpGroupModFailedCode_OFPGMFC_WEIGHT_UNSUPPORTED   OfpGroupModFailedCode = 2
+	OfpGroupModFailedCode_OFPGMFC_OUT_OF_GROUPS        OfpGroupModFailedCode = 3
+	OfpGroupModFailedCode_OFPGMFC_OUT_OF_BUCKETS       OfpGroupModFailedCode = 4
+	OfpGroupModFailedCode_OFPGMFC_CHAINING_UNSUPPORTED OfpGroupModFailedCode = 5
+	OfpGroupModFailedCode_OFPGMFC_WATCH_UNSUPPORTED    OfpGroupModFailedCode = 6
+	OfpGroupModFailedCode_OFPGMFC_LOOP                 OfpGroupModFailedCode = 7
+	OfpGroupModFailedCode_OFPGMFC_UNKNOWN_GROUP        OfpGroupModFailedCode = 8
+	OfpGroupModFailedCode_OFPGMFC_CHAINED_GROUP        OfpGroupModFailedCode = 9
+	OfpGroupModFailedCode_OFPGMFC_BAD_TYPE             OfpGroupModFailedCode = 10
+	OfpGroupModFailedCode_OFPGMFC_BAD_COMMAND          OfpGroupModFailedCode = 11
+	OfpGroupModFailedCode_OFPGMFC_BAD_BUCKET           OfpGroupModFailedCode = 12
+	OfpGroupModFailedCode_OFPGMFC_BAD_WATCH            OfpGroupModFailedCode = 13
+	OfpGroupModFailedCode_OFPGMFC_EPERM                OfpGroupModFailedCode = 14
+)
+
+var OfpGroupModFailedCode_name = map[int32]string{
+	0:  "OFPGMFC_GROUP_EXISTS",
+	1:  "OFPGMFC_INVALID_GROUP",
+	2:  "OFPGMFC_WEIGHT_UNSUPPORTED",
+	3:  "OFPGMFC_OUT_OF_GROUPS",
+	4:  "OFPGMFC_OUT_OF_BUCKETS",
+	5:  "OFPGMFC_CHAINING_UNSUPPORTED",
+	6:  "OFPGMFC_WATCH_UNSUPPORTED",
+	7:  "OFPGMFC_LOOP",
+	8:  "OFPGMFC_UNKNOWN_GROUP",
+	9:  "OFPGMFC_CHAINED_GROUP",
+	10: "OFPGMFC_BAD_TYPE",
+	11: "OFPGMFC_BAD_COMMAND",
+	12: "OFPGMFC_BAD_BUCKET",
+	13: "OFPGMFC_BAD_WATCH",
+	14: "OFPGMFC_EPERM",
+}
+
+var OfpGroupModFailedCode_value = map[string]int32{
+	"OFPGMFC_GROUP_EXISTS":         0,
+	"OFPGMFC_INVALID_GROUP":        1,
+	"OFPGMFC_WEIGHT_UNSUPPORTED":   2,
+	"OFPGMFC_OUT_OF_GROUPS":        3,
+	"OFPGMFC_OUT_OF_BUCKETS":       4,
+	"OFPGMFC_CHAINING_UNSUPPORTED": 5,
+	"OFPGMFC_WATCH_UNSUPPORTED":    6,
+	"OFPGMFC_LOOP":                 7,
+	"OFPGMFC_UNKNOWN_GROUP":        8,
+	"OFPGMFC_CHAINED_GROUP":        9,
+	"OFPGMFC_BAD_TYPE":             10,
+	"OFPGMFC_BAD_COMMAND":          11,
+	"OFPGMFC_BAD_BUCKET":           12,
+	"OFPGMFC_BAD_WATCH":            13,
+	"OFPGMFC_EPERM":                14,
+}
+
+func (x OfpGroupModFailedCode) String() string {
+	return proto.EnumName(OfpGroupModFailedCode_name, int32(x))
+}
+
+func (OfpGroupModFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{38}
+}
+
+// ofp_error_msg 'code' values for OFPET_PORT_MOD_FAILED.  'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpPortModFailedCode int32
+
+const (
+	OfpPortModFailedCode_OFPPMFC_BAD_PORT      OfpPortModFailedCode = 0
+	OfpPortModFailedCode_OFPPMFC_BAD_HW_ADDR   OfpPortModFailedCode = 1
+	OfpPortModFailedCode_OFPPMFC_BAD_CONFIG    OfpPortModFailedCode = 2
+	OfpPortModFailedCode_OFPPMFC_BAD_ADVERTISE OfpPortModFailedCode = 3
+	OfpPortModFailedCode_OFPPMFC_EPERM         OfpPortModFailedCode = 4
+)
+
+var OfpPortModFailedCode_name = map[int32]string{
+	0: "OFPPMFC_BAD_PORT",
+	1: "OFPPMFC_BAD_HW_ADDR",
+	2: "OFPPMFC_BAD_CONFIG",
+	3: "OFPPMFC_BAD_ADVERTISE",
+	4: "OFPPMFC_EPERM",
+}
+
+var OfpPortModFailedCode_value = map[string]int32{
+	"OFPPMFC_BAD_PORT":      0,
+	"OFPPMFC_BAD_HW_ADDR":   1,
+	"OFPPMFC_BAD_CONFIG":    2,
+	"OFPPMFC_BAD_ADVERTISE": 3,
+	"OFPPMFC_EPERM":         4,
+}
+
+func (x OfpPortModFailedCode) String() string {
+	return proto.EnumName(OfpPortModFailedCode_name, int32(x))
+}
+
+func (OfpPortModFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{39}
+}
+
+// ofp_error_msg 'code' values for OFPET_TABLE_MOD_FAILED.  'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpTableModFailedCode int32
+
+const (
+	OfpTableModFailedCode_OFPTMFC_BAD_TABLE  OfpTableModFailedCode = 0
+	OfpTableModFailedCode_OFPTMFC_BAD_CONFIG OfpTableModFailedCode = 1
+	OfpTableModFailedCode_OFPTMFC_EPERM      OfpTableModFailedCode = 2
+)
+
+var OfpTableModFailedCode_name = map[int32]string{
+	0: "OFPTMFC_BAD_TABLE",
+	1: "OFPTMFC_BAD_CONFIG",
+	2: "OFPTMFC_EPERM",
+}
+
+var OfpTableModFailedCode_value = map[string]int32{
+	"OFPTMFC_BAD_TABLE":  0,
+	"OFPTMFC_BAD_CONFIG": 1,
+	"OFPTMFC_EPERM":      2,
+}
+
+func (x OfpTableModFailedCode) String() string {
+	return proto.EnumName(OfpTableModFailedCode_name, int32(x))
+}
+
+func (OfpTableModFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{40}
+}
+
+	// ofp_error_msg 'code' values for OFPET_QUEUE_OP_FAILED. 'data' contains
+	// at least the first 64 bytes of the failed request.
+type OfpQueueOpFailedCode int32
+
+const (
+	OfpQueueOpFailedCode_OFPQOFC_BAD_PORT  OfpQueueOpFailedCode = 0
+	OfpQueueOpFailedCode_OFPQOFC_BAD_QUEUE OfpQueueOpFailedCode = 1
+	OfpQueueOpFailedCode_OFPQOFC_EPERM     OfpQueueOpFailedCode = 2
+)
+
+var OfpQueueOpFailedCode_name = map[int32]string{
+	0: "OFPQOFC_BAD_PORT",
+	1: "OFPQOFC_BAD_QUEUE",
+	2: "OFPQOFC_EPERM",
+}
+
+var OfpQueueOpFailedCode_value = map[string]int32{
+	"OFPQOFC_BAD_PORT":  0,
+	"OFPQOFC_BAD_QUEUE": 1,
+	"OFPQOFC_EPERM":     2,
+}
+
+func (x OfpQueueOpFailedCode) String() string {
+	return proto.EnumName(OfpQueueOpFailedCode_name, int32(x))
+}
+
+func (OfpQueueOpFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{41}
+}
+
+// ofp_error_msg 'code' values for OFPET_SWITCH_CONFIG_FAILED. 'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpSwitchConfigFailedCode int32
+
+const (
+	OfpSwitchConfigFailedCode_OFPSCFC_BAD_FLAGS OfpSwitchConfigFailedCode = 0
+	OfpSwitchConfigFailedCode_OFPSCFC_BAD_LEN   OfpSwitchConfigFailedCode = 1
+	OfpSwitchConfigFailedCode_OFPSCFC_EPERM     OfpSwitchConfigFailedCode = 2
+)
+
+var OfpSwitchConfigFailedCode_name = map[int32]string{
+	0: "OFPSCFC_BAD_FLAGS",
+	1: "OFPSCFC_BAD_LEN",
+	2: "OFPSCFC_EPERM",
+}
+
+var OfpSwitchConfigFailedCode_value = map[string]int32{
+	"OFPSCFC_BAD_FLAGS": 0,
+	"OFPSCFC_BAD_LEN":   1,
+	"OFPSCFC_EPERM":     2,
+}
+
+func (x OfpSwitchConfigFailedCode) String() string {
+	return proto.EnumName(OfpSwitchConfigFailedCode_name, int32(x))
+}
+
+func (OfpSwitchConfigFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{42}
+}
+
+// ofp_error_msg 'code' values for OFPET_ROLE_REQUEST_FAILED. 'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpRoleRequestFailedCode int32
+
+const (
+	OfpRoleRequestFailedCode_OFPRRFC_STALE    OfpRoleRequestFailedCode = 0
+	OfpRoleRequestFailedCode_OFPRRFC_UNSUP    OfpRoleRequestFailedCode = 1
+	OfpRoleRequestFailedCode_OFPRRFC_BAD_ROLE OfpRoleRequestFailedCode = 2
+)
+
+var OfpRoleRequestFailedCode_name = map[int32]string{
+	0: "OFPRRFC_STALE",
+	1: "OFPRRFC_UNSUP",
+	2: "OFPRRFC_BAD_ROLE",
+}
+
+var OfpRoleRequestFailedCode_value = map[string]int32{
+	"OFPRRFC_STALE":    0,
+	"OFPRRFC_UNSUP":    1,
+	"OFPRRFC_BAD_ROLE": 2,
+}
+
+func (x OfpRoleRequestFailedCode) String() string {
+	return proto.EnumName(OfpRoleRequestFailedCode_name, int32(x))
+}
+
+func (OfpRoleRequestFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{43}
+}
+
+// ofp_error_msg 'code' values for OFPET_METER_MOD_FAILED.  'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpMeterModFailedCode int32
+
+const (
+	OfpMeterModFailedCode_OFPMMFC_UNKNOWN         OfpMeterModFailedCode = 0
+	OfpMeterModFailedCode_OFPMMFC_METER_EXISTS    OfpMeterModFailedCode = 1
+	OfpMeterModFailedCode_OFPMMFC_INVALID_METER   OfpMeterModFailedCode = 2
+	OfpMeterModFailedCode_OFPMMFC_UNKNOWN_METER   OfpMeterModFailedCode = 3
+	OfpMeterModFailedCode_OFPMMFC_BAD_COMMAND     OfpMeterModFailedCode = 4
+	OfpMeterModFailedCode_OFPMMFC_BAD_FLAGS       OfpMeterModFailedCode = 5
+	OfpMeterModFailedCode_OFPMMFC_BAD_RATE        OfpMeterModFailedCode = 6
+	OfpMeterModFailedCode_OFPMMFC_BAD_BURST       OfpMeterModFailedCode = 7
+	OfpMeterModFailedCode_OFPMMFC_BAD_BAND        OfpMeterModFailedCode = 8
+	OfpMeterModFailedCode_OFPMMFC_BAD_BAND_DETAIL OfpMeterModFailedCode = 9
+	OfpMeterModFailedCode_OFPMMFC_OUT_OF_METERS   OfpMeterModFailedCode = 10
+	OfpMeterModFailedCode_OFPMMFC_OUT_OF_BANDS    OfpMeterModFailedCode = 11
+)
+
+var OfpMeterModFailedCode_name = map[int32]string{
+	0:  "OFPMMFC_UNKNOWN",
+	1:  "OFPMMFC_METER_EXISTS",
+	2:  "OFPMMFC_INVALID_METER",
+	3:  "OFPMMFC_UNKNOWN_METER",
+	4:  "OFPMMFC_BAD_COMMAND",
+	5:  "OFPMMFC_BAD_FLAGS",
+	6:  "OFPMMFC_BAD_RATE",
+	7:  "OFPMMFC_BAD_BURST",
+	8:  "OFPMMFC_BAD_BAND",
+	9:  "OFPMMFC_BAD_BAND_DETAIL",
+	10: "OFPMMFC_OUT_OF_METERS",
+	11: "OFPMMFC_OUT_OF_BANDS",
+}
+
+var OfpMeterModFailedCode_value = map[string]int32{
+	"OFPMMFC_UNKNOWN":         0,
+	"OFPMMFC_METER_EXISTS":    1,
+	"OFPMMFC_INVALID_METER":   2,
+	"OFPMMFC_UNKNOWN_METER":   3,
+	"OFPMMFC_BAD_COMMAND":     4,
+	"OFPMMFC_BAD_FLAGS":       5,
+	"OFPMMFC_BAD_RATE":        6,
+	"OFPMMFC_BAD_BURST":       7,
+	"OFPMMFC_BAD_BAND":        8,
+	"OFPMMFC_BAD_BAND_DETAIL": 9,
+	"OFPMMFC_OUT_OF_METERS":   10,
+	"OFPMMFC_OUT_OF_BANDS":    11,
+}
+
+func (x OfpMeterModFailedCode) String() string {
+	return proto.EnumName(OfpMeterModFailedCode_name, int32(x))
+}
+
+func (OfpMeterModFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{44}
+}
+
+// ofp_error_msg 'code' values for OFPET_TABLE_FEATURES_FAILED. 'data' contains
+// at least the first 64 bytes of the failed request.
+type OfpTableFeaturesFailedCode int32
+
+const (
+	OfpTableFeaturesFailedCode_OFPTFFC_BAD_TABLE    OfpTableFeaturesFailedCode = 0
+	OfpTableFeaturesFailedCode_OFPTFFC_BAD_METADATA OfpTableFeaturesFailedCode = 1
+	OfpTableFeaturesFailedCode_OFPTFFC_BAD_TYPE     OfpTableFeaturesFailedCode = 2
+	OfpTableFeaturesFailedCode_OFPTFFC_BAD_LEN      OfpTableFeaturesFailedCode = 3
+	OfpTableFeaturesFailedCode_OFPTFFC_BAD_ARGUMENT OfpTableFeaturesFailedCode = 4
+	OfpTableFeaturesFailedCode_OFPTFFC_EPERM        OfpTableFeaturesFailedCode = 5
+)
+
+var OfpTableFeaturesFailedCode_name = map[int32]string{
+	0: "OFPTFFC_BAD_TABLE",
+	1: "OFPTFFC_BAD_METADATA",
+	2: "OFPTFFC_BAD_TYPE",
+	3: "OFPTFFC_BAD_LEN",
+	4: "OFPTFFC_BAD_ARGUMENT",
+	5: "OFPTFFC_EPERM",
+}
+
+var OfpTableFeaturesFailedCode_value = map[string]int32{
+	"OFPTFFC_BAD_TABLE":    0,
+	"OFPTFFC_BAD_METADATA": 1,
+	"OFPTFFC_BAD_TYPE":     2,
+	"OFPTFFC_BAD_LEN":      3,
+	"OFPTFFC_BAD_ARGUMENT": 4,
+	"OFPTFFC_EPERM":        5,
+}
+
+func (x OfpTableFeaturesFailedCode) String() string {
+	return proto.EnumName(OfpTableFeaturesFailedCode_name, int32(x))
+}
+
+func (OfpTableFeaturesFailedCode) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{45}
+}
+
+type OfpMultipartType int32
+
+const (
+	// Description of this OpenFlow switch.
+	// The request body is empty.
+	// The reply body is struct ofp_desc.
+	OfpMultipartType_OFPMP_DESC OfpMultipartType = 0
+	// Individual flow statistics.
+	// The request body is struct ofp_flow_stats_request.
+	// The reply body is an array of struct ofp_flow_stats.
+	OfpMultipartType_OFPMP_FLOW OfpMultipartType = 1
+	// Aggregate flow statistics.
+	// The request body is struct ofp_aggregate_stats_request.
+	// The reply body is struct ofp_aggregate_stats_reply.
+	OfpMultipartType_OFPMP_AGGREGATE OfpMultipartType = 2
+	// Flow table statistics.
+	// The request body is empty.
+	// The reply body is an array of struct ofp_table_stats.
+	OfpMultipartType_OFPMP_TABLE OfpMultipartType = 3
+	// Port statistics.
+	// The request body is struct ofp_port_stats_request.
+	// The reply body is an array of struct ofp_port_stats.
+	OfpMultipartType_OFPMP_PORT_STATS OfpMultipartType = 4
+	// Queue statistics for a port
+	// The request body is struct ofp_queue_stats_request.
+	// The reply body is an array of struct ofp_queue_stats
+	OfpMultipartType_OFPMP_QUEUE OfpMultipartType = 5
+	// Group counter statistics.
+	// The request body is struct ofp_group_stats_request.
+	// The reply is an array of struct ofp_group_stats.
+	OfpMultipartType_OFPMP_GROUP OfpMultipartType = 6
+	// Group description.
+	// The request body is empty.
+	// The reply body is an array of struct ofp_group_desc.
+	OfpMultipartType_OFPMP_GROUP_DESC OfpMultipartType = 7
+	// Group features.
+	// The request body is empty.
+	// The reply body is struct ofp_group_features.
+	OfpMultipartType_OFPMP_GROUP_FEATURES OfpMultipartType = 8
+	// Meter statistics.
+	// The request body is struct ofp_meter_multipart_requests.
+	// The reply body is an array of struct ofp_meter_stats.
+	OfpMultipartType_OFPMP_METER OfpMultipartType = 9
+	// Meter configuration.
+	// The request body is struct ofp_meter_multipart_requests.
+	// The reply body is an array of struct ofp_meter_config.
+	OfpMultipartType_OFPMP_METER_CONFIG OfpMultipartType = 10
+	// Meter features.
+	// The request body is empty.
+	// The reply body is struct ofp_meter_features.
+	OfpMultipartType_OFPMP_METER_FEATURES OfpMultipartType = 11
+	// Table features.
+	// The request body is either empty or contains an array of
+	// struct ofp_table_features containing the controller's
+	// desired view of the switch. If the switch is unable to
+	// set the specified view an error is returned.
+	// The reply body is an array of struct ofp_table_features.
+	OfpMultipartType_OFPMP_TABLE_FEATURES OfpMultipartType = 12
+	// Port description.
+	// The request body is empty.
+	// The reply body is an array of struct ofp_port.
+	OfpMultipartType_OFPMP_PORT_DESC OfpMultipartType = 13
+	// Experimenter extension.
+	// The request and reply bodies begin with
+	// struct ofp_experimenter_multipart_header.
+	// The request and reply bodies are otherwise experimenter-defined.
+	OfpMultipartType_OFPMP_EXPERIMENTER OfpMultipartType = 65535
+)
+
+var OfpMultipartType_name = map[int32]string{
+	0:     "OFPMP_DESC",
+	1:     "OFPMP_FLOW",
+	2:     "OFPMP_AGGREGATE",
+	3:     "OFPMP_TABLE",
+	4:     "OFPMP_PORT_STATS",
+	5:     "OFPMP_QUEUE",
+	6:     "OFPMP_GROUP",
+	7:     "OFPMP_GROUP_DESC",
+	8:     "OFPMP_GROUP_FEATURES",
+	9:     "OFPMP_METER",
+	10:    "OFPMP_METER_CONFIG",
+	11:    "OFPMP_METER_FEATURES",
+	12:    "OFPMP_TABLE_FEATURES",
+	13:    "OFPMP_PORT_DESC",
+	65535: "OFPMP_EXPERIMENTER",
+}
+
+var OfpMultipartType_value = map[string]int32{
+	"OFPMP_DESC":           0,
+	"OFPMP_FLOW":           1,
+	"OFPMP_AGGREGATE":      2,
+	"OFPMP_TABLE":          3,
+	"OFPMP_PORT_STATS":     4,
+	"OFPMP_QUEUE":          5,
+	"OFPMP_GROUP":          6,
+	"OFPMP_GROUP_DESC":     7,
+	"OFPMP_GROUP_FEATURES": 8,
+	"OFPMP_METER":          9,
+	"OFPMP_METER_CONFIG":   10,
+	"OFPMP_METER_FEATURES": 11,
+	"OFPMP_TABLE_FEATURES": 12,
+	"OFPMP_PORT_DESC":      13,
+	"OFPMP_EXPERIMENTER":   65535,
+}
+
+func (x OfpMultipartType) String() string {
+	return proto.EnumName(OfpMultipartType_name, int32(x))
+}
+
+func (OfpMultipartType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{46}
+}
+
+type OfpMultipartRequestFlags int32
+
+const (
+	OfpMultipartRequestFlags_OFPMPF_REQ_INVALID OfpMultipartRequestFlags = 0
+	OfpMultipartRequestFlags_OFPMPF_REQ_MORE    OfpMultipartRequestFlags = 1
+)
+
+var OfpMultipartRequestFlags_name = map[int32]string{
+	0: "OFPMPF_REQ_INVALID",
+	1: "OFPMPF_REQ_MORE",
+}
+
+var OfpMultipartRequestFlags_value = map[string]int32{
+	"OFPMPF_REQ_INVALID": 0,
+	"OFPMPF_REQ_MORE":    1,
+}
+
+func (x OfpMultipartRequestFlags) String() string {
+	return proto.EnumName(OfpMultipartRequestFlags_name, int32(x))
+}
+
+func (OfpMultipartRequestFlags) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{47}
+}
+
+type OfpMultipartReplyFlags int32
+
+const (
+	OfpMultipartReplyFlags_OFPMPF_REPLY_INVALID OfpMultipartReplyFlags = 0
+	OfpMultipartReplyFlags_OFPMPF_REPLY_MORE    OfpMultipartReplyFlags = 1
+)
+
+var OfpMultipartReplyFlags_name = map[int32]string{
+	0: "OFPMPF_REPLY_INVALID",
+	1: "OFPMPF_REPLY_MORE",
+}
+
+var OfpMultipartReplyFlags_value = map[string]int32{
+	"OFPMPF_REPLY_INVALID": 0,
+	"OFPMPF_REPLY_MORE":    1,
+}
+
+func (x OfpMultipartReplyFlags) String() string {
+	return proto.EnumName(OfpMultipartReplyFlags_name, int32(x))
+}
+
+func (OfpMultipartReplyFlags) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{48}
+}
+
+// Table Feature property types.
+// Low order bit cleared indicates a property for a regular Flow Entry.
+// Low order bit set indicates a property for the Table-Miss Flow Entry.
+type OfpTableFeaturePropType int32
+
+const (
+	OfpTableFeaturePropType_OFPTFPT_INSTRUCTIONS        OfpTableFeaturePropType = 0
+	OfpTableFeaturePropType_OFPTFPT_INSTRUCTIONS_MISS   OfpTableFeaturePropType = 1
+	OfpTableFeaturePropType_OFPTFPT_NEXT_TABLES         OfpTableFeaturePropType = 2
+	OfpTableFeaturePropType_OFPTFPT_NEXT_TABLES_MISS    OfpTableFeaturePropType = 3
+	OfpTableFeaturePropType_OFPTFPT_WRITE_ACTIONS       OfpTableFeaturePropType = 4
+	OfpTableFeaturePropType_OFPTFPT_WRITE_ACTIONS_MISS  OfpTableFeaturePropType = 5
+	OfpTableFeaturePropType_OFPTFPT_APPLY_ACTIONS       OfpTableFeaturePropType = 6
+	OfpTableFeaturePropType_OFPTFPT_APPLY_ACTIONS_MISS  OfpTableFeaturePropType = 7
+	OfpTableFeaturePropType_OFPTFPT_MATCH               OfpTableFeaturePropType = 8
+	OfpTableFeaturePropType_OFPTFPT_WILDCARDS           OfpTableFeaturePropType = 10
+	OfpTableFeaturePropType_OFPTFPT_WRITE_SETFIELD      OfpTableFeaturePropType = 12
+	OfpTableFeaturePropType_OFPTFPT_WRITE_SETFIELD_MISS OfpTableFeaturePropType = 13
+	OfpTableFeaturePropType_OFPTFPT_APPLY_SETFIELD      OfpTableFeaturePropType = 14
+	OfpTableFeaturePropType_OFPTFPT_APPLY_SETFIELD_MISS OfpTableFeaturePropType = 15
+	OfpTableFeaturePropType_OFPTFPT_EXPERIMENTER        OfpTableFeaturePropType = 65534
+	OfpTableFeaturePropType_OFPTFPT_EXPERIMENTER_MISS   OfpTableFeaturePropType = 65535
+)
+
+var OfpTableFeaturePropType_name = map[int32]string{
+	0:     "OFPTFPT_INSTRUCTIONS",
+	1:     "OFPTFPT_INSTRUCTIONS_MISS",
+	2:     "OFPTFPT_NEXT_TABLES",
+	3:     "OFPTFPT_NEXT_TABLES_MISS",
+	4:     "OFPTFPT_WRITE_ACTIONS",
+	5:     "OFPTFPT_WRITE_ACTIONS_MISS",
+	6:     "OFPTFPT_APPLY_ACTIONS",
+	7:     "OFPTFPT_APPLY_ACTIONS_MISS",
+	8:     "OFPTFPT_MATCH",
+	10:    "OFPTFPT_WILDCARDS",
+	12:    "OFPTFPT_WRITE_SETFIELD",
+	13:    "OFPTFPT_WRITE_SETFIELD_MISS",
+	14:    "OFPTFPT_APPLY_SETFIELD",
+	15:    "OFPTFPT_APPLY_SETFIELD_MISS",
+	65534: "OFPTFPT_EXPERIMENTER",
+	65535: "OFPTFPT_EXPERIMENTER_MISS",
+}
+
+var OfpTableFeaturePropType_value = map[string]int32{
+	"OFPTFPT_INSTRUCTIONS":        0,
+	"OFPTFPT_INSTRUCTIONS_MISS":   1,
+	"OFPTFPT_NEXT_TABLES":         2,
+	"OFPTFPT_NEXT_TABLES_MISS":    3,
+	"OFPTFPT_WRITE_ACTIONS":       4,
+	"OFPTFPT_WRITE_ACTIONS_MISS":  5,
+	"OFPTFPT_APPLY_ACTIONS":       6,
+	"OFPTFPT_APPLY_ACTIONS_MISS":  7,
+	"OFPTFPT_MATCH":               8,
+	"OFPTFPT_WILDCARDS":           10,
+	"OFPTFPT_WRITE_SETFIELD":      12,
+	"OFPTFPT_WRITE_SETFIELD_MISS": 13,
+	"OFPTFPT_APPLY_SETFIELD":      14,
+	"OFPTFPT_APPLY_SETFIELD_MISS": 15,
+	"OFPTFPT_EXPERIMENTER":        65534,
+	"OFPTFPT_EXPERIMENTER_MISS":   65535,
+}
+
+func (x OfpTableFeaturePropType) String() string {
+	return proto.EnumName(OfpTableFeaturePropType_name, int32(x))
+}
+
+func (OfpTableFeaturePropType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{49}
+}
+
+// Group configuration flags
+type OfpGroupCapabilities int32
+
+const (
+	OfpGroupCapabilities_OFPGFC_INVALID         OfpGroupCapabilities = 0
+	OfpGroupCapabilities_OFPGFC_SELECT_WEIGHT   OfpGroupCapabilities = 1
+	OfpGroupCapabilities_OFPGFC_SELECT_LIVENESS OfpGroupCapabilities = 2
+	OfpGroupCapabilities_OFPGFC_CHAINING        OfpGroupCapabilities = 4
+	OfpGroupCapabilities_OFPGFC_CHAINING_CHECKS OfpGroupCapabilities = 8
+)
+
+var OfpGroupCapabilities_name = map[int32]string{
+	0: "OFPGFC_INVALID",
+	1: "OFPGFC_SELECT_WEIGHT",
+	2: "OFPGFC_SELECT_LIVENESS",
+	4: "OFPGFC_CHAINING",
+	8: "OFPGFC_CHAINING_CHECKS",
+}
+
+var OfpGroupCapabilities_value = map[string]int32{
+	"OFPGFC_INVALID":         0,
+	"OFPGFC_SELECT_WEIGHT":   1,
+	"OFPGFC_SELECT_LIVENESS": 2,
+	"OFPGFC_CHAINING":        4,
+	"OFPGFC_CHAINING_CHECKS": 8,
+}
+
+func (x OfpGroupCapabilities) String() string {
+	return proto.EnumName(OfpGroupCapabilities_name, int32(x))
+}
+
+func (OfpGroupCapabilities) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{50}
+}
+
+type OfpQueueProperties int32
+
+const (
+	OfpQueueProperties_OFPQT_INVALID      OfpQueueProperties = 0
+	OfpQueueProperties_OFPQT_MIN_RATE     OfpQueueProperties = 1
+	OfpQueueProperties_OFPQT_MAX_RATE     OfpQueueProperties = 2
+	OfpQueueProperties_OFPQT_EXPERIMENTER OfpQueueProperties = 65535
+)
+
+var OfpQueueProperties_name = map[int32]string{
+	0:     "OFPQT_INVALID",
+	1:     "OFPQT_MIN_RATE",
+	2:     "OFPQT_MAX_RATE",
+	65535: "OFPQT_EXPERIMENTER",
+}
+
+var OfpQueueProperties_value = map[string]int32{
+	"OFPQT_INVALID":      0,
+	"OFPQT_MIN_RATE":     1,
+	"OFPQT_MAX_RATE":     2,
+	"OFPQT_EXPERIMENTER": 65535,
+}
+
+func (x OfpQueueProperties) String() string {
+	return proto.EnumName(OfpQueueProperties_name, int32(x))
+}
+
+func (OfpQueueProperties) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{51}
+}
+
+// Controller roles.
+type OfpControllerRole int32
+
+const (
+	OfpControllerRole_OFPCR_ROLE_NOCHANGE OfpControllerRole = 0
+	OfpControllerRole_OFPCR_ROLE_EQUAL    OfpControllerRole = 1
+	OfpControllerRole_OFPCR_ROLE_MASTER   OfpControllerRole = 2
+	OfpControllerRole_OFPCR_ROLE_SLAVE    OfpControllerRole = 3
+)
+
+var OfpControllerRole_name = map[int32]string{
+	0: "OFPCR_ROLE_NOCHANGE",
+	1: "OFPCR_ROLE_EQUAL",
+	2: "OFPCR_ROLE_MASTER",
+	3: "OFPCR_ROLE_SLAVE",
+}
+
+var OfpControllerRole_value = map[string]int32{
+	"OFPCR_ROLE_NOCHANGE": 0,
+	"OFPCR_ROLE_EQUAL":    1,
+	"OFPCR_ROLE_MASTER":   2,
+	"OFPCR_ROLE_SLAVE":    3,
+}
+
+func (x OfpControllerRole) String() string {
+	return proto.EnumName(OfpControllerRole_name, int32(x))
+}
+
+func (OfpControllerRole) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{52}
+}
+
+// Header on all OpenFlow packets.
+type OfpHeader struct {
+	Version              uint32   `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"`
+	Type                 OfpType  `protobuf:"varint,2,opt,name=type,proto3,enum=openflow_13.OfpType" json:"type,omitempty"`
+	Xid                  uint32   `protobuf:"varint,3,opt,name=xid,proto3" json:"xid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpHeader) Reset()         { *m = OfpHeader{} }
+func (m *OfpHeader) String() string { return proto.CompactTextString(m) }
+func (*OfpHeader) ProtoMessage()    {}
+func (*OfpHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{0}
+}
+
+func (m *OfpHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpHeader.Unmarshal(m, b)
+}
+func (m *OfpHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpHeader.Marshal(b, m, deterministic)
+}
+func (m *OfpHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpHeader.Merge(m, src)
+}
+func (m *OfpHeader) XXX_Size() int {
+	return xxx_messageInfo_OfpHeader.Size(m)
+}
+func (m *OfpHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpHeader proto.InternalMessageInfo
+
+func (m *OfpHeader) GetVersion() uint32 {
+	if m != nil {
+		return m.Version
+	}
+	return 0
+}
+
+func (m *OfpHeader) GetType() OfpType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpType_OFPT_HELLO
+}
+
+func (m *OfpHeader) GetXid() uint32 {
+	if m != nil {
+		return m.Xid
+	}
+	return 0
+}
+
+// Common header for all Hello Elements
+type OfpHelloElemHeader struct {
+	Type OfpHelloElemType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpHelloElemType" json:"type,omitempty"`
+	// Types that are valid to be assigned to Element:
+	//	*OfpHelloElemHeader_Versionbitmap
+	Element              isOfpHelloElemHeader_Element `protobuf_oneof:"element"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *OfpHelloElemHeader) Reset()         { *m = OfpHelloElemHeader{} }
+func (m *OfpHelloElemHeader) String() string { return proto.CompactTextString(m) }
+func (*OfpHelloElemHeader) ProtoMessage()    {}
+func (*OfpHelloElemHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{1}
+}
+
+func (m *OfpHelloElemHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpHelloElemHeader.Unmarshal(m, b)
+}
+func (m *OfpHelloElemHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpHelloElemHeader.Marshal(b, m, deterministic)
+}
+func (m *OfpHelloElemHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpHelloElemHeader.Merge(m, src)
+}
+func (m *OfpHelloElemHeader) XXX_Size() int {
+	return xxx_messageInfo_OfpHelloElemHeader.Size(m)
+}
+func (m *OfpHelloElemHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpHelloElemHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpHelloElemHeader proto.InternalMessageInfo
+
+func (m *OfpHelloElemHeader) GetType() OfpHelloElemType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpHelloElemType_OFPHET_INVALID
+}
+
+type isOfpHelloElemHeader_Element interface {
+	isOfpHelloElemHeader_Element()
+}
+
+type OfpHelloElemHeader_Versionbitmap struct {
+	Versionbitmap *OfpHelloElemVersionbitmap `protobuf:"bytes,2,opt,name=versionbitmap,proto3,oneof"`
+}
+
+func (*OfpHelloElemHeader_Versionbitmap) isOfpHelloElemHeader_Element() {}
+
+func (m *OfpHelloElemHeader) GetElement() isOfpHelloElemHeader_Element {
+	if m != nil {
+		return m.Element
+	}
+	return nil
+}
+
+func (m *OfpHelloElemHeader) GetVersionbitmap() *OfpHelloElemVersionbitmap {
+	if x, ok := m.GetElement().(*OfpHelloElemHeader_Versionbitmap); ok {
+		return x.Versionbitmap
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpHelloElemHeader) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpHelloElemHeader_Versionbitmap)(nil),
+	}
+}
+
+// Version bitmap Hello Element
+type OfpHelloElemVersionbitmap struct {
+	Bitmaps              []uint32 `protobuf:"varint,2,rep,packed,name=bitmaps,proto3" json:"bitmaps,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpHelloElemVersionbitmap) Reset()         { *m = OfpHelloElemVersionbitmap{} }
+func (m *OfpHelloElemVersionbitmap) String() string { return proto.CompactTextString(m) }
+func (*OfpHelloElemVersionbitmap) ProtoMessage()    {}
+func (*OfpHelloElemVersionbitmap) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{2}
+}
+
+func (m *OfpHelloElemVersionbitmap) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpHelloElemVersionbitmap.Unmarshal(m, b)
+}
+func (m *OfpHelloElemVersionbitmap) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpHelloElemVersionbitmap.Marshal(b, m, deterministic)
+}
+func (m *OfpHelloElemVersionbitmap) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpHelloElemVersionbitmap.Merge(m, src)
+}
+func (m *OfpHelloElemVersionbitmap) XXX_Size() int {
+	return xxx_messageInfo_OfpHelloElemVersionbitmap.Size(m)
+}
+func (m *OfpHelloElemVersionbitmap) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpHelloElemVersionbitmap.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpHelloElemVersionbitmap proto.InternalMessageInfo
+
+func (m *OfpHelloElemVersionbitmap) GetBitmaps() []uint32 {
+	if m != nil {
+		return m.Bitmaps
+	}
+	return nil
+}
+
+// OFPT_HELLO.  This message includes zero or more hello elements having
+// variable size. Unknown elements types must be ignored/skipped, to allow
+// for future extensions.
+type OfpHello struct {
+	// Hello element list
+	Elements             []*OfpHelloElemHeader `protobuf:"bytes,1,rep,name=elements,proto3" json:"elements,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *OfpHello) Reset()         { *m = OfpHello{} }
+func (m *OfpHello) String() string { return proto.CompactTextString(m) }
+func (*OfpHello) ProtoMessage()    {}
+func (*OfpHello) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{3}
+}
+
+func (m *OfpHello) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpHello.Unmarshal(m, b)
+}
+func (m *OfpHello) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpHello.Marshal(b, m, deterministic)
+}
+func (m *OfpHello) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpHello.Merge(m, src)
+}
+func (m *OfpHello) XXX_Size() int {
+	return xxx_messageInfo_OfpHello.Size(m)
+}
+func (m *OfpHello) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpHello.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpHello proto.InternalMessageInfo
+
+func (m *OfpHello) GetElements() []*OfpHelloElemHeader {
+	if m != nil {
+		return m.Elements
+	}
+	return nil
+}
+
+// Switch configuration.
+type OfpSwitchConfig struct {
+	//ofp_header header;
+	Flags                uint32   `protobuf:"varint,1,opt,name=flags,proto3" json:"flags,omitempty"`
+	MissSendLen          uint32   `protobuf:"varint,2,opt,name=miss_send_len,json=missSendLen,proto3" json:"miss_send_len,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpSwitchConfig) Reset()         { *m = OfpSwitchConfig{} }
+func (m *OfpSwitchConfig) String() string { return proto.CompactTextString(m) }
+func (*OfpSwitchConfig) ProtoMessage()    {}
+func (*OfpSwitchConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{4}
+}
+
+func (m *OfpSwitchConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpSwitchConfig.Unmarshal(m, b)
+}
+func (m *OfpSwitchConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpSwitchConfig.Marshal(b, m, deterministic)
+}
+func (m *OfpSwitchConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpSwitchConfig.Merge(m, src)
+}
+func (m *OfpSwitchConfig) XXX_Size() int {
+	return xxx_messageInfo_OfpSwitchConfig.Size(m)
+}
+func (m *OfpSwitchConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpSwitchConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpSwitchConfig proto.InternalMessageInfo
+
+func (m *OfpSwitchConfig) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpSwitchConfig) GetMissSendLen() uint32 {
+	if m != nil {
+		return m.MissSendLen
+	}
+	return 0
+}
+
+// Configure/Modify behavior of a flow table
+type OfpTableMod struct {
+	//ofp_header header;
+	TableId              uint32   `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	Config               uint32   `protobuf:"varint,2,opt,name=config,proto3" json:"config,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpTableMod) Reset()         { *m = OfpTableMod{} }
+func (m *OfpTableMod) String() string { return proto.CompactTextString(m) }
+func (*OfpTableMod) ProtoMessage()    {}
+func (*OfpTableMod) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{5}
+}
+
+func (m *OfpTableMod) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableMod.Unmarshal(m, b)
+}
+func (m *OfpTableMod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableMod.Marshal(b, m, deterministic)
+}
+func (m *OfpTableMod) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableMod.Merge(m, src)
+}
+func (m *OfpTableMod) XXX_Size() int {
+	return xxx_messageInfo_OfpTableMod.Size(m)
+}
+func (m *OfpTableMod) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableMod.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableMod proto.InternalMessageInfo
+
+func (m *OfpTableMod) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpTableMod) GetConfig() uint32 {
+	if m != nil {
+		return m.Config
+	}
+	return 0
+}
+
+// Description of a port
+type OfpPort struct {
+	PortNo uint32   `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	HwAddr []uint32 `protobuf:"varint,2,rep,packed,name=hw_addr,json=hwAddr,proto3" json:"hw_addr,omitempty"`
+	Name   string   `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"`
+	Config uint32   `protobuf:"varint,4,opt,name=config,proto3" json:"config,omitempty"`
+	State  uint32   `protobuf:"varint,5,opt,name=state,proto3" json:"state,omitempty"`
+	// Bitmaps of OFPPF_* that describe features.  All bits zeroed if
+	// unsupported or unavailable.
+	Curr                 uint32   `protobuf:"varint,6,opt,name=curr,proto3" json:"curr,omitempty"`
+	Advertised           uint32   `protobuf:"varint,7,opt,name=advertised,proto3" json:"advertised,omitempty"`
+	Supported            uint32   `protobuf:"varint,8,opt,name=supported,proto3" json:"supported,omitempty"`
+	Peer                 uint32   `protobuf:"varint,9,opt,name=peer,proto3" json:"peer,omitempty"`
+	CurrSpeed            uint32   `protobuf:"varint,10,opt,name=curr_speed,json=currSpeed,proto3" json:"curr_speed,omitempty"`
+	MaxSpeed             uint32   `protobuf:"varint,11,opt,name=max_speed,json=maxSpeed,proto3" json:"max_speed,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpPort) Reset()         { *m = OfpPort{} }
+func (m *OfpPort) String() string { return proto.CompactTextString(m) }
+func (*OfpPort) ProtoMessage()    {}
+func (*OfpPort) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{6}
+}
+
+func (m *OfpPort) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPort.Unmarshal(m, b)
+}
+func (m *OfpPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPort.Marshal(b, m, deterministic)
+}
+func (m *OfpPort) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPort.Merge(m, src)
+}
+func (m *OfpPort) XXX_Size() int {
+	return xxx_messageInfo_OfpPort.Size(m)
+}
+func (m *OfpPort) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPort proto.InternalMessageInfo
+
+func (m *OfpPort) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *OfpPort) GetHwAddr() []uint32 {
+	if m != nil {
+		return m.HwAddr
+	}
+	return nil
+}
+
+func (m *OfpPort) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *OfpPort) GetConfig() uint32 {
+	if m != nil {
+		return m.Config
+	}
+	return 0
+}
+
+func (m *OfpPort) GetState() uint32 {
+	if m != nil {
+		return m.State
+	}
+	return 0
+}
+
+func (m *OfpPort) GetCurr() uint32 {
+	if m != nil {
+		return m.Curr
+	}
+	return 0
+}
+
+func (m *OfpPort) GetAdvertised() uint32 {
+	if m != nil {
+		return m.Advertised
+	}
+	return 0
+}
+
+func (m *OfpPort) GetSupported() uint32 {
+	if m != nil {
+		return m.Supported
+	}
+	return 0
+}
+
+func (m *OfpPort) GetPeer() uint32 {
+	if m != nil {
+		return m.Peer
+	}
+	return 0
+}
+
+func (m *OfpPort) GetCurrSpeed() uint32 {
+	if m != nil {
+		return m.CurrSpeed
+	}
+	return 0
+}
+
+func (m *OfpPort) GetMaxSpeed() uint32 {
+	if m != nil {
+		return m.MaxSpeed
+	}
+	return 0
+}
+
+// Switch features.
+type OfpSwitchFeatures struct {
+	//ofp_header header;
+	DatapathId  uint64 `protobuf:"varint,1,opt,name=datapath_id,json=datapathId,proto3" json:"datapath_id,omitempty"`
+	NBuffers    uint32 `protobuf:"varint,2,opt,name=n_buffers,json=nBuffers,proto3" json:"n_buffers,omitempty"`
+	NTables     uint32 `protobuf:"varint,3,opt,name=n_tables,json=nTables,proto3" json:"n_tables,omitempty"`
+	AuxiliaryId uint32 `protobuf:"varint,4,opt,name=auxiliary_id,json=auxiliaryId,proto3" json:"auxiliary_id,omitempty"`
+	// Features.
+	Capabilities         uint32   `protobuf:"varint,5,opt,name=capabilities,proto3" json:"capabilities,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpSwitchFeatures) Reset()         { *m = OfpSwitchFeatures{} }
+func (m *OfpSwitchFeatures) String() string { return proto.CompactTextString(m) }
+func (*OfpSwitchFeatures) ProtoMessage()    {}
+func (*OfpSwitchFeatures) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{7}
+}
+
+func (m *OfpSwitchFeatures) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpSwitchFeatures.Unmarshal(m, b)
+}
+func (m *OfpSwitchFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpSwitchFeatures.Marshal(b, m, deterministic)
+}
+func (m *OfpSwitchFeatures) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpSwitchFeatures.Merge(m, src)
+}
+func (m *OfpSwitchFeatures) XXX_Size() int {
+	return xxx_messageInfo_OfpSwitchFeatures.Size(m)
+}
+func (m *OfpSwitchFeatures) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpSwitchFeatures.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpSwitchFeatures proto.InternalMessageInfo
+
+func (m *OfpSwitchFeatures) GetDatapathId() uint64 {
+	if m != nil {
+		return m.DatapathId
+	}
+	return 0
+}
+
+func (m *OfpSwitchFeatures) GetNBuffers() uint32 {
+	if m != nil {
+		return m.NBuffers
+	}
+	return 0
+}
+
+func (m *OfpSwitchFeatures) GetNTables() uint32 {
+	if m != nil {
+		return m.NTables
+	}
+	return 0
+}
+
+func (m *OfpSwitchFeatures) GetAuxiliaryId() uint32 {
+	if m != nil {
+		return m.AuxiliaryId
+	}
+	return 0
+}
+
+func (m *OfpSwitchFeatures) GetCapabilities() uint32 {
+	if m != nil {
+		return m.Capabilities
+	}
+	return 0
+}
+
+// A physical port has changed in the datapath
+type OfpPortStatus struct {
+	//ofp_header header;
+	Reason               OfpPortReason `protobuf:"varint,1,opt,name=reason,proto3,enum=openflow_13.OfpPortReason" json:"reason,omitempty"`
+	Desc                 *OfpPort      `protobuf:"bytes,2,opt,name=desc,proto3" json:"desc,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *OfpPortStatus) Reset()         { *m = OfpPortStatus{} }
+func (m *OfpPortStatus) String() string { return proto.CompactTextString(m) }
+func (*OfpPortStatus) ProtoMessage()    {}
+func (*OfpPortStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{8}
+}
+
+func (m *OfpPortStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPortStatus.Unmarshal(m, b)
+}
+func (m *OfpPortStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPortStatus.Marshal(b, m, deterministic)
+}
+func (m *OfpPortStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPortStatus.Merge(m, src)
+}
+func (m *OfpPortStatus) XXX_Size() int {
+	return xxx_messageInfo_OfpPortStatus.Size(m)
+}
+func (m *OfpPortStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPortStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPortStatus proto.InternalMessageInfo
+
+func (m *OfpPortStatus) GetReason() OfpPortReason {
+	if m != nil {
+		return m.Reason
+	}
+	return OfpPortReason_OFPPR_ADD
+}
+
+func (m *OfpPortStatus) GetDesc() *OfpPort {
+	if m != nil {
+		return m.Desc
+	}
+	return nil
+}
+
+// A physical device has changed in the datapath
+type OfpDeviceStatus struct {
+	//ofp_header header;
+	Status               OfpDeviceConnection `protobuf:"varint,1,opt,name=status,proto3,enum=openflow_13.OfpDeviceConnection" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *OfpDeviceStatus) Reset()         { *m = OfpDeviceStatus{} }
+func (m *OfpDeviceStatus) String() string { return proto.CompactTextString(m) }
+func (*OfpDeviceStatus) ProtoMessage()    {}
+func (*OfpDeviceStatus) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{9}
+}
+
+func (m *OfpDeviceStatus) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpDeviceStatus.Unmarshal(m, b)
+}
+func (m *OfpDeviceStatus) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpDeviceStatus.Marshal(b, m, deterministic)
+}
+func (m *OfpDeviceStatus) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpDeviceStatus.Merge(m, src)
+}
+func (m *OfpDeviceStatus) XXX_Size() int {
+	return xxx_messageInfo_OfpDeviceStatus.Size(m)
+}
+func (m *OfpDeviceStatus) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpDeviceStatus.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpDeviceStatus proto.InternalMessageInfo
+
+func (m *OfpDeviceStatus) GetStatus() OfpDeviceConnection {
+	if m != nil {
+		return m.Status
+	}
+	return OfpDeviceConnection_OFPDEV_CONNECTED
+}
+
+// Modify behavior of the physical port
+type OfpPortMod struct {
+	//ofp_header header;
+	PortNo uint32   `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	HwAddr []uint32 `protobuf:"varint,2,rep,packed,name=hw_addr,json=hwAddr,proto3" json:"hw_addr,omitempty"`
+	// The hardware address is not
+	//configurable.  This is used to
+	//sanity-check the request, so it must
+	//be the same as returned in an
+	//ofp_port struct.
+	Config               uint32   `protobuf:"varint,3,opt,name=config,proto3" json:"config,omitempty"`
+	Mask                 uint32   `protobuf:"varint,4,opt,name=mask,proto3" json:"mask,omitempty"`
+	Advertise            uint32   `protobuf:"varint,5,opt,name=advertise,proto3" json:"advertise,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpPortMod) Reset()         { *m = OfpPortMod{} }
+func (m *OfpPortMod) String() string { return proto.CompactTextString(m) }
+func (*OfpPortMod) ProtoMessage()    {}
+func (*OfpPortMod) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{10}
+}
+
+func (m *OfpPortMod) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPortMod.Unmarshal(m, b)
+}
+func (m *OfpPortMod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPortMod.Marshal(b, m, deterministic)
+}
+func (m *OfpPortMod) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPortMod.Merge(m, src)
+}
+func (m *OfpPortMod) XXX_Size() int {
+	return xxx_messageInfo_OfpPortMod.Size(m)
+}
+func (m *OfpPortMod) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPortMod.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPortMod proto.InternalMessageInfo
+
+func (m *OfpPortMod) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *OfpPortMod) GetHwAddr() []uint32 {
+	if m != nil {
+		return m.HwAddr
+	}
+	return nil
+}
+
+func (m *OfpPortMod) GetConfig() uint32 {
+	if m != nil {
+		return m.Config
+	}
+	return 0
+}
+
+func (m *OfpPortMod) GetMask() uint32 {
+	if m != nil {
+		return m.Mask
+	}
+	return 0
+}
+
+func (m *OfpPortMod) GetAdvertise() uint32 {
+	if m != nil {
+		return m.Advertise
+	}
+	return 0
+}
+
+// Fields to match against flows
+type OfpMatch struct {
+	Type                 OfpMatchType   `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpMatchType" json:"type,omitempty"`
+	OxmFields            []*OfpOxmField `protobuf:"bytes,2,rep,name=oxm_fields,json=oxmFields,proto3" json:"oxm_fields,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *OfpMatch) Reset()         { *m = OfpMatch{} }
+func (m *OfpMatch) String() string { return proto.CompactTextString(m) }
+func (*OfpMatch) ProtoMessage()    {}
+func (*OfpMatch) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{11}
+}
+
+func (m *OfpMatch) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMatch.Unmarshal(m, b)
+}
+func (m *OfpMatch) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMatch.Marshal(b, m, deterministic)
+}
+func (m *OfpMatch) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMatch.Merge(m, src)
+}
+func (m *OfpMatch) XXX_Size() int {
+	return xxx_messageInfo_OfpMatch.Size(m)
+}
+func (m *OfpMatch) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMatch.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMatch proto.InternalMessageInfo
+
+func (m *OfpMatch) GetType() OfpMatchType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpMatchType_OFPMT_STANDARD
+}
+
+func (m *OfpMatch) GetOxmFields() []*OfpOxmField {
+	if m != nil {
+		return m.OxmFields
+	}
+	return nil
+}
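+
+// Example (illustrative sketch): an OXM match on a single ingress port. The
+// OfpMatchType_OFPMT_OXM, OfpOxmClass_OFPXMC_OPENFLOW_BASIC and
+// OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT constants are assumed to follow the
+// enum naming used elsewhere in this package; the oneof value is carried via
+// the generated OfpOxmField_OfbField and OfpOxmOfbField_Port wrapper types.
+func exampleInPortMatch(inPort uint32) *OfpMatch {
+	return &OfpMatch{
+		Type: OfpMatchType_OFPMT_OXM,
+		OxmFields: []*OfpOxmField{{
+			OxmClass: OfpOxmClass_OFPXMC_OPENFLOW_BASIC,
+			Field: &OfpOxmField_OfbField{
+				OfbField: &OfpOxmOfbField{
+					Type:  OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT,
+					Value: &OfpOxmOfbField_Port{Port: inPort},
+				},
+			},
+		}},
+	}
+}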
+
+// OXM Flow match fields
+type OfpOxmField struct {
+	OxmClass OfpOxmClass `protobuf:"varint,1,opt,name=oxm_class,json=oxmClass,proto3,enum=openflow_13.OfpOxmClass" json:"oxm_class,omitempty"`
+	// Types that are valid to be assigned to Field:
+	//	*OfpOxmField_OfbField
+	//	*OfpOxmField_ExperimenterField
+	Field                isOfpOxmField_Field `protobuf_oneof:"field"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *OfpOxmField) Reset()         { *m = OfpOxmField{} }
+func (m *OfpOxmField) String() string { return proto.CompactTextString(m) }
+func (*OfpOxmField) ProtoMessage()    {}
+func (*OfpOxmField) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{12}
+}
+
+func (m *OfpOxmField) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpOxmField.Unmarshal(m, b)
+}
+func (m *OfpOxmField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpOxmField.Marshal(b, m, deterministic)
+}
+func (m *OfpOxmField) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpOxmField.Merge(m, src)
+}
+func (m *OfpOxmField) XXX_Size() int {
+	return xxx_messageInfo_OfpOxmField.Size(m)
+}
+func (m *OfpOxmField) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpOxmField.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpOxmField proto.InternalMessageInfo
+
+func (m *OfpOxmField) GetOxmClass() OfpOxmClass {
+	if m != nil {
+		return m.OxmClass
+	}
+	return OfpOxmClass_OFPXMC_NXM_0
+}
+
+type isOfpOxmField_Field interface {
+	isOfpOxmField_Field()
+}
+
+type OfpOxmField_OfbField struct {
+	OfbField *OfpOxmOfbField `protobuf:"bytes,4,opt,name=ofb_field,json=ofbField,proto3,oneof"`
+}
+
+type OfpOxmField_ExperimenterField struct {
+	ExperimenterField *OfpOxmExperimenterField `protobuf:"bytes,5,opt,name=experimenter_field,json=experimenterField,proto3,oneof"`
+}
+
+func (*OfpOxmField_OfbField) isOfpOxmField_Field() {}
+
+func (*OfpOxmField_ExperimenterField) isOfpOxmField_Field() {}
+
+func (m *OfpOxmField) GetField() isOfpOxmField_Field {
+	if m != nil {
+		return m.Field
+	}
+	return nil
+}
+
+func (m *OfpOxmField) GetOfbField() *OfpOxmOfbField {
+	if x, ok := m.GetField().(*OfpOxmField_OfbField); ok {
+		return x.OfbField
+	}
+	return nil
+}
+
+func (m *OfpOxmField) GetExperimenterField() *OfpOxmExperimenterField {
+	if x, ok := m.GetField().(*OfpOxmField_ExperimenterField); ok {
+		return x.ExperimenterField
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpOxmField) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpOxmField_OfbField)(nil),
+		(*OfpOxmField_ExperimenterField)(nil),
+	}
+}
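+
+// Example (illustrative sketch): the Field oneof is read back either through
+// the typed getters above or with a type switch over GetField(); both forms
+// handle a nil receiver and an unset oneof safely.
+func exampleOxmFieldKind(f *OfpOxmField) string {
+	switch f.GetField().(type) {
+	case *OfpOxmField_OfbField:
+		return "openflow-basic"
+	case *OfpOxmField_ExperimenterField:
+		return "experimenter"
+	default:
+		return "unset"
+	}
+}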
+
+// OXM OpenFlow Basic Match Field
+type OfpOxmOfbField struct {
+	Type    OxmOfbFieldTypes `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OxmOfbFieldTypes" json:"type,omitempty"`
+	HasMask bool             `protobuf:"varint,2,opt,name=has_mask,json=hasMask,proto3" json:"has_mask,omitempty"`
+	// Types that are valid to be assigned to Value:
+	//	*OfpOxmOfbField_Port
+	//	*OfpOxmOfbField_PhysicalPort
+	//	*OfpOxmOfbField_TableMetadata
+	//	*OfpOxmOfbField_EthDst
+	//	*OfpOxmOfbField_EthSrc
+	//	*OfpOxmOfbField_EthType
+	//	*OfpOxmOfbField_VlanVid
+	//	*OfpOxmOfbField_VlanPcp
+	//	*OfpOxmOfbField_IpDscp
+	//	*OfpOxmOfbField_IpEcn
+	//	*OfpOxmOfbField_IpProto
+	//	*OfpOxmOfbField_Ipv4Src
+	//	*OfpOxmOfbField_Ipv4Dst
+	//	*OfpOxmOfbField_TcpSrc
+	//	*OfpOxmOfbField_TcpDst
+	//	*OfpOxmOfbField_UdpSrc
+	//	*OfpOxmOfbField_UdpDst
+	//	*OfpOxmOfbField_SctpSrc
+	//	*OfpOxmOfbField_SctpDst
+	//	*OfpOxmOfbField_Icmpv4Type
+	//	*OfpOxmOfbField_Icmpv4Code
+	//	*OfpOxmOfbField_ArpOp
+	//	*OfpOxmOfbField_ArpSpa
+	//	*OfpOxmOfbField_ArpTpa
+	//	*OfpOxmOfbField_ArpSha
+	//	*OfpOxmOfbField_ArpTha
+	//	*OfpOxmOfbField_Ipv6Src
+	//	*OfpOxmOfbField_Ipv6Dst
+	//	*OfpOxmOfbField_Ipv6Flabel
+	//	*OfpOxmOfbField_Icmpv6Type
+	//	*OfpOxmOfbField_Icmpv6Code
+	//	*OfpOxmOfbField_Ipv6NdTarget
+	//	*OfpOxmOfbField_Ipv6NdSsl
+	//	*OfpOxmOfbField_Ipv6NdTll
+	//	*OfpOxmOfbField_MplsLabel
+	//	*OfpOxmOfbField_MplsTc
+	//	*OfpOxmOfbField_MplsBos
+	//	*OfpOxmOfbField_PbbIsid
+	//	*OfpOxmOfbField_TunnelId
+	//	*OfpOxmOfbField_Ipv6Exthdr
+	Value isOfpOxmOfbField_Value `protobuf_oneof:"value"`
+	// Optional mask values (must be present when has_mask is true)
+	//
+	// Types that are valid to be assigned to Mask:
+	//	*OfpOxmOfbField_TableMetadataMask
+	//	*OfpOxmOfbField_EthDstMask
+	//	*OfpOxmOfbField_EthSrcMask
+	//	*OfpOxmOfbField_VlanVidMask
+	//	*OfpOxmOfbField_Ipv4SrcMask
+	//	*OfpOxmOfbField_Ipv4DstMask
+	//	*OfpOxmOfbField_ArpSpaMask
+	//	*OfpOxmOfbField_ArpTpaMask
+	//	*OfpOxmOfbField_Ipv6SrcMask
+	//	*OfpOxmOfbField_Ipv6DstMask
+	//	*OfpOxmOfbField_Ipv6FlabelMask
+	//	*OfpOxmOfbField_PbbIsidMask
+	//	*OfpOxmOfbField_TunnelIdMask
+	//	*OfpOxmOfbField_Ipv6ExthdrMask
+	Mask                 isOfpOxmOfbField_Mask `protobuf_oneof:"mask"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *OfpOxmOfbField) Reset()         { *m = OfpOxmOfbField{} }
+func (m *OfpOxmOfbField) String() string { return proto.CompactTextString(m) }
+func (*OfpOxmOfbField) ProtoMessage()    {}
+func (*OfpOxmOfbField) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{13}
+}
+
+func (m *OfpOxmOfbField) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpOxmOfbField.Unmarshal(m, b)
+}
+func (m *OfpOxmOfbField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpOxmOfbField.Marshal(b, m, deterministic)
+}
+func (m *OfpOxmOfbField) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpOxmOfbField.Merge(m, src)
+}
+func (m *OfpOxmOfbField) XXX_Size() int {
+	return xxx_messageInfo_OfpOxmOfbField.Size(m)
+}
+func (m *OfpOxmOfbField) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpOxmOfbField.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpOxmOfbField proto.InternalMessageInfo
+
+func (m *OfpOxmOfbField) GetType() OxmOfbFieldTypes {
+	if m != nil {
+		return m.Type
+	}
+	return OxmOfbFieldTypes_OFPXMT_OFB_IN_PORT
+}
+
+func (m *OfpOxmOfbField) GetHasMask() bool {
+	if m != nil {
+		return m.HasMask
+	}
+	return false
+}
+
+type isOfpOxmOfbField_Value interface {
+	isOfpOxmOfbField_Value()
+}
+
+type OfpOxmOfbField_Port struct {
+	Port uint32 `protobuf:"varint,3,opt,name=port,proto3,oneof"`
+}
+
+type OfpOxmOfbField_PhysicalPort struct {
+	PhysicalPort uint32 `protobuf:"varint,4,opt,name=physical_port,json=physicalPort,proto3,oneof"`
+}
+
+type OfpOxmOfbField_TableMetadata struct {
+	TableMetadata uint64 `protobuf:"varint,5,opt,name=table_metadata,json=tableMetadata,proto3,oneof"`
+}
+
+type OfpOxmOfbField_EthDst struct {
+	EthDst []byte `protobuf:"bytes,6,opt,name=eth_dst,json=ethDst,proto3,oneof"`
+}
+
+type OfpOxmOfbField_EthSrc struct {
+	EthSrc []byte `protobuf:"bytes,7,opt,name=eth_src,json=ethSrc,proto3,oneof"`
+}
+
+type OfpOxmOfbField_EthType struct {
+	EthType uint32 `protobuf:"varint,8,opt,name=eth_type,json=ethType,proto3,oneof"`
+}
+
+type OfpOxmOfbField_VlanVid struct {
+	VlanVid uint32 `protobuf:"varint,9,opt,name=vlan_vid,json=vlanVid,proto3,oneof"`
+}
+
+type OfpOxmOfbField_VlanPcp struct {
+	VlanPcp uint32 `protobuf:"varint,10,opt,name=vlan_pcp,json=vlanPcp,proto3,oneof"`
+}
+
+type OfpOxmOfbField_IpDscp struct {
+	IpDscp uint32 `protobuf:"varint,11,opt,name=ip_dscp,json=ipDscp,proto3,oneof"`
+}
+
+type OfpOxmOfbField_IpEcn struct {
+	IpEcn uint32 `protobuf:"varint,12,opt,name=ip_ecn,json=ipEcn,proto3,oneof"`
+}
+
+type OfpOxmOfbField_IpProto struct {
+	IpProto uint32 `protobuf:"varint,13,opt,name=ip_proto,json=ipProto,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv4Src struct {
+	Ipv4Src uint32 `protobuf:"varint,14,opt,name=ipv4_src,json=ipv4Src,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv4Dst struct {
+	Ipv4Dst uint32 `protobuf:"varint,15,opt,name=ipv4_dst,json=ipv4Dst,proto3,oneof"`
+}
+
+type OfpOxmOfbField_TcpSrc struct {
+	TcpSrc uint32 `protobuf:"varint,16,opt,name=tcp_src,json=tcpSrc,proto3,oneof"`
+}
+
+type OfpOxmOfbField_TcpDst struct {
+	TcpDst uint32 `protobuf:"varint,17,opt,name=tcp_dst,json=tcpDst,proto3,oneof"`
+}
+
+type OfpOxmOfbField_UdpSrc struct {
+	UdpSrc uint32 `protobuf:"varint,18,opt,name=udp_src,json=udpSrc,proto3,oneof"`
+}
+
+type OfpOxmOfbField_UdpDst struct {
+	UdpDst uint32 `protobuf:"varint,19,opt,name=udp_dst,json=udpDst,proto3,oneof"`
+}
+
+type OfpOxmOfbField_SctpSrc struct {
+	SctpSrc uint32 `protobuf:"varint,20,opt,name=sctp_src,json=sctpSrc,proto3,oneof"`
+}
+
+type OfpOxmOfbField_SctpDst struct {
+	SctpDst uint32 `protobuf:"varint,21,opt,name=sctp_dst,json=sctpDst,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Icmpv4Type struct {
+	Icmpv4Type uint32 `protobuf:"varint,22,opt,name=icmpv4_type,json=icmpv4Type,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Icmpv4Code struct {
+	Icmpv4Code uint32 `protobuf:"varint,23,opt,name=icmpv4_code,json=icmpv4Code,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpOp struct {
+	ArpOp uint32 `protobuf:"varint,24,opt,name=arp_op,json=arpOp,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpSpa struct {
+	ArpSpa uint32 `protobuf:"varint,25,opt,name=arp_spa,json=arpSpa,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpTpa struct {
+	ArpTpa uint32 `protobuf:"varint,26,opt,name=arp_tpa,json=arpTpa,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpSha struct {
+	ArpSha []byte `protobuf:"bytes,27,opt,name=arp_sha,json=arpSha,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpTha struct {
+	ArpTha []byte `protobuf:"bytes,28,opt,name=arp_tha,json=arpTha,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6Src struct {
+	Ipv6Src []byte `protobuf:"bytes,29,opt,name=ipv6_src,json=ipv6Src,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6Dst struct {
+	Ipv6Dst []byte `protobuf:"bytes,30,opt,name=ipv6_dst,json=ipv6Dst,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6Flabel struct {
+	Ipv6Flabel uint32 `protobuf:"varint,31,opt,name=ipv6_flabel,json=ipv6Flabel,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Icmpv6Type struct {
+	Icmpv6Type uint32 `protobuf:"varint,32,opt,name=icmpv6_type,json=icmpv6Type,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Icmpv6Code struct {
+	Icmpv6Code uint32 `protobuf:"varint,33,opt,name=icmpv6_code,json=icmpv6Code,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6NdTarget struct {
+	Ipv6NdTarget []byte `protobuf:"bytes,34,opt,name=ipv6_nd_target,json=ipv6NdTarget,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6NdSsl struct {
+	Ipv6NdSsl []byte `protobuf:"bytes,35,opt,name=ipv6_nd_ssl,json=ipv6NdSsl,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6NdTll struct {
+	Ipv6NdTll []byte `protobuf:"bytes,36,opt,name=ipv6_nd_tll,json=ipv6NdTll,proto3,oneof"`
+}
+
+type OfpOxmOfbField_MplsLabel struct {
+	MplsLabel uint32 `protobuf:"varint,37,opt,name=mpls_label,json=mplsLabel,proto3,oneof"`
+}
+
+type OfpOxmOfbField_MplsTc struct {
+	MplsTc uint32 `protobuf:"varint,38,opt,name=mpls_tc,json=mplsTc,proto3,oneof"`
+}
+
+type OfpOxmOfbField_MplsBos struct {
+	MplsBos uint32 `protobuf:"varint,39,opt,name=mpls_bos,json=mplsBos,proto3,oneof"`
+}
+
+type OfpOxmOfbField_PbbIsid struct {
+	PbbIsid uint32 `protobuf:"varint,40,opt,name=pbb_isid,json=pbbIsid,proto3,oneof"`
+}
+
+type OfpOxmOfbField_TunnelId struct {
+	TunnelId uint64 `protobuf:"varint,41,opt,name=tunnel_id,json=tunnelId,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6Exthdr struct {
+	Ipv6Exthdr uint32 `protobuf:"varint,42,opt,name=ipv6_exthdr,json=ipv6Exthdr,proto3,oneof"`
+}
+
+func (*OfpOxmOfbField_Port) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_PhysicalPort) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_TableMetadata) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_EthDst) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_EthSrc) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_EthType) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_VlanVid) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_VlanPcp) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_IpDscp) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_IpEcn) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_IpProto) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv4Src) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv4Dst) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_TcpSrc) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_TcpDst) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_UdpSrc) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_UdpDst) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_SctpSrc) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_SctpDst) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Icmpv4Type) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Icmpv4Code) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_ArpOp) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_ArpSpa) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_ArpTpa) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_ArpSha) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_ArpTha) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6Src) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6Dst) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6Flabel) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Icmpv6Type) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Icmpv6Code) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6NdTarget) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6NdSsl) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6NdTll) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_MplsLabel) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_MplsTc) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_MplsBos) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_PbbIsid) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_TunnelId) isOfpOxmOfbField_Value() {}
+
+func (*OfpOxmOfbField_Ipv6Exthdr) isOfpOxmOfbField_Value() {}
+
+func (m *OfpOxmOfbField) GetValue() isOfpOxmOfbField_Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetPort() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Port); ok {
+		return x.Port
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetPhysicalPort() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_PhysicalPort); ok {
+		return x.PhysicalPort
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetTableMetadata() uint64 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_TableMetadata); ok {
+		return x.TableMetadata
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetEthDst() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_EthDst); ok {
+		return x.EthDst
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetEthSrc() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_EthSrc); ok {
+		return x.EthSrc
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetEthType() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_EthType); ok {
+		return x.EthType
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetVlanVid() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_VlanVid); ok {
+		return x.VlanVid
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetVlanPcp() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_VlanPcp); ok {
+		return x.VlanPcp
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpDscp() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_IpDscp); ok {
+		return x.IpDscp
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpEcn() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_IpEcn); ok {
+		return x.IpEcn
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpProto() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_IpProto); ok {
+		return x.IpProto
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv4Src() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv4Src); ok {
+		return x.Ipv4Src
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv4Dst() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv4Dst); ok {
+		return x.Ipv4Dst
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetTcpSrc() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_TcpSrc); ok {
+		return x.TcpSrc
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetTcpDst() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_TcpDst); ok {
+		return x.TcpDst
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetUdpSrc() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_UdpSrc); ok {
+		return x.UdpSrc
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetUdpDst() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_UdpDst); ok {
+		return x.UdpDst
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetSctpSrc() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_SctpSrc); ok {
+		return x.SctpSrc
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetSctpDst() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_SctpDst); ok {
+		return x.SctpDst
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIcmpv4Type() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Icmpv4Type); ok {
+		return x.Icmpv4Type
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIcmpv4Code() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Icmpv4Code); ok {
+		return x.Icmpv4Code
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetArpOp() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_ArpOp); ok {
+		return x.ArpOp
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetArpSpa() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_ArpSpa); ok {
+		return x.ArpSpa
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetArpTpa() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_ArpTpa); ok {
+		return x.ArpTpa
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetArpSha() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_ArpSha); ok {
+		return x.ArpSha
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetArpTha() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_ArpTha); ok {
+		return x.ArpTha
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6Src() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6Src); ok {
+		return x.Ipv6Src
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6Dst() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6Dst); ok {
+		return x.Ipv6Dst
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6Flabel() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6Flabel); ok {
+		return x.Ipv6Flabel
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIcmpv6Type() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Icmpv6Type); ok {
+		return x.Icmpv6Type
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIcmpv6Code() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Icmpv6Code); ok {
+		return x.Icmpv6Code
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv6NdTarget() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6NdTarget); ok {
+		return x.Ipv6NdTarget
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6NdSsl() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6NdSsl); ok {
+		return x.Ipv6NdSsl
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6NdTll() []byte {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6NdTll); ok {
+		return x.Ipv6NdTll
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetMplsLabel() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_MplsLabel); ok {
+		return x.MplsLabel
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetMplsTc() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_MplsTc); ok {
+		return x.MplsTc
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetMplsBos() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_MplsBos); ok {
+		return x.MplsBos
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetPbbIsid() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_PbbIsid); ok {
+		return x.PbbIsid
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetTunnelId() uint64 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_TunnelId); ok {
+		return x.TunnelId
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv6Exthdr() uint32 {
+	if x, ok := m.GetValue().(*OfpOxmOfbField_Ipv6Exthdr); ok {
+		return x.Ipv6Exthdr
+	}
+	return 0
+}
+
+type isOfpOxmOfbField_Mask interface {
+	isOfpOxmOfbField_Mask()
+}
+
+type OfpOxmOfbField_TableMetadataMask struct {
+	TableMetadataMask uint64 `protobuf:"varint,105,opt,name=table_metadata_mask,json=tableMetadataMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_EthDstMask struct {
+	EthDstMask []byte `protobuf:"bytes,106,opt,name=eth_dst_mask,json=ethDstMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_EthSrcMask struct {
+	EthSrcMask []byte `protobuf:"bytes,107,opt,name=eth_src_mask,json=ethSrcMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_VlanVidMask struct {
+	VlanVidMask uint32 `protobuf:"varint,109,opt,name=vlan_vid_mask,json=vlanVidMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv4SrcMask struct {
+	Ipv4SrcMask uint32 `protobuf:"varint,114,opt,name=ipv4_src_mask,json=ipv4SrcMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv4DstMask struct {
+	Ipv4DstMask uint32 `protobuf:"varint,115,opt,name=ipv4_dst_mask,json=ipv4DstMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpSpaMask struct {
+	ArpSpaMask uint32 `protobuf:"varint,125,opt,name=arp_spa_mask,json=arpSpaMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_ArpTpaMask struct {
+	ArpTpaMask uint32 `protobuf:"varint,126,opt,name=arp_tpa_mask,json=arpTpaMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6SrcMask struct {
+	Ipv6SrcMask []byte `protobuf:"bytes,129,opt,name=ipv6_src_mask,json=ipv6SrcMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6DstMask struct {
+	Ipv6DstMask []byte `protobuf:"bytes,130,opt,name=ipv6_dst_mask,json=ipv6DstMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6FlabelMask struct {
+	Ipv6FlabelMask uint32 `protobuf:"varint,131,opt,name=ipv6_flabel_mask,json=ipv6FlabelMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_PbbIsidMask struct {
+	PbbIsidMask uint32 `protobuf:"varint,140,opt,name=pbb_isid_mask,json=pbbIsidMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_TunnelIdMask struct {
+	TunnelIdMask uint64 `protobuf:"varint,141,opt,name=tunnel_id_mask,json=tunnelIdMask,proto3,oneof"`
+}
+
+type OfpOxmOfbField_Ipv6ExthdrMask struct {
+	Ipv6ExthdrMask uint32 `protobuf:"varint,142,opt,name=ipv6_exthdr_mask,json=ipv6ExthdrMask,proto3,oneof"`
+}
+
+func (*OfpOxmOfbField_TableMetadataMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_EthDstMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_EthSrcMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_VlanVidMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_Ipv4SrcMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_Ipv4DstMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_ArpSpaMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_ArpTpaMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_Ipv6SrcMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_Ipv6DstMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_Ipv6FlabelMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_PbbIsidMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_TunnelIdMask) isOfpOxmOfbField_Mask() {}
+
+func (*OfpOxmOfbField_Ipv6ExthdrMask) isOfpOxmOfbField_Mask() {}
+
+func (m *OfpOxmOfbField) GetMask() isOfpOxmOfbField_Mask {
+	if m != nil {
+		return m.Mask
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetTableMetadataMask() uint64 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_TableMetadataMask); ok {
+		return x.TableMetadataMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetEthDstMask() []byte {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_EthDstMask); ok {
+		return x.EthDstMask
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetEthSrcMask() []byte {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_EthSrcMask); ok {
+		return x.EthSrcMask
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetVlanVidMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_VlanVidMask); ok {
+		return x.VlanVidMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv4SrcMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_Ipv4SrcMask); ok {
+		return x.Ipv4SrcMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv4DstMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_Ipv4DstMask); ok {
+		return x.Ipv4DstMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetArpSpaMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_ArpSpaMask); ok {
+		return x.ArpSpaMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetArpTpaMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_ArpTpaMask); ok {
+		return x.ArpTpaMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv6SrcMask() []byte {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_Ipv6SrcMask); ok {
+		return x.Ipv6SrcMask
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6DstMask() []byte {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_Ipv6DstMask); ok {
+		return x.Ipv6DstMask
+	}
+	return nil
+}
+
+func (m *OfpOxmOfbField) GetIpv6FlabelMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_Ipv6FlabelMask); ok {
+		return x.Ipv6FlabelMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetPbbIsidMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_PbbIsidMask); ok {
+		return x.PbbIsidMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetTunnelIdMask() uint64 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_TunnelIdMask); ok {
+		return x.TunnelIdMask
+	}
+	return 0
+}
+
+func (m *OfpOxmOfbField) GetIpv6ExthdrMask() uint32 {
+	if x, ok := m.GetMask().(*OfpOxmOfbField_Ipv6ExthdrMask); ok {
+		return x.Ipv6ExthdrMask
+	}
+	return 0
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpOxmOfbField) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpOxmOfbField_Port)(nil),
+		(*OfpOxmOfbField_PhysicalPort)(nil),
+		(*OfpOxmOfbField_TableMetadata)(nil),
+		(*OfpOxmOfbField_EthDst)(nil),
+		(*OfpOxmOfbField_EthSrc)(nil),
+		(*OfpOxmOfbField_EthType)(nil),
+		(*OfpOxmOfbField_VlanVid)(nil),
+		(*OfpOxmOfbField_VlanPcp)(nil),
+		(*OfpOxmOfbField_IpDscp)(nil),
+		(*OfpOxmOfbField_IpEcn)(nil),
+		(*OfpOxmOfbField_IpProto)(nil),
+		(*OfpOxmOfbField_Ipv4Src)(nil),
+		(*OfpOxmOfbField_Ipv4Dst)(nil),
+		(*OfpOxmOfbField_TcpSrc)(nil),
+		(*OfpOxmOfbField_TcpDst)(nil),
+		(*OfpOxmOfbField_UdpSrc)(nil),
+		(*OfpOxmOfbField_UdpDst)(nil),
+		(*OfpOxmOfbField_SctpSrc)(nil),
+		(*OfpOxmOfbField_SctpDst)(nil),
+		(*OfpOxmOfbField_Icmpv4Type)(nil),
+		(*OfpOxmOfbField_Icmpv4Code)(nil),
+		(*OfpOxmOfbField_ArpOp)(nil),
+		(*OfpOxmOfbField_ArpSpa)(nil),
+		(*OfpOxmOfbField_ArpTpa)(nil),
+		(*OfpOxmOfbField_ArpSha)(nil),
+		(*OfpOxmOfbField_ArpTha)(nil),
+		(*OfpOxmOfbField_Ipv6Src)(nil),
+		(*OfpOxmOfbField_Ipv6Dst)(nil),
+		(*OfpOxmOfbField_Ipv6Flabel)(nil),
+		(*OfpOxmOfbField_Icmpv6Type)(nil),
+		(*OfpOxmOfbField_Icmpv6Code)(nil),
+		(*OfpOxmOfbField_Ipv6NdTarget)(nil),
+		(*OfpOxmOfbField_Ipv6NdSsl)(nil),
+		(*OfpOxmOfbField_Ipv6NdTll)(nil),
+		(*OfpOxmOfbField_MplsLabel)(nil),
+		(*OfpOxmOfbField_MplsTc)(nil),
+		(*OfpOxmOfbField_MplsBos)(nil),
+		(*OfpOxmOfbField_PbbIsid)(nil),
+		(*OfpOxmOfbField_TunnelId)(nil),
+		(*OfpOxmOfbField_Ipv6Exthdr)(nil),
+		(*OfpOxmOfbField_TableMetadataMask)(nil),
+		(*OfpOxmOfbField_EthDstMask)(nil),
+		(*OfpOxmOfbField_EthSrcMask)(nil),
+		(*OfpOxmOfbField_VlanVidMask)(nil),
+		(*OfpOxmOfbField_Ipv4SrcMask)(nil),
+		(*OfpOxmOfbField_Ipv4DstMask)(nil),
+		(*OfpOxmOfbField_ArpSpaMask)(nil),
+		(*OfpOxmOfbField_ArpTpaMask)(nil),
+		(*OfpOxmOfbField_Ipv6SrcMask)(nil),
+		(*OfpOxmOfbField_Ipv6DstMask)(nil),
+		(*OfpOxmOfbField_Ipv6FlabelMask)(nil),
+		(*OfpOxmOfbField_PbbIsidMask)(nil),
+		(*OfpOxmOfbField_TunnelIdMask)(nil),
+		(*OfpOxmOfbField_Ipv6ExthdrMask)(nil),
+	}
+}
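+
+// Example (illustrative sketch): a masked OXM field. When HasMask is set, the
+// corresponding entry in the Mask oneof must be populated alongside the Value
+// oneof. Here a VLAN VID match uses the OFPVID_PRESENT bit (0x1000 in the
+// OpenFlow 1.3 spec) as both value and mask to match "any VLAN-tagged frame".
+// The OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID constant name is assumed to follow
+// the enum naming used elsewhere in this package.
+func exampleAnyVlanTaggedField() *OfpOxmOfbField {
+	const ofpVidPresent = 0x1000 // OFPVID_PRESENT
+	return &OfpOxmOfbField{
+		Type:    OxmOfbFieldTypes_OFPXMT_OFB_VLAN_VID,
+		HasMask: true,
+		Value:   &OfpOxmOfbField_VlanVid{VlanVid: ofpVidPresent},
+		Mask:    &OfpOxmOfbField_VlanVidMask{VlanVidMask: ofpVidPresent},
+	}
+}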
+
+// Header for OXM experimenter match fields.
+// The experimenter class should not use OXM_HEADER() macros for defining
+// fields due to this extra header.
+type OfpOxmExperimenterField struct {
+	OxmHeader            uint32   `protobuf:"varint,1,opt,name=oxm_header,json=oxmHeader,proto3" json:"oxm_header,omitempty"`
+	Experimenter         uint32   `protobuf:"varint,2,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpOxmExperimenterField) Reset()         { *m = OfpOxmExperimenterField{} }
+func (m *OfpOxmExperimenterField) String() string { return proto.CompactTextString(m) }
+func (*OfpOxmExperimenterField) ProtoMessage()    {}
+func (*OfpOxmExperimenterField) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{14}
+}
+
+func (m *OfpOxmExperimenterField) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpOxmExperimenterField.Unmarshal(m, b)
+}
+func (m *OfpOxmExperimenterField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpOxmExperimenterField.Marshal(b, m, deterministic)
+}
+func (m *OfpOxmExperimenterField) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpOxmExperimenterField.Merge(m, src)
+}
+func (m *OfpOxmExperimenterField) XXX_Size() int {
+	return xxx_messageInfo_OfpOxmExperimenterField.Size(m)
+}
+func (m *OfpOxmExperimenterField) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpOxmExperimenterField.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpOxmExperimenterField proto.InternalMessageInfo
+
+func (m *OfpOxmExperimenterField) GetOxmHeader() uint32 {
+	if m != nil {
+		return m.OxmHeader
+	}
+	return 0
+}
+
+func (m *OfpOxmExperimenterField) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+// Action header that is common to all actions.  The length includes the
+// header and any padding used to make the action 64-bit aligned.
+// NB: The length of an action *must* always be a multiple of eight.
+type OfpAction struct {
+	Type OfpActionType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpActionType" json:"type,omitempty"`
+	// Types that are valid to be assigned to Action:
+	//	*OfpAction_Output
+	//	*OfpAction_MplsTtl
+	//	*OfpAction_Push
+	//	*OfpAction_PopMpls
+	//	*OfpAction_Group
+	//	*OfpAction_NwTtl
+	//	*OfpAction_SetField
+	//	*OfpAction_Experimenter
+	Action               isOfpAction_Action `protobuf_oneof:"action"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *OfpAction) Reset()         { *m = OfpAction{} }
+func (m *OfpAction) String() string { return proto.CompactTextString(m) }
+func (*OfpAction) ProtoMessage()    {}
+func (*OfpAction) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{15}
+}
+
+func (m *OfpAction) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpAction.Unmarshal(m, b)
+}
+func (m *OfpAction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpAction.Marshal(b, m, deterministic)
+}
+func (m *OfpAction) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpAction.Merge(m, src)
+}
+func (m *OfpAction) XXX_Size() int {
+	return xxx_messageInfo_OfpAction.Size(m)
+}
+func (m *OfpAction) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpAction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpAction proto.InternalMessageInfo
+
+func (m *OfpAction) GetType() OfpActionType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpActionType_OFPAT_OUTPUT
+}
+
+type isOfpAction_Action interface {
+	isOfpAction_Action()
+}
+
+type OfpAction_Output struct {
+	Output *OfpActionOutput `protobuf:"bytes,2,opt,name=output,proto3,oneof"`
+}
+
+type OfpAction_MplsTtl struct {
+	MplsTtl *OfpActionMplsTtl `protobuf:"bytes,3,opt,name=mpls_ttl,json=mplsTtl,proto3,oneof"`
+}
+
+type OfpAction_Push struct {
+	Push *OfpActionPush `protobuf:"bytes,4,opt,name=push,proto3,oneof"`
+}
+
+type OfpAction_PopMpls struct {
+	PopMpls *OfpActionPopMpls `protobuf:"bytes,5,opt,name=pop_mpls,json=popMpls,proto3,oneof"`
+}
+
+type OfpAction_Group struct {
+	Group *OfpActionGroup `protobuf:"bytes,6,opt,name=group,proto3,oneof"`
+}
+
+type OfpAction_NwTtl struct {
+	NwTtl *OfpActionNwTtl `protobuf:"bytes,7,opt,name=nw_ttl,json=nwTtl,proto3,oneof"`
+}
+
+type OfpAction_SetField struct {
+	SetField *OfpActionSetField `protobuf:"bytes,8,opt,name=set_field,json=setField,proto3,oneof"`
+}
+
+type OfpAction_Experimenter struct {
+	Experimenter *OfpActionExperimenter `protobuf:"bytes,9,opt,name=experimenter,proto3,oneof"`
+}
+
+func (*OfpAction_Output) isOfpAction_Action() {}
+
+func (*OfpAction_MplsTtl) isOfpAction_Action() {}
+
+func (*OfpAction_Push) isOfpAction_Action() {}
+
+func (*OfpAction_PopMpls) isOfpAction_Action() {}
+
+func (*OfpAction_Group) isOfpAction_Action() {}
+
+func (*OfpAction_NwTtl) isOfpAction_Action() {}
+
+func (*OfpAction_SetField) isOfpAction_Action() {}
+
+func (*OfpAction_Experimenter) isOfpAction_Action() {}
+
+func (m *OfpAction) GetAction() isOfpAction_Action {
+	if m != nil {
+		return m.Action
+	}
+	return nil
+}
+
+func (m *OfpAction) GetOutput() *OfpActionOutput {
+	if x, ok := m.GetAction().(*OfpAction_Output); ok {
+		return x.Output
+	}
+	return nil
+}
+
+func (m *OfpAction) GetMplsTtl() *OfpActionMplsTtl {
+	if x, ok := m.GetAction().(*OfpAction_MplsTtl); ok {
+		return x.MplsTtl
+	}
+	return nil
+}
+
+func (m *OfpAction) GetPush() *OfpActionPush {
+	if x, ok := m.GetAction().(*OfpAction_Push); ok {
+		return x.Push
+	}
+	return nil
+}
+
+func (m *OfpAction) GetPopMpls() *OfpActionPopMpls {
+	if x, ok := m.GetAction().(*OfpAction_PopMpls); ok {
+		return x.PopMpls
+	}
+	return nil
+}
+
+func (m *OfpAction) GetGroup() *OfpActionGroup {
+	if x, ok := m.GetAction().(*OfpAction_Group); ok {
+		return x.Group
+	}
+	return nil
+}
+
+func (m *OfpAction) GetNwTtl() *OfpActionNwTtl {
+	if x, ok := m.GetAction().(*OfpAction_NwTtl); ok {
+		return x.NwTtl
+	}
+	return nil
+}
+
+func (m *OfpAction) GetSetField() *OfpActionSetField {
+	if x, ok := m.GetAction().(*OfpAction_SetField); ok {
+		return x.SetField
+	}
+	return nil
+}
+
+func (m *OfpAction) GetExperimenter() *OfpActionExperimenter {
+	if x, ok := m.GetAction().(*OfpAction_Experimenter); ok {
+		return x.Experimenter
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpAction) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpAction_Output)(nil),
+		(*OfpAction_MplsTtl)(nil),
+		(*OfpAction_Push)(nil),
+		(*OfpAction_PopMpls)(nil),
+		(*OfpAction_Group)(nil),
+		(*OfpAction_NwTtl)(nil),
+		(*OfpAction_SetField)(nil),
+		(*OfpAction_Experimenter)(nil),
+	}
+}
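+
+// Example (illustrative sketch): an output action that punts the complete
+// packet to the controller, per the max_len semantics documented on
+// OfpActionOutput below. OfpActionType_OFPAT_OUTPUT is used above; the
+// OfpPortNo_OFPP_CONTROLLER and OfpControllerMaxLen_OFPCML_NO_BUFFER constant
+// names are assumed to follow the enum naming used elsewhere in this package.
+func exampleOutputToController() *OfpAction {
+	return &OfpAction{
+		Type: OfpActionType_OFPAT_OUTPUT,
+		Action: &OfpAction_Output{
+			Output: &OfpActionOutput{
+				Port:   uint32(OfpPortNo_OFPP_CONTROLLER),
+				MaxLen: uint32(OfpControllerMaxLen_OFPCML_NO_BUFFER),
+			},
+		},
+	}
+}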
+
+// Action structure for OFPAT_OUTPUT, which sends packets out 'port'.
+// When the 'port' is the OFPP_CONTROLLER, 'max_len' indicates the max
+// number of bytes to send.  A 'max_len' of zero means no bytes of the
+// packet should be sent. A 'max_len' of OFPCML_NO_BUFFER means that
+// the packet is not buffered and the complete packet is to be sent to
+// the controller.
+type OfpActionOutput struct {
+	Port                 uint32   `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
+	MaxLen               uint32   `protobuf:"varint,2,opt,name=max_len,json=maxLen,proto3" json:"max_len,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionOutput) Reset()         { *m = OfpActionOutput{} }
+func (m *OfpActionOutput) String() string { return proto.CompactTextString(m) }
+func (*OfpActionOutput) ProtoMessage()    {}
+func (*OfpActionOutput) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{16}
+}
+
+func (m *OfpActionOutput) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionOutput.Unmarshal(m, b)
+}
+func (m *OfpActionOutput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionOutput.Marshal(b, m, deterministic)
+}
+func (m *OfpActionOutput) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionOutput.Merge(m, src)
+}
+func (m *OfpActionOutput) XXX_Size() int {
+	return xxx_messageInfo_OfpActionOutput.Size(m)
+}
+func (m *OfpActionOutput) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionOutput.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionOutput proto.InternalMessageInfo
+
+func (m *OfpActionOutput) GetPort() uint32 {
+	if m != nil {
+		return m.Port
+	}
+	return 0
+}
+
+func (m *OfpActionOutput) GetMaxLen() uint32 {
+	if m != nil {
+		return m.MaxLen
+	}
+	return 0
+}
+
+// Action structure for OFPAT_SET_MPLS_TTL.
+type OfpActionMplsTtl struct {
+	MplsTtl              uint32   `protobuf:"varint,1,opt,name=mpls_ttl,json=mplsTtl,proto3" json:"mpls_ttl,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionMplsTtl) Reset()         { *m = OfpActionMplsTtl{} }
+func (m *OfpActionMplsTtl) String() string { return proto.CompactTextString(m) }
+func (*OfpActionMplsTtl) ProtoMessage()    {}
+func (*OfpActionMplsTtl) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{17}
+}
+
+func (m *OfpActionMplsTtl) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionMplsTtl.Unmarshal(m, b)
+}
+func (m *OfpActionMplsTtl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionMplsTtl.Marshal(b, m, deterministic)
+}
+func (m *OfpActionMplsTtl) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionMplsTtl.Merge(m, src)
+}
+func (m *OfpActionMplsTtl) XXX_Size() int {
+	return xxx_messageInfo_OfpActionMplsTtl.Size(m)
+}
+func (m *OfpActionMplsTtl) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionMplsTtl.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionMplsTtl proto.InternalMessageInfo
+
+func (m *OfpActionMplsTtl) GetMplsTtl() uint32 {
+	if m != nil {
+		return m.MplsTtl
+	}
+	return 0
+}
+
+// Action structure for OFPAT_PUSH_VLAN/MPLS/PBB.
+type OfpActionPush struct {
+	Ethertype            uint32   `protobuf:"varint,1,opt,name=ethertype,proto3" json:"ethertype,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionPush) Reset()         { *m = OfpActionPush{} }
+func (m *OfpActionPush) String() string { return proto.CompactTextString(m) }
+func (*OfpActionPush) ProtoMessage()    {}
+func (*OfpActionPush) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{18}
+}
+
+func (m *OfpActionPush) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionPush.Unmarshal(m, b)
+}
+func (m *OfpActionPush) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionPush.Marshal(b, m, deterministic)
+}
+func (m *OfpActionPush) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionPush.Merge(m, src)
+}
+func (m *OfpActionPush) XXX_Size() int {
+	return xxx_messageInfo_OfpActionPush.Size(m)
+}
+func (m *OfpActionPush) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionPush.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionPush proto.InternalMessageInfo
+
+func (m *OfpActionPush) GetEthertype() uint32 {
+	if m != nil {
+		return m.Ethertype
+	}
+	return 0
+}
+
+// Action structure for OFPAT_POP_MPLS.
+type OfpActionPopMpls struct {
+	Ethertype            uint32   `protobuf:"varint,1,opt,name=ethertype,proto3" json:"ethertype,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionPopMpls) Reset()         { *m = OfpActionPopMpls{} }
+func (m *OfpActionPopMpls) String() string { return proto.CompactTextString(m) }
+func (*OfpActionPopMpls) ProtoMessage()    {}
+func (*OfpActionPopMpls) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{19}
+}
+
+func (m *OfpActionPopMpls) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionPopMpls.Unmarshal(m, b)
+}
+func (m *OfpActionPopMpls) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionPopMpls.Marshal(b, m, deterministic)
+}
+func (m *OfpActionPopMpls) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionPopMpls.Merge(m, src)
+}
+func (m *OfpActionPopMpls) XXX_Size() int {
+	return xxx_messageInfo_OfpActionPopMpls.Size(m)
+}
+func (m *OfpActionPopMpls) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionPopMpls.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionPopMpls proto.InternalMessageInfo
+
+func (m *OfpActionPopMpls) GetEthertype() uint32 {
+	if m != nil {
+		return m.Ethertype
+	}
+	return 0
+}
+
+// Action structure for OFPAT_GROUP.
+type OfpActionGroup struct {
+	GroupId              uint32   `protobuf:"varint,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionGroup) Reset()         { *m = OfpActionGroup{} }
+func (m *OfpActionGroup) String() string { return proto.CompactTextString(m) }
+func (*OfpActionGroup) ProtoMessage()    {}
+func (*OfpActionGroup) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{20}
+}
+
+func (m *OfpActionGroup) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionGroup.Unmarshal(m, b)
+}
+func (m *OfpActionGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionGroup.Marshal(b, m, deterministic)
+}
+func (m *OfpActionGroup) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionGroup.Merge(m, src)
+}
+func (m *OfpActionGroup) XXX_Size() int {
+	return xxx_messageInfo_OfpActionGroup.Size(m)
+}
+func (m *OfpActionGroup) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionGroup.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionGroup proto.InternalMessageInfo
+
+func (m *OfpActionGroup) GetGroupId() uint32 {
+	if m != nil {
+		return m.GroupId
+	}
+	return 0
+}
+
+// Action structure for OFPAT_SET_NW_TTL.
+type OfpActionNwTtl struct {
+	NwTtl                uint32   `protobuf:"varint,1,opt,name=nw_ttl,json=nwTtl,proto3" json:"nw_ttl,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionNwTtl) Reset()         { *m = OfpActionNwTtl{} }
+func (m *OfpActionNwTtl) String() string { return proto.CompactTextString(m) }
+func (*OfpActionNwTtl) ProtoMessage()    {}
+func (*OfpActionNwTtl) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{21}
+}
+
+func (m *OfpActionNwTtl) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionNwTtl.Unmarshal(m, b)
+}
+func (m *OfpActionNwTtl) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionNwTtl.Marshal(b, m, deterministic)
+}
+func (m *OfpActionNwTtl) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionNwTtl.Merge(m, src)
+}
+func (m *OfpActionNwTtl) XXX_Size() int {
+	return xxx_messageInfo_OfpActionNwTtl.Size(m)
+}
+func (m *OfpActionNwTtl) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionNwTtl.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionNwTtl proto.InternalMessageInfo
+
+func (m *OfpActionNwTtl) GetNwTtl() uint32 {
+	if m != nil {
+		return m.NwTtl
+	}
+	return 0
+}
+
+// Action structure for OFPAT_SET_FIELD.
+type OfpActionSetField struct {
+	Field                *OfpOxmField `protobuf:"bytes,1,opt,name=field,proto3" json:"field,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *OfpActionSetField) Reset()         { *m = OfpActionSetField{} }
+func (m *OfpActionSetField) String() string { return proto.CompactTextString(m) }
+func (*OfpActionSetField) ProtoMessage()    {}
+func (*OfpActionSetField) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{22}
+}
+
+func (m *OfpActionSetField) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionSetField.Unmarshal(m, b)
+}
+func (m *OfpActionSetField) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionSetField.Marshal(b, m, deterministic)
+}
+func (m *OfpActionSetField) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionSetField.Merge(m, src)
+}
+func (m *OfpActionSetField) XXX_Size() int {
+	return xxx_messageInfo_OfpActionSetField.Size(m)
+}
+func (m *OfpActionSetField) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionSetField.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionSetField proto.InternalMessageInfo
+
+func (m *OfpActionSetField) GetField() *OfpOxmField {
+	if m != nil {
+		return m.Field
+	}
+	return nil
+}
+
+// Action header for OFPAT_EXPERIMENTER.
+// The rest of the body is experimenter-defined.
+type OfpActionExperimenter struct {
+	Experimenter         uint32   `protobuf:"varint,1,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	Data                 []byte   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionExperimenter) Reset()         { *m = OfpActionExperimenter{} }
+func (m *OfpActionExperimenter) String() string { return proto.CompactTextString(m) }
+func (*OfpActionExperimenter) ProtoMessage()    {}
+func (*OfpActionExperimenter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{23}
+}
+
+func (m *OfpActionExperimenter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionExperimenter.Unmarshal(m, b)
+}
+func (m *OfpActionExperimenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionExperimenter.Marshal(b, m, deterministic)
+}
+func (m *OfpActionExperimenter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionExperimenter.Merge(m, src)
+}
+func (m *OfpActionExperimenter) XXX_Size() int {
+	return xxx_messageInfo_OfpActionExperimenter.Size(m)
+}
+func (m *OfpActionExperimenter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionExperimenter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionExperimenter proto.InternalMessageInfo
+
+func (m *OfpActionExperimenter) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpActionExperimenter) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// Instruction header that is common to all instructions.  The length includes
+// the header and any padding used to make the instruction 64-bit aligned.
+// NB: The length of an instruction *must* always be a multiple of eight.
+type OfpInstruction struct {
+	Type uint32 `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
+	// Types that are valid to be assigned to Data:
+	//	*OfpInstruction_GotoTable
+	//	*OfpInstruction_WriteMetadata
+	//	*OfpInstruction_Actions
+	//	*OfpInstruction_Meter
+	//	*OfpInstruction_Experimenter
+	Data                 isOfpInstruction_Data `protobuf_oneof:"data"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *OfpInstruction) Reset()         { *m = OfpInstruction{} }
+func (m *OfpInstruction) String() string { return proto.CompactTextString(m) }
+func (*OfpInstruction) ProtoMessage()    {}
+func (*OfpInstruction) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{24}
+}
+
+func (m *OfpInstruction) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpInstruction.Unmarshal(m, b)
+}
+func (m *OfpInstruction) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpInstruction.Marshal(b, m, deterministic)
+}
+func (m *OfpInstruction) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpInstruction.Merge(m, src)
+}
+func (m *OfpInstruction) XXX_Size() int {
+	return xxx_messageInfo_OfpInstruction.Size(m)
+}
+func (m *OfpInstruction) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpInstruction.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpInstruction proto.InternalMessageInfo
+
+func (m *OfpInstruction) GetType() uint32 {
+	if m != nil {
+		return m.Type
+	}
+	return 0
+}
+
+type isOfpInstruction_Data interface {
+	isOfpInstruction_Data()
+}
+
+type OfpInstruction_GotoTable struct {
+	GotoTable *OfpInstructionGotoTable `protobuf:"bytes,2,opt,name=goto_table,json=gotoTable,proto3,oneof"`
+}
+
+type OfpInstruction_WriteMetadata struct {
+	WriteMetadata *OfpInstructionWriteMetadata `protobuf:"bytes,3,opt,name=write_metadata,json=writeMetadata,proto3,oneof"`
+}
+
+type OfpInstruction_Actions struct {
+	Actions *OfpInstructionActions `protobuf:"bytes,4,opt,name=actions,proto3,oneof"`
+}
+
+type OfpInstruction_Meter struct {
+	Meter *OfpInstructionMeter `protobuf:"bytes,5,opt,name=meter,proto3,oneof"`
+}
+
+type OfpInstruction_Experimenter struct {
+	Experimenter *OfpInstructionExperimenter `protobuf:"bytes,6,opt,name=experimenter,proto3,oneof"`
+}
+
+func (*OfpInstruction_GotoTable) isOfpInstruction_Data() {}
+
+func (*OfpInstruction_WriteMetadata) isOfpInstruction_Data() {}
+
+func (*OfpInstruction_Actions) isOfpInstruction_Data() {}
+
+func (*OfpInstruction_Meter) isOfpInstruction_Data() {}
+
+func (*OfpInstruction_Experimenter) isOfpInstruction_Data() {}
+
+func (m *OfpInstruction) GetData() isOfpInstruction_Data {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *OfpInstruction) GetGotoTable() *OfpInstructionGotoTable {
+	if x, ok := m.GetData().(*OfpInstruction_GotoTable); ok {
+		return x.GotoTable
+	}
+	return nil
+}
+
+func (m *OfpInstruction) GetWriteMetadata() *OfpInstructionWriteMetadata {
+	if x, ok := m.GetData().(*OfpInstruction_WriteMetadata); ok {
+		return x.WriteMetadata
+	}
+	return nil
+}
+
+func (m *OfpInstruction) GetActions() *OfpInstructionActions {
+	if x, ok := m.GetData().(*OfpInstruction_Actions); ok {
+		return x.Actions
+	}
+	return nil
+}
+
+func (m *OfpInstruction) GetMeter() *OfpInstructionMeter {
+	if x, ok := m.GetData().(*OfpInstruction_Meter); ok {
+		return x.Meter
+	}
+	return nil
+}
+
+func (m *OfpInstruction) GetExperimenter() *OfpInstructionExperimenter {
+	if x, ok := m.GetData().(*OfpInstruction_Experimenter); ok {
+		return x.Experimenter
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpInstruction) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpInstruction_GotoTable)(nil),
+		(*OfpInstruction_WriteMetadata)(nil),
+		(*OfpInstruction_Actions)(nil),
+		(*OfpInstruction_Meter)(nil),
+		(*OfpInstruction_Experimenter)(nil),
+	}
+}
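+
+// Example (illustrative sketch): wrapping actions in an apply-actions
+// instruction. Type is a plain uint32 on OfpInstruction, so the assumed
+// OfpInstructionType_OFPIT_APPLY_ACTIONS enum value (4 in OpenFlow 1.3) is
+// cast explicitly; the Data oneof carries the generated OfpInstruction_Actions
+// wrapper around OfpInstructionActions.
+func exampleApplyActions(actions ...*OfpAction) *OfpInstruction {
+	return &OfpInstruction{
+		Type: uint32(OfpInstructionType_OFPIT_APPLY_ACTIONS),
+		Data: &OfpInstruction_Actions{
+			Actions: &OfpInstructionActions{Actions: actions},
+		},
+	}
+}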
+
+// Instruction structure for OFPIT_GOTO_TABLE
+type OfpInstructionGotoTable struct {
+	TableId              uint32   `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpInstructionGotoTable) Reset()         { *m = OfpInstructionGotoTable{} }
+func (m *OfpInstructionGotoTable) String() string { return proto.CompactTextString(m) }
+func (*OfpInstructionGotoTable) ProtoMessage()    {}
+func (*OfpInstructionGotoTable) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{25}
+}
+
+func (m *OfpInstructionGotoTable) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpInstructionGotoTable.Unmarshal(m, b)
+}
+func (m *OfpInstructionGotoTable) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpInstructionGotoTable.Marshal(b, m, deterministic)
+}
+func (m *OfpInstructionGotoTable) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpInstructionGotoTable.Merge(m, src)
+}
+func (m *OfpInstructionGotoTable) XXX_Size() int {
+	return xxx_messageInfo_OfpInstructionGotoTable.Size(m)
+}
+func (m *OfpInstructionGotoTable) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpInstructionGotoTable.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpInstructionGotoTable proto.InternalMessageInfo
+
+func (m *OfpInstructionGotoTable) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+// Instruction structure for OFPIT_WRITE_METADATA
+type OfpInstructionWriteMetadata struct {
+	Metadata             uint64   `protobuf:"varint,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	MetadataMask         uint64   `protobuf:"varint,2,opt,name=metadata_mask,json=metadataMask,proto3" json:"metadata_mask,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpInstructionWriteMetadata) Reset()         { *m = OfpInstructionWriteMetadata{} }
+func (m *OfpInstructionWriteMetadata) String() string { return proto.CompactTextString(m) }
+func (*OfpInstructionWriteMetadata) ProtoMessage()    {}
+func (*OfpInstructionWriteMetadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{26}
+}
+
+func (m *OfpInstructionWriteMetadata) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpInstructionWriteMetadata.Unmarshal(m, b)
+}
+func (m *OfpInstructionWriteMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpInstructionWriteMetadata.Marshal(b, m, deterministic)
+}
+func (m *OfpInstructionWriteMetadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpInstructionWriteMetadata.Merge(m, src)
+}
+func (m *OfpInstructionWriteMetadata) XXX_Size() int {
+	return xxx_messageInfo_OfpInstructionWriteMetadata.Size(m)
+}
+func (m *OfpInstructionWriteMetadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpInstructionWriteMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpInstructionWriteMetadata proto.InternalMessageInfo
+
+func (m *OfpInstructionWriteMetadata) GetMetadata() uint64 {
+	if m != nil {
+		return m.Metadata
+	}
+	return 0
+}
+
+func (m *OfpInstructionWriteMetadata) GetMetadataMask() uint64 {
+	if m != nil {
+		return m.MetadataMask
+	}
+	return 0
+}
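+
+// Example (illustrative sketch): a write-metadata instruction pairs the
+// metadata value with a mask selecting which bits to overwrite; here only the
+// low 32 bits are written. The OfpInstructionType_OFPIT_WRITE_METADATA
+// constant name (value 2 in OpenFlow 1.3) is assumed to follow the enum
+// naming used elsewhere in this package.
+func exampleWriteMetadata(meta uint64) *OfpInstruction {
+	return &OfpInstruction{
+		Type: uint32(OfpInstructionType_OFPIT_WRITE_METADATA),
+		Data: &OfpInstruction_WriteMetadata{
+			WriteMetadata: &OfpInstructionWriteMetadata{
+				Metadata:     meta,
+				MetadataMask: 0x00000000ffffffff,
+			},
+		},
+	}
+}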
+
+// Instruction structure for OFPIT_WRITE/APPLY/CLEAR_ACTIONS
+type OfpInstructionActions struct {
+	Actions              []*OfpAction `protobuf:"bytes,1,rep,name=actions,proto3" json:"actions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *OfpInstructionActions) Reset()         { *m = OfpInstructionActions{} }
+func (m *OfpInstructionActions) String() string { return proto.CompactTextString(m) }
+func (*OfpInstructionActions) ProtoMessage()    {}
+func (*OfpInstructionActions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{27}
+}
+
+func (m *OfpInstructionActions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpInstructionActions.Unmarshal(m, b)
+}
+func (m *OfpInstructionActions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpInstructionActions.Marshal(b, m, deterministic)
+}
+func (m *OfpInstructionActions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpInstructionActions.Merge(m, src)
+}
+func (m *OfpInstructionActions) XXX_Size() int {
+	return xxx_messageInfo_OfpInstructionActions.Size(m)
+}
+func (m *OfpInstructionActions) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpInstructionActions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpInstructionActions proto.InternalMessageInfo
+
+func (m *OfpInstructionActions) GetActions() []*OfpAction {
+	if m != nil {
+		return m.Actions
+	}
+	return nil
+}
+
+// Instruction structure for OFPIT_METER
+type OfpInstructionMeter struct {
+	MeterId              uint32   `protobuf:"varint,1,opt,name=meter_id,json=meterId,proto3" json:"meter_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpInstructionMeter) Reset()         { *m = OfpInstructionMeter{} }
+func (m *OfpInstructionMeter) String() string { return proto.CompactTextString(m) }
+func (*OfpInstructionMeter) ProtoMessage()    {}
+func (*OfpInstructionMeter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{28}
+}
+
+func (m *OfpInstructionMeter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpInstructionMeter.Unmarshal(m, b)
+}
+func (m *OfpInstructionMeter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpInstructionMeter.Marshal(b, m, deterministic)
+}
+func (m *OfpInstructionMeter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpInstructionMeter.Merge(m, src)
+}
+func (m *OfpInstructionMeter) XXX_Size() int {
+	return xxx_messageInfo_OfpInstructionMeter.Size(m)
+}
+func (m *OfpInstructionMeter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpInstructionMeter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpInstructionMeter proto.InternalMessageInfo
+
+func (m *OfpInstructionMeter) GetMeterId() uint32 {
+	if m != nil {
+		return m.MeterId
+	}
+	return 0
+}
+
+// Instruction structure for experimental instructions
+type OfpInstructionExperimenter struct {
+	Experimenter uint32 `protobuf:"varint,1,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	// Experimenter-defined arbitrary additional data.
+	Data                 []byte   `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpInstructionExperimenter) Reset()         { *m = OfpInstructionExperimenter{} }
+func (m *OfpInstructionExperimenter) String() string { return proto.CompactTextString(m) }
+func (*OfpInstructionExperimenter) ProtoMessage()    {}
+func (*OfpInstructionExperimenter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{29}
+}
+
+func (m *OfpInstructionExperimenter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpInstructionExperimenter.Unmarshal(m, b)
+}
+func (m *OfpInstructionExperimenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpInstructionExperimenter.Marshal(b, m, deterministic)
+}
+func (m *OfpInstructionExperimenter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpInstructionExperimenter.Merge(m, src)
+}
+func (m *OfpInstructionExperimenter) XXX_Size() int {
+	return xxx_messageInfo_OfpInstructionExperimenter.Size(m)
+}
+func (m *OfpInstructionExperimenter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpInstructionExperimenter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpInstructionExperimenter proto.InternalMessageInfo
+
+func (m *OfpInstructionExperimenter) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpInstructionExperimenter) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// Flow setup and teardown (controller -> datapath).
+type OfpFlowMod struct {
+	//ofp_header header;
+	Cookie               uint64            `protobuf:"varint,1,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	CookieMask           uint64            `protobuf:"varint,2,opt,name=cookie_mask,json=cookieMask,proto3" json:"cookie_mask,omitempty"`
+	TableId              uint32            `protobuf:"varint,3,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	Command              OfpFlowModCommand `protobuf:"varint,4,opt,name=command,proto3,enum=openflow_13.OfpFlowModCommand" json:"command,omitempty"`
+	IdleTimeout          uint32            `protobuf:"varint,5,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"`
+	HardTimeout          uint32            `protobuf:"varint,6,opt,name=hard_timeout,json=hardTimeout,proto3" json:"hard_timeout,omitempty"`
+	Priority             uint32            `protobuf:"varint,7,opt,name=priority,proto3" json:"priority,omitempty"`
+	BufferId             uint32            `protobuf:"varint,8,opt,name=buffer_id,json=bufferId,proto3" json:"buffer_id,omitempty"`
+	OutPort              uint32            `protobuf:"varint,9,opt,name=out_port,json=outPort,proto3" json:"out_port,omitempty"`
+	OutGroup             uint32            `protobuf:"varint,10,opt,name=out_group,json=outGroup,proto3" json:"out_group,omitempty"`
+	Flags                uint32            `protobuf:"varint,11,opt,name=flags,proto3" json:"flags,omitempty"`
+	Match                *OfpMatch         `protobuf:"bytes,12,opt,name=match,proto3" json:"match,omitempty"`
+	Instructions         []*OfpInstruction `protobuf:"bytes,13,rep,name=instructions,proto3" json:"instructions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *OfpFlowMod) Reset()         { *m = OfpFlowMod{} }
+func (m *OfpFlowMod) String() string { return proto.CompactTextString(m) }
+func (*OfpFlowMod) ProtoMessage()    {}
+func (*OfpFlowMod) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{30}
+}
+
+func (m *OfpFlowMod) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpFlowMod.Unmarshal(m, b)
+}
+func (m *OfpFlowMod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpFlowMod.Marshal(b, m, deterministic)
+}
+func (m *OfpFlowMod) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpFlowMod.Merge(m, src)
+}
+func (m *OfpFlowMod) XXX_Size() int {
+	return xxx_messageInfo_OfpFlowMod.Size(m)
+}
+func (m *OfpFlowMod) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpFlowMod.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpFlowMod proto.InternalMessageInfo
+
+func (m *OfpFlowMod) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetCookieMask() uint64 {
+	if m != nil {
+		return m.CookieMask
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetCommand() OfpFlowModCommand {
+	if m != nil {
+		return m.Command
+	}
+	return OfpFlowModCommand_OFPFC_ADD
+}
+
+func (m *OfpFlowMod) GetIdleTimeout() uint32 {
+	if m != nil {
+		return m.IdleTimeout
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetHardTimeout() uint32 {
+	if m != nil {
+		return m.HardTimeout
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetPriority() uint32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetBufferId() uint32 {
+	if m != nil {
+		return m.BufferId
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetOutPort() uint32 {
+	if m != nil {
+		return m.OutPort
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetOutGroup() uint32 {
+	if m != nil {
+		return m.OutGroup
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpFlowMod) GetMatch() *OfpMatch {
+	if m != nil {
+		return m.Match
+	}
+	return nil
+}
+
+func (m *OfpFlowMod) GetInstructions() []*OfpInstruction {
+	if m != nil {
+		return m.Instructions
+	}
+	return nil
+}
+
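+// Illustrative sketch, not generated code: assembling a minimal OFPFC_ADD
+// flow-mod from the fields above. Only identifiers defined in this file are
+// used; the match is left empty and instructions are omitted, and 0xffffffff
+// for buffer_id follows the OpenFlow 1.3 OFP_NO_BUFFER convention (that
+// constant is not declared here, so treat it as an assumption).
+func exampleAddFlowMod() *OfpFlowMod {
+	return &OfpFlowMod{
+		Cookie:      0xa000000000000001, // controller-chosen opaque cookie
+		TableId:     0,
+		Command:     OfpFlowModCommand_OFPFC_ADD,
+		IdleTimeout: 0, // 0 = never expire from inactivity
+		HardTimeout: 0, // 0 = never expire outright
+		Priority:    1000,
+		BufferId:    0xffffffff,  // assumed OFP_NO_BUFFER: no buffered packet to apply
+		Match:       &OfpMatch{}, // no OXM fields set; match type left at its default
+	}
+}
+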
+// Bucket for use in groups.
+type OfpBucket struct {
+	Weight               uint32       `protobuf:"varint,1,opt,name=weight,proto3" json:"weight,omitempty"`
+	WatchPort            uint32       `protobuf:"varint,2,opt,name=watch_port,json=watchPort,proto3" json:"watch_port,omitempty"`
+	WatchGroup           uint32       `protobuf:"varint,3,opt,name=watch_group,json=watchGroup,proto3" json:"watch_group,omitempty"`
+	Actions              []*OfpAction `protobuf:"bytes,4,rep,name=actions,proto3" json:"actions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *OfpBucket) Reset()         { *m = OfpBucket{} }
+func (m *OfpBucket) String() string { return proto.CompactTextString(m) }
+func (*OfpBucket) ProtoMessage()    {}
+func (*OfpBucket) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{31}
+}
+
+func (m *OfpBucket) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpBucket.Unmarshal(m, b)
+}
+func (m *OfpBucket) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpBucket.Marshal(b, m, deterministic)
+}
+func (m *OfpBucket) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpBucket.Merge(m, src)
+}
+func (m *OfpBucket) XXX_Size() int {
+	return xxx_messageInfo_OfpBucket.Size(m)
+}
+func (m *OfpBucket) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpBucket.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpBucket proto.InternalMessageInfo
+
+func (m *OfpBucket) GetWeight() uint32 {
+	if m != nil {
+		return m.Weight
+	}
+	return 0
+}
+
+func (m *OfpBucket) GetWatchPort() uint32 {
+	if m != nil {
+		return m.WatchPort
+	}
+	return 0
+}
+
+func (m *OfpBucket) GetWatchGroup() uint32 {
+	if m != nil {
+		return m.WatchGroup
+	}
+	return 0
+}
+
+func (m *OfpBucket) GetActions() []*OfpAction {
+	if m != nil {
+		return m.Actions
+	}
+	return nil
+}
+
+// Group setup and teardown (controller -> datapath).
+type OfpGroupMod struct {
+	//ofp_header header;
+	Command              OfpGroupModCommand `protobuf:"varint,1,opt,name=command,proto3,enum=openflow_13.OfpGroupModCommand" json:"command,omitempty"`
+	Type                 OfpGroupType       `protobuf:"varint,2,opt,name=type,proto3,enum=openflow_13.OfpGroupType" json:"type,omitempty"`
+	GroupId              uint32             `protobuf:"varint,3,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+	Buckets              []*OfpBucket       `protobuf:"bytes,4,rep,name=buckets,proto3" json:"buckets,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *OfpGroupMod) Reset()         { *m = OfpGroupMod{} }
+func (m *OfpGroupMod) String() string { return proto.CompactTextString(m) }
+func (*OfpGroupMod) ProtoMessage()    {}
+func (*OfpGroupMod) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{32}
+}
+
+func (m *OfpGroupMod) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpGroupMod.Unmarshal(m, b)
+}
+func (m *OfpGroupMod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpGroupMod.Marshal(b, m, deterministic)
+}
+func (m *OfpGroupMod) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpGroupMod.Merge(m, src)
+}
+func (m *OfpGroupMod) XXX_Size() int {
+	return xxx_messageInfo_OfpGroupMod.Size(m)
+}
+func (m *OfpGroupMod) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpGroupMod.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpGroupMod proto.InternalMessageInfo
+
+func (m *OfpGroupMod) GetCommand() OfpGroupModCommand {
+	if m != nil {
+		return m.Command
+	}
+	return OfpGroupModCommand_OFPGC_ADD
+}
+
+func (m *OfpGroupMod) GetType() OfpGroupType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpGroupType_OFPGT_ALL
+}
+
+func (m *OfpGroupMod) GetGroupId() uint32 {
+	if m != nil {
+		return m.GroupId
+	}
+	return 0
+}
+
+func (m *OfpGroupMod) GetBuckets() []*OfpBucket {
+	if m != nil {
+		return m.Buckets
+	}
+	return nil
+}
+
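+// Illustrative sketch, not generated code: an OFPGC_ADD group-mod of type
+// OFPGT_ALL carrying a single bucket. The bucket's action list is left empty
+// here; watch_port and watch_group only matter for fast-failover groups.
+func exampleAddGroupMod() *OfpGroupMod {
+	return &OfpGroupMod{
+		Command: OfpGroupModCommand_OFPGC_ADD,
+		Type:    OfpGroupType_OFPGT_ALL,
+		GroupId: 1,
+		Buckets: []*OfpBucket{
+			{Weight: 1}, // actions omitted in this sketch
+		},
+	}
+}
+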
+// Send packet (controller -> datapath).
+type OfpPacketOut struct {
+	//ofp_header header;
+	BufferId uint32       `protobuf:"varint,1,opt,name=buffer_id,json=bufferId,proto3" json:"buffer_id,omitempty"`
+	InPort   uint32       `protobuf:"varint,2,opt,name=in_port,json=inPort,proto3" json:"in_port,omitempty"`
+	Actions  []*OfpAction `protobuf:"bytes,3,rep,name=actions,proto3" json:"actions,omitempty"`
+	// The variable size action list is optionally followed by packet data.
+	// This data is only present and meaningful if buffer_id == -1.
+	Data                 []byte   `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpPacketOut) Reset()         { *m = OfpPacketOut{} }
+func (m *OfpPacketOut) String() string { return proto.CompactTextString(m) }
+func (*OfpPacketOut) ProtoMessage()    {}
+func (*OfpPacketOut) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{33}
+}
+
+func (m *OfpPacketOut) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPacketOut.Unmarshal(m, b)
+}
+func (m *OfpPacketOut) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPacketOut.Marshal(b, m, deterministic)
+}
+func (m *OfpPacketOut) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPacketOut.Merge(m, src)
+}
+func (m *OfpPacketOut) XXX_Size() int {
+	return xxx_messageInfo_OfpPacketOut.Size(m)
+}
+func (m *OfpPacketOut) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPacketOut.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPacketOut proto.InternalMessageInfo
+
+func (m *OfpPacketOut) GetBufferId() uint32 {
+	if m != nil {
+		return m.BufferId
+	}
+	return 0
+}
+
+func (m *OfpPacketOut) GetInPort() uint32 {
+	if m != nil {
+		return m.InPort
+	}
+	return 0
+}
+
+func (m *OfpPacketOut) GetActions() []*OfpAction {
+	if m != nil {
+		return m.Actions
+	}
+	return nil
+}
+
+func (m *OfpPacketOut) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
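+// Illustrative sketch, not generated code: a packet-out that carries its own
+// frame bytes. As the comment above notes, Data is only meaningful when
+// buffer_id == -1; 0xffffffff is the conventional OpenFlow 1.3 encoding of
+// that value (OFP_NO_BUFFER), and 0xfffffffd for in_port is the spec's
+// OFPP_CONTROLLER. Both are assumptions, since neither constant is declared here.
+func exampleUnbufferedPacketOut(frame []byte) *OfpPacketOut {
+	return &OfpPacketOut{
+		BufferId: 0xffffffff, // assumed OFP_NO_BUFFER: the frame travels in Data
+		InPort:   0xfffffffd, // assumed OFPP_CONTROLLER
+		Actions:  nil,        // an output action would normally be appended here
+		Data:     frame,
+	}
+}
+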
+// Packet received on port (datapath -> controller).
+type OfpPacketIn struct {
+	//ofp_header header;
+	BufferId             uint32            `protobuf:"varint,1,opt,name=buffer_id,json=bufferId,proto3" json:"buffer_id,omitempty"`
+	Reason               OfpPacketInReason `protobuf:"varint,2,opt,name=reason,proto3,enum=openflow_13.OfpPacketInReason" json:"reason,omitempty"`
+	TableId              uint32            `protobuf:"varint,3,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	Cookie               uint64            `protobuf:"varint,4,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	Match                *OfpMatch         `protobuf:"bytes,5,opt,name=match,proto3" json:"match,omitempty"`
+	Data                 []byte            `protobuf:"bytes,6,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *OfpPacketIn) Reset()         { *m = OfpPacketIn{} }
+func (m *OfpPacketIn) String() string { return proto.CompactTextString(m) }
+func (*OfpPacketIn) ProtoMessage()    {}
+func (*OfpPacketIn) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{34}
+}
+
+func (m *OfpPacketIn) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPacketIn.Unmarshal(m, b)
+}
+func (m *OfpPacketIn) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPacketIn.Marshal(b, m, deterministic)
+}
+func (m *OfpPacketIn) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPacketIn.Merge(m, src)
+}
+func (m *OfpPacketIn) XXX_Size() int {
+	return xxx_messageInfo_OfpPacketIn.Size(m)
+}
+func (m *OfpPacketIn) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPacketIn.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPacketIn proto.InternalMessageInfo
+
+func (m *OfpPacketIn) GetBufferId() uint32 {
+	if m != nil {
+		return m.BufferId
+	}
+	return 0
+}
+
+func (m *OfpPacketIn) GetReason() OfpPacketInReason {
+	if m != nil {
+		return m.Reason
+	}
+	return OfpPacketInReason_OFPR_NO_MATCH
+}
+
+func (m *OfpPacketIn) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpPacketIn) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *OfpPacketIn) GetMatch() *OfpMatch {
+	if m != nil {
+		return m.Match
+	}
+	return nil
+}
+
+func (m *OfpPacketIn) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
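+// Illustrative sketch, not generated code: a typical consumer of a packet-in
+// checks the reason before touching the raw frame in Data. The generated
+// getters are nil-safe, but OFPR_NO_MATCH is also the enum's zero value, so a
+// nil message is guarded against explicitly.
+func examplePacketInIsTableMiss(pkt *OfpPacketIn) bool {
+	return pkt != nil && pkt.GetReason() == OfpPacketInReason_OFPR_NO_MATCH
+}
+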
+// Flow removed (datapath -> controller).
+type OfpFlowRemoved struct {
+	//ofp_header header;
+	Cookie               uint64               `protobuf:"varint,1,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	Priority             uint32               `protobuf:"varint,2,opt,name=priority,proto3" json:"priority,omitempty"`
+	Reason               OfpFlowRemovedReason `protobuf:"varint,3,opt,name=reason,proto3,enum=openflow_13.OfpFlowRemovedReason" json:"reason,omitempty"`
+	TableId              uint32               `protobuf:"varint,4,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	DurationSec          uint32               `protobuf:"varint,5,opt,name=duration_sec,json=durationSec,proto3" json:"duration_sec,omitempty"`
+	DurationNsec         uint32               `protobuf:"varint,6,opt,name=duration_nsec,json=durationNsec,proto3" json:"duration_nsec,omitempty"`
+	IdleTimeout          uint32               `protobuf:"varint,7,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"`
+	HardTimeout          uint32               `protobuf:"varint,8,opt,name=hard_timeout,json=hardTimeout,proto3" json:"hard_timeout,omitempty"`
+	PacketCount          uint64               `protobuf:"varint,9,opt,name=packet_count,json=packetCount,proto3" json:"packet_count,omitempty"`
+	ByteCount            uint64               `protobuf:"varint,10,opt,name=byte_count,json=byteCount,proto3" json:"byte_count,omitempty"`
+	Match                *OfpMatch            `protobuf:"bytes,121,opt,name=match,proto3" json:"match,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *OfpFlowRemoved) Reset()         { *m = OfpFlowRemoved{} }
+func (m *OfpFlowRemoved) String() string { return proto.CompactTextString(m) }
+func (*OfpFlowRemoved) ProtoMessage()    {}
+func (*OfpFlowRemoved) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{35}
+}
+
+func (m *OfpFlowRemoved) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpFlowRemoved.Unmarshal(m, b)
+}
+func (m *OfpFlowRemoved) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpFlowRemoved.Marshal(b, m, deterministic)
+}
+func (m *OfpFlowRemoved) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpFlowRemoved.Merge(m, src)
+}
+func (m *OfpFlowRemoved) XXX_Size() int {
+	return xxx_messageInfo_OfpFlowRemoved.Size(m)
+}
+func (m *OfpFlowRemoved) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpFlowRemoved.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpFlowRemoved proto.InternalMessageInfo
+
+func (m *OfpFlowRemoved) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetPriority() uint32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetReason() OfpFlowRemovedReason {
+	if m != nil {
+		return m.Reason
+	}
+	return OfpFlowRemovedReason_OFPRR_IDLE_TIMEOUT
+}
+
+func (m *OfpFlowRemoved) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetDurationSec() uint32 {
+	if m != nil {
+		return m.DurationSec
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetDurationNsec() uint32 {
+	if m != nil {
+		return m.DurationNsec
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetIdleTimeout() uint32 {
+	if m != nil {
+		return m.IdleTimeout
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetHardTimeout() uint32 {
+	if m != nil {
+		return m.HardTimeout
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetPacketCount() uint64 {
+	if m != nil {
+		return m.PacketCount
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetByteCount() uint64 {
+	if m != nil {
+		return m.ByteCount
+	}
+	return 0
+}
+
+func (m *OfpFlowRemoved) GetMatch() *OfpMatch {
+	if m != nil {
+		return m.Match
+	}
+	return nil
+}
+
+// Common header for all meter bands
+type OfpMeterBandHeader struct {
+	Type      OfpMeterBandType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpMeterBandType" json:"type,omitempty"`
+	Rate      uint32           `protobuf:"varint,2,opt,name=rate,proto3" json:"rate,omitempty"`
+	BurstSize uint32           `protobuf:"varint,3,opt,name=burst_size,json=burstSize,proto3" json:"burst_size,omitempty"`
+	// Types that are valid to be assigned to Data:
+	//	*OfpMeterBandHeader_Drop
+	//	*OfpMeterBandHeader_DscpRemark
+	//	*OfpMeterBandHeader_Experimenter
+	Data                 isOfpMeterBandHeader_Data `protobuf_oneof:"data"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *OfpMeterBandHeader) Reset()         { *m = OfpMeterBandHeader{} }
+func (m *OfpMeterBandHeader) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterBandHeader) ProtoMessage()    {}
+func (*OfpMeterBandHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{36}
+}
+
+func (m *OfpMeterBandHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterBandHeader.Unmarshal(m, b)
+}
+func (m *OfpMeterBandHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterBandHeader.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterBandHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterBandHeader.Merge(m, src)
+}
+func (m *OfpMeterBandHeader) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterBandHeader.Size(m)
+}
+func (m *OfpMeterBandHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterBandHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterBandHeader proto.InternalMessageInfo
+
+func (m *OfpMeterBandHeader) GetType() OfpMeterBandType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpMeterBandType_OFPMBT_INVALID
+}
+
+func (m *OfpMeterBandHeader) GetRate() uint32 {
+	if m != nil {
+		return m.Rate
+	}
+	return 0
+}
+
+func (m *OfpMeterBandHeader) GetBurstSize() uint32 {
+	if m != nil {
+		return m.BurstSize
+	}
+	return 0
+}
+
+type isOfpMeterBandHeader_Data interface {
+	isOfpMeterBandHeader_Data()
+}
+
+type OfpMeterBandHeader_Drop struct {
+	Drop *OfpMeterBandDrop `protobuf:"bytes,4,opt,name=drop,proto3,oneof"`
+}
+
+type OfpMeterBandHeader_DscpRemark struct {
+	DscpRemark *OfpMeterBandDscpRemark `protobuf:"bytes,5,opt,name=dscp_remark,json=dscpRemark,proto3,oneof"`
+}
+
+type OfpMeterBandHeader_Experimenter struct {
+	Experimenter *OfpMeterBandExperimenter `protobuf:"bytes,6,opt,name=experimenter,proto3,oneof"`
+}
+
+func (*OfpMeterBandHeader_Drop) isOfpMeterBandHeader_Data() {}
+
+func (*OfpMeterBandHeader_DscpRemark) isOfpMeterBandHeader_Data() {}
+
+func (*OfpMeterBandHeader_Experimenter) isOfpMeterBandHeader_Data() {}
+
+func (m *OfpMeterBandHeader) GetData() isOfpMeterBandHeader_Data {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+func (m *OfpMeterBandHeader) GetDrop() *OfpMeterBandDrop {
+	if x, ok := m.GetData().(*OfpMeterBandHeader_Drop); ok {
+		return x.Drop
+	}
+	return nil
+}
+
+func (m *OfpMeterBandHeader) GetDscpRemark() *OfpMeterBandDscpRemark {
+	if x, ok := m.GetData().(*OfpMeterBandHeader_DscpRemark); ok {
+		return x.DscpRemark
+	}
+	return nil
+}
+
+func (m *OfpMeterBandHeader) GetExperimenter() *OfpMeterBandExperimenter {
+	if x, ok := m.GetData().(*OfpMeterBandHeader_Experimenter); ok {
+		return x.Experimenter
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpMeterBandHeader) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpMeterBandHeader_Drop)(nil),
+		(*OfpMeterBandHeader_DscpRemark)(nil),
+		(*OfpMeterBandHeader_Experimenter)(nil),
+	}
+}
+
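+// Illustrative sketch, not generated code: the Data oneof above is normally
+// consumed through the typed getters; each one returns nil unless its wrapper
+// is the variant currently set, so a chain of nil checks identifies the band.
+func exampleMeterBandKind(band *OfpMeterBandHeader) string {
+	switch {
+	case band.GetDrop() != nil:
+		return "drop"
+	case band.GetDscpRemark() != nil:
+		return "dscp_remark"
+	case band.GetExperimenter() != nil:
+		return "experimenter"
+	default:
+		return "unset"
+	}
+}
+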
+// OFPMBT_DROP band - drop packets
+type OfpMeterBandDrop struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpMeterBandDrop) Reset()         { *m = OfpMeterBandDrop{} }
+func (m *OfpMeterBandDrop) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterBandDrop) ProtoMessage()    {}
+func (*OfpMeterBandDrop) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{37}
+}
+
+func (m *OfpMeterBandDrop) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterBandDrop.Unmarshal(m, b)
+}
+func (m *OfpMeterBandDrop) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterBandDrop.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterBandDrop) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterBandDrop.Merge(m, src)
+}
+func (m *OfpMeterBandDrop) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterBandDrop.Size(m)
+}
+func (m *OfpMeterBandDrop) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterBandDrop.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterBandDrop proto.InternalMessageInfo
+
+// OFPMBT_DSCP_REMARK band - Remark DSCP in the IP header
+type OfpMeterBandDscpRemark struct {
+	PrecLevel            uint32   `protobuf:"varint,1,opt,name=prec_level,json=precLevel,proto3" json:"prec_level,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpMeterBandDscpRemark) Reset()         { *m = OfpMeterBandDscpRemark{} }
+func (m *OfpMeterBandDscpRemark) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterBandDscpRemark) ProtoMessage()    {}
+func (*OfpMeterBandDscpRemark) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{38}
+}
+
+func (m *OfpMeterBandDscpRemark) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterBandDscpRemark.Unmarshal(m, b)
+}
+func (m *OfpMeterBandDscpRemark) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterBandDscpRemark.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterBandDscpRemark) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterBandDscpRemark.Merge(m, src)
+}
+func (m *OfpMeterBandDscpRemark) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterBandDscpRemark.Size(m)
+}
+func (m *OfpMeterBandDscpRemark) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterBandDscpRemark.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterBandDscpRemark proto.InternalMessageInfo
+
+func (m *OfpMeterBandDscpRemark) GetPrecLevel() uint32 {
+	if m != nil {
+		return m.PrecLevel
+	}
+	return 0
+}
+
+// OFPMBT_EXPERIMENTER band - Experimenter type.
+// The rest of the band is experimenter-defined.
+type OfpMeterBandExperimenter struct {
+	Experimenter         uint32   `protobuf:"varint,1,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpMeterBandExperimenter) Reset()         { *m = OfpMeterBandExperimenter{} }
+func (m *OfpMeterBandExperimenter) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterBandExperimenter) ProtoMessage()    {}
+func (*OfpMeterBandExperimenter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{39}
+}
+
+func (m *OfpMeterBandExperimenter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterBandExperimenter.Unmarshal(m, b)
+}
+func (m *OfpMeterBandExperimenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterBandExperimenter.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterBandExperimenter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterBandExperimenter.Merge(m, src)
+}
+func (m *OfpMeterBandExperimenter) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterBandExperimenter.Size(m)
+}
+func (m *OfpMeterBandExperimenter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterBandExperimenter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterBandExperimenter proto.InternalMessageInfo
+
+func (m *OfpMeterBandExperimenter) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+// Meter configuration. OFPT_METER_MOD.
+type OfpMeterMod struct {
+	Command              OfpMeterModCommand    `protobuf:"varint,1,opt,name=command,proto3,enum=openflow_13.OfpMeterModCommand" json:"command,omitempty"`
+	Flags                uint32                `protobuf:"varint,2,opt,name=flags,proto3" json:"flags,omitempty"`
+	MeterId              uint32                `protobuf:"varint,3,opt,name=meter_id,json=meterId,proto3" json:"meter_id,omitempty"`
+	Bands                []*OfpMeterBandHeader `protobuf:"bytes,4,rep,name=bands,proto3" json:"bands,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *OfpMeterMod) Reset()         { *m = OfpMeterMod{} }
+func (m *OfpMeterMod) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterMod) ProtoMessage()    {}
+func (*OfpMeterMod) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{40}
+}
+
+func (m *OfpMeterMod) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterMod.Unmarshal(m, b)
+}
+func (m *OfpMeterMod) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterMod.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterMod) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterMod.Merge(m, src)
+}
+func (m *OfpMeterMod) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterMod.Size(m)
+}
+func (m *OfpMeterMod) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterMod.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterMod proto.InternalMessageInfo
+
+func (m *OfpMeterMod) GetCommand() OfpMeterModCommand {
+	if m != nil {
+		return m.Command
+	}
+	return OfpMeterModCommand_OFPMC_ADD
+}
+
+func (m *OfpMeterMod) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpMeterMod) GetMeterId() uint32 {
+	if m != nil {
+		return m.MeterId
+	}
+	return 0
+}
+
+func (m *OfpMeterMod) GetBands() []*OfpMeterBandHeader {
+	if m != nil {
+		return m.Bands
+	}
+	return nil
+}
+
+// OFPT_ERROR: Error message (datapath -> controller).
+type OfpErrorMsg struct {
+	Header               *OfpHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	Type                 uint32     `protobuf:"varint,2,opt,name=type,proto3" json:"type,omitempty"`
+	Code                 uint32     `protobuf:"varint,3,opt,name=code,proto3" json:"code,omitempty"`
+	Data                 []byte     `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
+}
+
+func (m *OfpErrorMsg) Reset()         { *m = OfpErrorMsg{} }
+func (m *OfpErrorMsg) String() string { return proto.CompactTextString(m) }
+func (*OfpErrorMsg) ProtoMessage()    {}
+func (*OfpErrorMsg) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{41}
+}
+
+func (m *OfpErrorMsg) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpErrorMsg.Unmarshal(m, b)
+}
+func (m *OfpErrorMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpErrorMsg.Marshal(b, m, deterministic)
+}
+func (m *OfpErrorMsg) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpErrorMsg.Merge(m, src)
+}
+func (m *OfpErrorMsg) XXX_Size() int {
+	return xxx_messageInfo_OfpErrorMsg.Size(m)
+}
+func (m *OfpErrorMsg) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpErrorMsg.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpErrorMsg proto.InternalMessageInfo
+
+func (m *OfpErrorMsg) GetHeader() *OfpHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+func (m *OfpErrorMsg) GetType() uint32 {
+	if m != nil {
+		return m.Type
+	}
+	return 0
+}
+
+func (m *OfpErrorMsg) GetCode() uint32 {
+	if m != nil {
+		return m.Code
+	}
+	return 0
+}
+
+func (m *OfpErrorMsg) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// OFPET_EXPERIMENTER: Error message (datapath -> controller).
+type OfpErrorExperimenterMsg struct {
+	Type                 uint32   `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
+	ExpType              uint32   `protobuf:"varint,2,opt,name=exp_type,json=expType,proto3" json:"exp_type,omitempty"`
+	Experimenter         uint32   `protobuf:"varint,3,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	Data                 []byte   `protobuf:"bytes,4,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpErrorExperimenterMsg) Reset()         { *m = OfpErrorExperimenterMsg{} }
+func (m *OfpErrorExperimenterMsg) String() string { return proto.CompactTextString(m) }
+func (*OfpErrorExperimenterMsg) ProtoMessage()    {}
+func (*OfpErrorExperimenterMsg) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{42}
+}
+
+func (m *OfpErrorExperimenterMsg) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpErrorExperimenterMsg.Unmarshal(m, b)
+}
+func (m *OfpErrorExperimenterMsg) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpErrorExperimenterMsg.Marshal(b, m, deterministic)
+}
+func (m *OfpErrorExperimenterMsg) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpErrorExperimenterMsg.Merge(m, src)
+}
+func (m *OfpErrorExperimenterMsg) XXX_Size() int {
+	return xxx_messageInfo_OfpErrorExperimenterMsg.Size(m)
+}
+func (m *OfpErrorExperimenterMsg) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpErrorExperimenterMsg.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpErrorExperimenterMsg proto.InternalMessageInfo
+
+func (m *OfpErrorExperimenterMsg) GetType() uint32 {
+	if m != nil {
+		return m.Type
+	}
+	return 0
+}
+
+func (m *OfpErrorExperimenterMsg) GetExpType() uint32 {
+	if m != nil {
+		return m.ExpType
+	}
+	return 0
+}
+
+func (m *OfpErrorExperimenterMsg) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpErrorExperimenterMsg) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+type OfpMultipartRequest struct {
+	//ofp_header header;
+	Type                 OfpMultipartType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpMultipartType" json:"type,omitempty"`
+	Flags                uint32           `protobuf:"varint,2,opt,name=flags,proto3" json:"flags,omitempty"`
+	Body                 []byte           `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *OfpMultipartRequest) Reset()         { *m = OfpMultipartRequest{} }
+func (m *OfpMultipartRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpMultipartRequest) ProtoMessage()    {}
+func (*OfpMultipartRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{43}
+}
+
+func (m *OfpMultipartRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMultipartRequest.Unmarshal(m, b)
+}
+func (m *OfpMultipartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMultipartRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpMultipartRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMultipartRequest.Merge(m, src)
+}
+func (m *OfpMultipartRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpMultipartRequest.Size(m)
+}
+func (m *OfpMultipartRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMultipartRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMultipartRequest proto.InternalMessageInfo
+
+func (m *OfpMultipartRequest) GetType() OfpMultipartType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpMultipartType_OFPMP_DESC
+}
+
+func (m *OfpMultipartRequest) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpMultipartRequest) GetBody() []byte {
+	if m != nil {
+		return m.Body
+	}
+	return nil
+}
+
+type OfpMultipartReply struct {
+	//ofp_header header;
+	Type                 OfpMultipartType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpMultipartType" json:"type,omitempty"`
+	Flags                uint32           `protobuf:"varint,2,opt,name=flags,proto3" json:"flags,omitempty"`
+	Body                 []byte           `protobuf:"bytes,3,opt,name=body,proto3" json:"body,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *OfpMultipartReply) Reset()         { *m = OfpMultipartReply{} }
+func (m *OfpMultipartReply) String() string { return proto.CompactTextString(m) }
+func (*OfpMultipartReply) ProtoMessage()    {}
+func (*OfpMultipartReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{44}
+}
+
+func (m *OfpMultipartReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMultipartReply.Unmarshal(m, b)
+}
+func (m *OfpMultipartReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMultipartReply.Marshal(b, m, deterministic)
+}
+func (m *OfpMultipartReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMultipartReply.Merge(m, src)
+}
+func (m *OfpMultipartReply) XXX_Size() int {
+	return xxx_messageInfo_OfpMultipartReply.Size(m)
+}
+func (m *OfpMultipartReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMultipartReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMultipartReply proto.InternalMessageInfo
+
+func (m *OfpMultipartReply) GetType() OfpMultipartType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpMultipartType_OFPMP_DESC
+}
+
+func (m *OfpMultipartReply) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpMultipartReply) GetBody() []byte {
+	if m != nil {
+		return m.Body
+	}
+	return nil
+}
+
+// Body of reply to OFPMP_DESC request.  Each entry is a NULL-terminated
+// ASCII string.
+type OfpDesc struct {
+	MfrDesc              string   `protobuf:"bytes,1,opt,name=mfr_desc,json=mfrDesc,proto3" json:"mfr_desc,omitempty"`
+	HwDesc               string   `protobuf:"bytes,2,opt,name=hw_desc,json=hwDesc,proto3" json:"hw_desc,omitempty"`
+	SwDesc               string   `protobuf:"bytes,3,opt,name=sw_desc,json=swDesc,proto3" json:"sw_desc,omitempty"`
+	SerialNum            string   `protobuf:"bytes,4,opt,name=serial_num,json=serialNum,proto3" json:"serial_num,omitempty"`
+	DpDesc               string   `protobuf:"bytes,5,opt,name=dp_desc,json=dpDesc,proto3" json:"dp_desc,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpDesc) Reset()         { *m = OfpDesc{} }
+func (m *OfpDesc) String() string { return proto.CompactTextString(m) }
+func (*OfpDesc) ProtoMessage()    {}
+func (*OfpDesc) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{45}
+}
+
+func (m *OfpDesc) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpDesc.Unmarshal(m, b)
+}
+func (m *OfpDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpDesc.Marshal(b, m, deterministic)
+}
+func (m *OfpDesc) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpDesc.Merge(m, src)
+}
+func (m *OfpDesc) XXX_Size() int {
+	return xxx_messageInfo_OfpDesc.Size(m)
+}
+func (m *OfpDesc) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpDesc.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpDesc proto.InternalMessageInfo
+
+func (m *OfpDesc) GetMfrDesc() string {
+	if m != nil {
+		return m.MfrDesc
+	}
+	return ""
+}
+
+func (m *OfpDesc) GetHwDesc() string {
+	if m != nil {
+		return m.HwDesc
+	}
+	return ""
+}
+
+func (m *OfpDesc) GetSwDesc() string {
+	if m != nil {
+		return m.SwDesc
+	}
+	return ""
+}
+
+func (m *OfpDesc) GetSerialNum() string {
+	if m != nil {
+		return m.SerialNum
+	}
+	return ""
+}
+
+func (m *OfpDesc) GetDpDesc() string {
+	if m != nil {
+		return m.DpDesc
+	}
+	return ""
+}
+
+// Body for ofp_multipart_request of type OFPMP_FLOW.
+type OfpFlowStatsRequest struct {
+	TableId              uint32    `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	OutPort              uint32    `protobuf:"varint,2,opt,name=out_port,json=outPort,proto3" json:"out_port,omitempty"`
+	OutGroup             uint32    `protobuf:"varint,3,opt,name=out_group,json=outGroup,proto3" json:"out_group,omitempty"`
+	Cookie               uint64    `protobuf:"varint,4,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	CookieMask           uint64    `protobuf:"varint,5,opt,name=cookie_mask,json=cookieMask,proto3" json:"cookie_mask,omitempty"`
+	Match                *OfpMatch `protobuf:"bytes,6,opt,name=match,proto3" json:"match,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *OfpFlowStatsRequest) Reset()         { *m = OfpFlowStatsRequest{} }
+func (m *OfpFlowStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpFlowStatsRequest) ProtoMessage()    {}
+func (*OfpFlowStatsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{46}
+}
+
+func (m *OfpFlowStatsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpFlowStatsRequest.Unmarshal(m, b)
+}
+func (m *OfpFlowStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpFlowStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpFlowStatsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpFlowStatsRequest.Merge(m, src)
+}
+func (m *OfpFlowStatsRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpFlowStatsRequest.Size(m)
+}
+func (m *OfpFlowStatsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpFlowStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpFlowStatsRequest proto.InternalMessageInfo
+
+func (m *OfpFlowStatsRequest) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpFlowStatsRequest) GetOutPort() uint32 {
+	if m != nil {
+		return m.OutPort
+	}
+	return 0
+}
+
+func (m *OfpFlowStatsRequest) GetOutGroup() uint32 {
+	if m != nil {
+		return m.OutGroup
+	}
+	return 0
+}
+
+func (m *OfpFlowStatsRequest) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *OfpFlowStatsRequest) GetCookieMask() uint64 {
+	if m != nil {
+		return m.CookieMask
+	}
+	return 0
+}
+
+func (m *OfpFlowStatsRequest) GetMatch() *OfpMatch {
+	if m != nil {
+		return m.Match
+	}
+	return nil
+}
+
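+// Illustrative sketch, not generated code: the multipart request defined
+// earlier in this file carries its type-specific payload as opaque bytes, so
+// one way to ask for flow statistics is to marshal this request message and
+// place the result in Body. OfpMultipartType_OFPMP_FLOW is assumed to exist
+// alongside the OFPMP_DESC value shown above; the wildcard values for table,
+// port and group are likewise assumptions taken from the OpenFlow 1.3 spec.
+func exampleFlowStatsMultipartRequest() (*OfpMultipartRequest, error) {
+	body, err := proto.Marshal(&OfpFlowStatsRequest{
+		TableId:  0xff,       // assumed OFPTT_ALL: all tables
+		OutPort:  0xffffffff, // assumed OFPP_ANY
+		OutGroup: 0xffffffff, // assumed OFPG_ANY
+	})
+	if err != nil {
+		return nil, err
+	}
+	return &OfpMultipartRequest{
+		Type: OfpMultipartType_OFPMP_FLOW,
+		Body: body,
+	}, nil
+}
+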
+// Body of reply to OFPMP_FLOW request.
+type OfpFlowStats struct {
+	Id                   uint64            `protobuf:"varint,14,opt,name=id,proto3" json:"id,omitempty"`
+	TableId              uint32            `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	DurationSec          uint32            `protobuf:"varint,2,opt,name=duration_sec,json=durationSec,proto3" json:"duration_sec,omitempty"`
+	DurationNsec         uint32            `protobuf:"varint,3,opt,name=duration_nsec,json=durationNsec,proto3" json:"duration_nsec,omitempty"`
+	Priority             uint32            `protobuf:"varint,4,opt,name=priority,proto3" json:"priority,omitempty"`
+	IdleTimeout          uint32            `protobuf:"varint,5,opt,name=idle_timeout,json=idleTimeout,proto3" json:"idle_timeout,omitempty"`
+	HardTimeout          uint32            `protobuf:"varint,6,opt,name=hard_timeout,json=hardTimeout,proto3" json:"hard_timeout,omitempty"`
+	Flags                uint32            `protobuf:"varint,7,opt,name=flags,proto3" json:"flags,omitempty"`
+	Cookie               uint64            `protobuf:"varint,8,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	PacketCount          uint64            `protobuf:"varint,9,opt,name=packet_count,json=packetCount,proto3" json:"packet_count,omitempty"`
+	ByteCount            uint64            `protobuf:"varint,10,opt,name=byte_count,json=byteCount,proto3" json:"byte_count,omitempty"`
+	Match                *OfpMatch         `protobuf:"bytes,12,opt,name=match,proto3" json:"match,omitempty"`
+	Instructions         []*OfpInstruction `protobuf:"bytes,13,rep,name=instructions,proto3" json:"instructions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *OfpFlowStats) Reset()         { *m = OfpFlowStats{} }
+func (m *OfpFlowStats) String() string { return proto.CompactTextString(m) }
+func (*OfpFlowStats) ProtoMessage()    {}
+func (*OfpFlowStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{47}
+}
+
+func (m *OfpFlowStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpFlowStats.Unmarshal(m, b)
+}
+func (m *OfpFlowStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpFlowStats.Marshal(b, m, deterministic)
+}
+func (m *OfpFlowStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpFlowStats.Merge(m, src)
+}
+func (m *OfpFlowStats) XXX_Size() int {
+	return xxx_messageInfo_OfpFlowStats.Size(m)
+}
+func (m *OfpFlowStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpFlowStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpFlowStats proto.InternalMessageInfo
+
+func (m *OfpFlowStats) GetId() uint64 {
+	if m != nil {
+		return m.Id
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetDurationSec() uint32 {
+	if m != nil {
+		return m.DurationSec
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetDurationNsec() uint32 {
+	if m != nil {
+		return m.DurationNsec
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetPriority() uint32 {
+	if m != nil {
+		return m.Priority
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetIdleTimeout() uint32 {
+	if m != nil {
+		return m.IdleTimeout
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetHardTimeout() uint32 {
+	if m != nil {
+		return m.HardTimeout
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetPacketCount() uint64 {
+	if m != nil {
+		return m.PacketCount
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetByteCount() uint64 {
+	if m != nil {
+		return m.ByteCount
+	}
+	return 0
+}
+
+func (m *OfpFlowStats) GetMatch() *OfpMatch {
+	if m != nil {
+		return m.Match
+	}
+	return nil
+}
+
+func (m *OfpFlowStats) GetInstructions() []*OfpInstruction {
+	if m != nil {
+		return m.Instructions
+	}
+	return nil
+}
+
+// Body for ofp_multipart_request of type OFPMP_AGGREGATE.
+type OfpAggregateStatsRequest struct {
+	TableId              uint32    `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	OutPort              uint32    `protobuf:"varint,2,opt,name=out_port,json=outPort,proto3" json:"out_port,omitempty"`
+	OutGroup             uint32    `protobuf:"varint,3,opt,name=out_group,json=outGroup,proto3" json:"out_group,omitempty"`
+	Cookie               uint64    `protobuf:"varint,4,opt,name=cookie,proto3" json:"cookie,omitempty"`
+	CookieMask           uint64    `protobuf:"varint,5,opt,name=cookie_mask,json=cookieMask,proto3" json:"cookie_mask,omitempty"`
+	Match                *OfpMatch `protobuf:"bytes,6,opt,name=match,proto3" json:"match,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *OfpAggregateStatsRequest) Reset()         { *m = OfpAggregateStatsRequest{} }
+func (m *OfpAggregateStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpAggregateStatsRequest) ProtoMessage()    {}
+func (*OfpAggregateStatsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{48}
+}
+
+func (m *OfpAggregateStatsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpAggregateStatsRequest.Unmarshal(m, b)
+}
+func (m *OfpAggregateStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpAggregateStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpAggregateStatsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpAggregateStatsRequest.Merge(m, src)
+}
+func (m *OfpAggregateStatsRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpAggregateStatsRequest.Size(m)
+}
+func (m *OfpAggregateStatsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpAggregateStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpAggregateStatsRequest proto.InternalMessageInfo
+
+func (m *OfpAggregateStatsRequest) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsRequest) GetOutPort() uint32 {
+	if m != nil {
+		return m.OutPort
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsRequest) GetOutGroup() uint32 {
+	if m != nil {
+		return m.OutGroup
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsRequest) GetCookie() uint64 {
+	if m != nil {
+		return m.Cookie
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsRequest) GetCookieMask() uint64 {
+	if m != nil {
+		return m.CookieMask
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsRequest) GetMatch() *OfpMatch {
+	if m != nil {
+		return m.Match
+	}
+	return nil
+}
+
+// Body of reply to OFPMP_AGGREGATE request.
+type OfpAggregateStatsReply struct {
+	PacketCount          uint64   `protobuf:"varint,1,opt,name=packet_count,json=packetCount,proto3" json:"packet_count,omitempty"`
+	ByteCount            uint64   `protobuf:"varint,2,opt,name=byte_count,json=byteCount,proto3" json:"byte_count,omitempty"`
+	FlowCount            uint32   `protobuf:"varint,3,opt,name=flow_count,json=flowCount,proto3" json:"flow_count,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpAggregateStatsReply) Reset()         { *m = OfpAggregateStatsReply{} }
+func (m *OfpAggregateStatsReply) String() string { return proto.CompactTextString(m) }
+func (*OfpAggregateStatsReply) ProtoMessage()    {}
+func (*OfpAggregateStatsReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{49}
+}
+
+func (m *OfpAggregateStatsReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpAggregateStatsReply.Unmarshal(m, b)
+}
+func (m *OfpAggregateStatsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpAggregateStatsReply.Marshal(b, m, deterministic)
+}
+func (m *OfpAggregateStatsReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpAggregateStatsReply.Merge(m, src)
+}
+func (m *OfpAggregateStatsReply) XXX_Size() int {
+	return xxx_messageInfo_OfpAggregateStatsReply.Size(m)
+}
+func (m *OfpAggregateStatsReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpAggregateStatsReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpAggregateStatsReply proto.InternalMessageInfo
+
+func (m *OfpAggregateStatsReply) GetPacketCount() uint64 {
+	if m != nil {
+		return m.PacketCount
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsReply) GetByteCount() uint64 {
+	if m != nil {
+		return m.ByteCount
+	}
+	return 0
+}
+
+func (m *OfpAggregateStatsReply) GetFlowCount() uint32 {
+	if m != nil {
+		return m.FlowCount
+	}
+	return 0
+}
+
+// Common header for all Table Feature Properties
+type OfpTableFeatureProperty struct {
+	Type OfpTableFeaturePropType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpTableFeaturePropType" json:"type,omitempty"`
+	// Types that are valid to be assigned to Value:
+	//	*OfpTableFeatureProperty_Instructions
+	//	*OfpTableFeatureProperty_NextTables
+	//	*OfpTableFeatureProperty_Actions
+	//	*OfpTableFeatureProperty_Oxm
+	//	*OfpTableFeatureProperty_Experimenter
+	Value                isOfpTableFeatureProperty_Value `protobuf_oneof:"value"`
+	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
+	XXX_unrecognized     []byte                          `json:"-"`
+	XXX_sizecache        int32                           `json:"-"`
+}
+
+func (m *OfpTableFeatureProperty) Reset()         { *m = OfpTableFeatureProperty{} }
+func (m *OfpTableFeatureProperty) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeatureProperty) ProtoMessage()    {}
+func (*OfpTableFeatureProperty) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{50}
+}
+
+func (m *OfpTableFeatureProperty) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeatureProperty.Unmarshal(m, b)
+}
+func (m *OfpTableFeatureProperty) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeatureProperty.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeatureProperty) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeatureProperty.Merge(m, src)
+}
+func (m *OfpTableFeatureProperty) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeatureProperty.Size(m)
+}
+func (m *OfpTableFeatureProperty) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeatureProperty.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeatureProperty proto.InternalMessageInfo
+
+func (m *OfpTableFeatureProperty) GetType() OfpTableFeaturePropType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpTableFeaturePropType_OFPTFPT_INSTRUCTIONS
+}
+
+type isOfpTableFeatureProperty_Value interface {
+	isOfpTableFeatureProperty_Value()
+}
+
+type OfpTableFeatureProperty_Instructions struct {
+	Instructions *OfpTableFeaturePropInstructions `protobuf:"bytes,2,opt,name=instructions,proto3,oneof"`
+}
+
+type OfpTableFeatureProperty_NextTables struct {
+	NextTables *OfpTableFeaturePropNextTables `protobuf:"bytes,3,opt,name=next_tables,json=nextTables,proto3,oneof"`
+}
+
+type OfpTableFeatureProperty_Actions struct {
+	Actions *OfpTableFeaturePropActions `protobuf:"bytes,4,opt,name=actions,proto3,oneof"`
+}
+
+type OfpTableFeatureProperty_Oxm struct {
+	Oxm *OfpTableFeaturePropOxm `protobuf:"bytes,5,opt,name=oxm,proto3,oneof"`
+}
+
+type OfpTableFeatureProperty_Experimenter struct {
+	Experimenter *OfpTableFeaturePropExperimenter `protobuf:"bytes,6,opt,name=experimenter,proto3,oneof"`
+}
+
+func (*OfpTableFeatureProperty_Instructions) isOfpTableFeatureProperty_Value() {}
+
+func (*OfpTableFeatureProperty_NextTables) isOfpTableFeatureProperty_Value() {}
+
+func (*OfpTableFeatureProperty_Actions) isOfpTableFeatureProperty_Value() {}
+
+func (*OfpTableFeatureProperty_Oxm) isOfpTableFeatureProperty_Value() {}
+
+func (*OfpTableFeatureProperty_Experimenter) isOfpTableFeatureProperty_Value() {}
+
+func (m *OfpTableFeatureProperty) GetValue() isOfpTableFeatureProperty_Value {
+	if m != nil {
+		return m.Value
+	}
+	return nil
+}
+
+func (m *OfpTableFeatureProperty) GetInstructions() *OfpTableFeaturePropInstructions {
+	if x, ok := m.GetValue().(*OfpTableFeatureProperty_Instructions); ok {
+		return x.Instructions
+	}
+	return nil
+}
+
+func (m *OfpTableFeatureProperty) GetNextTables() *OfpTableFeaturePropNextTables {
+	if x, ok := m.GetValue().(*OfpTableFeatureProperty_NextTables); ok {
+		return x.NextTables
+	}
+	return nil
+}
+
+func (m *OfpTableFeatureProperty) GetActions() *OfpTableFeaturePropActions {
+	if x, ok := m.GetValue().(*OfpTableFeatureProperty_Actions); ok {
+		return x.Actions
+	}
+	return nil
+}
+
+func (m *OfpTableFeatureProperty) GetOxm() *OfpTableFeaturePropOxm {
+	if x, ok := m.GetValue().(*OfpTableFeatureProperty_Oxm); ok {
+		return x.Oxm
+	}
+	return nil
+}
+
+func (m *OfpTableFeatureProperty) GetExperimenter() *OfpTableFeaturePropExperimenter {
+	if x, ok := m.GetValue().(*OfpTableFeatureProperty_Experimenter); ok {
+		return x.Experimenter
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*OfpTableFeatureProperty) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*OfpTableFeatureProperty_Instructions)(nil),
+		(*OfpTableFeatureProperty_NextTables)(nil),
+		(*OfpTableFeatureProperty_Actions)(nil),
+		(*OfpTableFeatureProperty_Oxm)(nil),
+		(*OfpTableFeatureProperty_Experimenter)(nil),
+	}
+}
+
+// Instructions property
+type OfpTableFeaturePropInstructions struct {
+	// One of OFPTFPT_INSTRUCTIONS,
+	//OFPTFPT_INSTRUCTIONS_MISS.
+	Instructions         []*OfpInstruction `protobuf:"bytes,1,rep,name=instructions,proto3" json:"instructions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *OfpTableFeaturePropInstructions) Reset()         { *m = OfpTableFeaturePropInstructions{} }
+func (m *OfpTableFeaturePropInstructions) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeaturePropInstructions) ProtoMessage()    {}
+func (*OfpTableFeaturePropInstructions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{51}
+}
+
+func (m *OfpTableFeaturePropInstructions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeaturePropInstructions.Unmarshal(m, b)
+}
+func (m *OfpTableFeaturePropInstructions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeaturePropInstructions.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeaturePropInstructions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeaturePropInstructions.Merge(m, src)
+}
+func (m *OfpTableFeaturePropInstructions) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeaturePropInstructions.Size(m)
+}
+func (m *OfpTableFeaturePropInstructions) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeaturePropInstructions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeaturePropInstructions proto.InternalMessageInfo
+
+func (m *OfpTableFeaturePropInstructions) GetInstructions() []*OfpInstruction {
+	if m != nil {
+		return m.Instructions
+	}
+	return nil
+}
+
+// Next Tables property
+type OfpTableFeaturePropNextTables struct {
+	// One of OFPTFPT_NEXT_TABLES,
+	// OFPTFPT_NEXT_TABLES_MISS.
+	NextTableIds         []uint32 `protobuf:"varint,1,rep,packed,name=next_table_ids,json=nextTableIds,proto3" json:"next_table_ids,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpTableFeaturePropNextTables) Reset()         { *m = OfpTableFeaturePropNextTables{} }
+func (m *OfpTableFeaturePropNextTables) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeaturePropNextTables) ProtoMessage()    {}
+func (*OfpTableFeaturePropNextTables) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{52}
+}
+
+func (m *OfpTableFeaturePropNextTables) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeaturePropNextTables.Unmarshal(m, b)
+}
+func (m *OfpTableFeaturePropNextTables) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeaturePropNextTables.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeaturePropNextTables) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeaturePropNextTables.Merge(m, src)
+}
+func (m *OfpTableFeaturePropNextTables) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeaturePropNextTables.Size(m)
+}
+func (m *OfpTableFeaturePropNextTables) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeaturePropNextTables.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeaturePropNextTables proto.InternalMessageInfo
+
+func (m *OfpTableFeaturePropNextTables) GetNextTableIds() []uint32 {
+	if m != nil {
+		return m.NextTableIds
+	}
+	return nil
+}
+
+// Actions property
+type OfpTableFeaturePropActions struct {
+	// One of OFPTFPT_WRITE_ACTIONS,
+	// OFPTFPT_WRITE_ACTIONS_MISS,
+	// OFPTFPT_APPLY_ACTIONS,
+	// OFPTFPT_APPLY_ACTIONS_MISS.
+	Actions              []*OfpAction `protobuf:"bytes,1,rep,name=actions,proto3" json:"actions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *OfpTableFeaturePropActions) Reset()         { *m = OfpTableFeaturePropActions{} }
+func (m *OfpTableFeaturePropActions) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeaturePropActions) ProtoMessage()    {}
+func (*OfpTableFeaturePropActions) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{53}
+}
+
+func (m *OfpTableFeaturePropActions) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeaturePropActions.Unmarshal(m, b)
+}
+func (m *OfpTableFeaturePropActions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeaturePropActions.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeaturePropActions) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeaturePropActions.Merge(m, src)
+}
+func (m *OfpTableFeaturePropActions) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeaturePropActions.Size(m)
+}
+func (m *OfpTableFeaturePropActions) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeaturePropActions.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeaturePropActions proto.InternalMessageInfo
+
+func (m *OfpTableFeaturePropActions) GetActions() []*OfpAction {
+	if m != nil {
+		return m.Actions
+	}
+	return nil
+}
+
+// Match, Wildcard or Set-Field property
+type OfpTableFeaturePropOxm struct {
+	// TODO is this a uint32???
+	OxmIds               []uint32 `protobuf:"varint,3,rep,packed,name=oxm_ids,json=oxmIds,proto3" json:"oxm_ids,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpTableFeaturePropOxm) Reset()         { *m = OfpTableFeaturePropOxm{} }
+func (m *OfpTableFeaturePropOxm) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeaturePropOxm) ProtoMessage()    {}
+func (*OfpTableFeaturePropOxm) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{54}
+}
+
+func (m *OfpTableFeaturePropOxm) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeaturePropOxm.Unmarshal(m, b)
+}
+func (m *OfpTableFeaturePropOxm) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeaturePropOxm.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeaturePropOxm) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeaturePropOxm.Merge(m, src)
+}
+func (m *OfpTableFeaturePropOxm) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeaturePropOxm.Size(m)
+}
+func (m *OfpTableFeaturePropOxm) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeaturePropOxm.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeaturePropOxm proto.InternalMessageInfo
+
+func (m *OfpTableFeaturePropOxm) GetOxmIds() []uint32 {
+	if m != nil {
+		return m.OxmIds
+	}
+	return nil
+}
+
+// Experimenter table feature property
+type OfpTableFeaturePropExperimenter struct {
+	// One of OFPTFPT_EXPERIMENTER,
+	// OFPTFPT_EXPERIMENTER_MISS.
+	Experimenter         uint32   `protobuf:"varint,2,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	ExpType              uint32   `protobuf:"varint,3,opt,name=exp_type,json=expType,proto3" json:"exp_type,omitempty"`
+	ExperimenterData     []uint32 `protobuf:"varint,4,rep,packed,name=experimenter_data,json=experimenterData,proto3" json:"experimenter_data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpTableFeaturePropExperimenter) Reset()         { *m = OfpTableFeaturePropExperimenter{} }
+func (m *OfpTableFeaturePropExperimenter) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeaturePropExperimenter) ProtoMessage()    {}
+func (*OfpTableFeaturePropExperimenter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{55}
+}
+
+func (m *OfpTableFeaturePropExperimenter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeaturePropExperimenter.Unmarshal(m, b)
+}
+func (m *OfpTableFeaturePropExperimenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeaturePropExperimenter.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeaturePropExperimenter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeaturePropExperimenter.Merge(m, src)
+}
+func (m *OfpTableFeaturePropExperimenter) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeaturePropExperimenter.Size(m)
+}
+func (m *OfpTableFeaturePropExperimenter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeaturePropExperimenter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeaturePropExperimenter proto.InternalMessageInfo
+
+func (m *OfpTableFeaturePropExperimenter) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpTableFeaturePropExperimenter) GetExpType() uint32 {
+	if m != nil {
+		return m.ExpType
+	}
+	return 0
+}
+
+func (m *OfpTableFeaturePropExperimenter) GetExperimenterData() []uint32 {
+	if m != nil {
+		return m.ExperimenterData
+	}
+	return nil
+}
+
+// Body for ofp_multipart_request of type OFPMP_TABLE_FEATURES.
+// Body of reply to OFPMP_TABLE_FEATURES request.
+type OfpTableFeatures struct {
+	TableId       uint32 `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	Name          string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	MetadataMatch uint64 `protobuf:"varint,3,opt,name=metadata_match,json=metadataMatch,proto3" json:"metadata_match,omitempty"`
+	MetadataWrite uint64 `protobuf:"varint,4,opt,name=metadata_write,json=metadataWrite,proto3" json:"metadata_write,omitempty"`
+	Config        uint32 `protobuf:"varint,5,opt,name=config,proto3" json:"config,omitempty"`
+	MaxEntries    uint32 `protobuf:"varint,6,opt,name=max_entries,json=maxEntries,proto3" json:"max_entries,omitempty"`
+	// Table Feature Property list
+	Properties           []*OfpTableFeatureProperty `protobuf:"bytes,7,rep,name=properties,proto3" json:"properties,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                   `json:"-"`
+	XXX_unrecognized     []byte                     `json:"-"`
+	XXX_sizecache        int32                      `json:"-"`
+}
+
+func (m *OfpTableFeatures) Reset()         { *m = OfpTableFeatures{} }
+func (m *OfpTableFeatures) String() string { return proto.CompactTextString(m) }
+func (*OfpTableFeatures) ProtoMessage()    {}
+func (*OfpTableFeatures) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{56}
+}
+
+func (m *OfpTableFeatures) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableFeatures.Unmarshal(m, b)
+}
+func (m *OfpTableFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableFeatures.Marshal(b, m, deterministic)
+}
+func (m *OfpTableFeatures) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableFeatures.Merge(m, src)
+}
+func (m *OfpTableFeatures) XXX_Size() int {
+	return xxx_messageInfo_OfpTableFeatures.Size(m)
+}
+func (m *OfpTableFeatures) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableFeatures.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableFeatures proto.InternalMessageInfo
+
+func (m *OfpTableFeatures) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpTableFeatures) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *OfpTableFeatures) GetMetadataMatch() uint64 {
+	if m != nil {
+		return m.MetadataMatch
+	}
+	return 0
+}
+
+func (m *OfpTableFeatures) GetMetadataWrite() uint64 {
+	if m != nil {
+		return m.MetadataWrite
+	}
+	return 0
+}
+
+func (m *OfpTableFeatures) GetConfig() uint32 {
+	if m != nil {
+		return m.Config
+	}
+	return 0
+}
+
+func (m *OfpTableFeatures) GetMaxEntries() uint32 {
+	if m != nil {
+		return m.MaxEntries
+	}
+	return 0
+}
+
+func (m *OfpTableFeatures) GetProperties() []*OfpTableFeatureProperty {
+	if m != nil {
+		return m.Properties
+	}
+	return nil
+}
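+
+// Editor's note: illustrative sketch only, not generated code. An
+// OfpTableFeatures entry bundles the table identity and limits with its
+// property list, each property being one OfpTableFeatureProperty as shown
+// above; the field values used here are placeholders.
+func exampleTableFeatures() *OfpTableFeatures {
+	return &OfpTableFeatures{
+		TableId:    0,
+		Name:       "ingress",
+		MaxEntries: 1024,
+		Properties: []*OfpTableFeatureProperty{
+			{Value: &OfpTableFeatureProperty_Instructions{
+				Instructions: &OfpTableFeaturePropInstructions{},
+			}},
+		},
+	}
+}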
+
+// Body of reply to OFPMP_TABLE request.
+type OfpTableStats struct {
+	TableId              uint32   `protobuf:"varint,1,opt,name=table_id,json=tableId,proto3" json:"table_id,omitempty"`
+	ActiveCount          uint32   `protobuf:"varint,2,opt,name=active_count,json=activeCount,proto3" json:"active_count,omitempty"`
+	LookupCount          uint64   `protobuf:"varint,3,opt,name=lookup_count,json=lookupCount,proto3" json:"lookup_count,omitempty"`
+	MatchedCount         uint64   `protobuf:"varint,4,opt,name=matched_count,json=matchedCount,proto3" json:"matched_count,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpTableStats) Reset()         { *m = OfpTableStats{} }
+func (m *OfpTableStats) String() string { return proto.CompactTextString(m) }
+func (*OfpTableStats) ProtoMessage()    {}
+func (*OfpTableStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{57}
+}
+
+func (m *OfpTableStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpTableStats.Unmarshal(m, b)
+}
+func (m *OfpTableStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpTableStats.Marshal(b, m, deterministic)
+}
+func (m *OfpTableStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpTableStats.Merge(m, src)
+}
+func (m *OfpTableStats) XXX_Size() int {
+	return xxx_messageInfo_OfpTableStats.Size(m)
+}
+func (m *OfpTableStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpTableStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpTableStats proto.InternalMessageInfo
+
+func (m *OfpTableStats) GetTableId() uint32 {
+	if m != nil {
+		return m.TableId
+	}
+	return 0
+}
+
+func (m *OfpTableStats) GetActiveCount() uint32 {
+	if m != nil {
+		return m.ActiveCount
+	}
+	return 0
+}
+
+func (m *OfpTableStats) GetLookupCount() uint64 {
+	if m != nil {
+		return m.LookupCount
+	}
+	return 0
+}
+
+func (m *OfpTableStats) GetMatchedCount() uint64 {
+	if m != nil {
+		return m.MatchedCount
+	}
+	return 0
+}
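+
+// Editor's note: illustrative sketch only, not generated code. The table
+// stats counters combine into a simple hit ratio: matched lookups over total
+// lookups, using the nil-safe getters above.
+func exampleTableHitRatio(s *OfpTableStats) float64 {
+	if s.GetLookupCount() == 0 {
+		return 0
+	}
+	return float64(s.GetMatchedCount()) / float64(s.GetLookupCount())
+}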
+
+// Body for ofp_multipart_request of type OFPMP_PORT.
+type OfpPortStatsRequest struct {
+	PortNo               uint32   `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpPortStatsRequest) Reset()         { *m = OfpPortStatsRequest{} }
+func (m *OfpPortStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpPortStatsRequest) ProtoMessage()    {}
+func (*OfpPortStatsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{58}
+}
+
+func (m *OfpPortStatsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPortStatsRequest.Unmarshal(m, b)
+}
+func (m *OfpPortStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPortStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpPortStatsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPortStatsRequest.Merge(m, src)
+}
+func (m *OfpPortStatsRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpPortStatsRequest.Size(m)
+}
+func (m *OfpPortStatsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPortStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPortStatsRequest proto.InternalMessageInfo
+
+func (m *OfpPortStatsRequest) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+// Body of reply to OFPMP_PORT request. If a counter is unsupported, set
+// the field to all ones.
+type OfpPortStats struct {
+	PortNo               uint32   `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	RxPackets            uint64   `protobuf:"varint,2,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"`
+	TxPackets            uint64   `protobuf:"varint,3,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
+	RxBytes              uint64   `protobuf:"varint,4,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"`
+	TxBytes              uint64   `protobuf:"varint,5,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
+	RxDropped            uint64   `protobuf:"varint,6,opt,name=rx_dropped,json=rxDropped,proto3" json:"rx_dropped,omitempty"`
+	TxDropped            uint64   `protobuf:"varint,7,opt,name=tx_dropped,json=txDropped,proto3" json:"tx_dropped,omitempty"`
+	RxErrors             uint64   `protobuf:"varint,8,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"`
+	TxErrors             uint64   `protobuf:"varint,9,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"`
+	RxFrameErr           uint64   `protobuf:"varint,10,opt,name=rx_frame_err,json=rxFrameErr,proto3" json:"rx_frame_err,omitempty"`
+	RxOverErr            uint64   `protobuf:"varint,11,opt,name=rx_over_err,json=rxOverErr,proto3" json:"rx_over_err,omitempty"`
+	RxCrcErr             uint64   `protobuf:"varint,12,opt,name=rx_crc_err,json=rxCrcErr,proto3" json:"rx_crc_err,omitempty"`
+	Collisions           uint64   `protobuf:"varint,13,opt,name=collisions,proto3" json:"collisions,omitempty"`
+	DurationSec          uint32   `protobuf:"varint,14,opt,name=duration_sec,json=durationSec,proto3" json:"duration_sec,omitempty"`
+	DurationNsec         uint32   `protobuf:"varint,15,opt,name=duration_nsec,json=durationNsec,proto3" json:"duration_nsec,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpPortStats) Reset()         { *m = OfpPortStats{} }
+func (m *OfpPortStats) String() string { return proto.CompactTextString(m) }
+func (*OfpPortStats) ProtoMessage()    {}
+func (*OfpPortStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{59}
+}
+
+func (m *OfpPortStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPortStats.Unmarshal(m, b)
+}
+func (m *OfpPortStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPortStats.Marshal(b, m, deterministic)
+}
+func (m *OfpPortStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPortStats.Merge(m, src)
+}
+func (m *OfpPortStats) XXX_Size() int {
+	return xxx_messageInfo_OfpPortStats.Size(m)
+}
+func (m *OfpPortStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPortStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPortStats proto.InternalMessageInfo
+
+func (m *OfpPortStats) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxPackets() uint64 {
+	if m != nil {
+		return m.RxPackets
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetTxPackets() uint64 {
+	if m != nil {
+		return m.TxPackets
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxBytes() uint64 {
+	if m != nil {
+		return m.RxBytes
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetTxBytes() uint64 {
+	if m != nil {
+		return m.TxBytes
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxDropped() uint64 {
+	if m != nil {
+		return m.RxDropped
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetTxDropped() uint64 {
+	if m != nil {
+		return m.TxDropped
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxErrors() uint64 {
+	if m != nil {
+		return m.RxErrors
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetTxErrors() uint64 {
+	if m != nil {
+		return m.TxErrors
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxFrameErr() uint64 {
+	if m != nil {
+		return m.RxFrameErr
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxOverErr() uint64 {
+	if m != nil {
+		return m.RxOverErr
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetRxCrcErr() uint64 {
+	if m != nil {
+		return m.RxCrcErr
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetCollisions() uint64 {
+	if m != nil {
+		return m.Collisions
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetDurationSec() uint32 {
+	if m != nil {
+		return m.DurationSec
+	}
+	return 0
+}
+
+func (m *OfpPortStats) GetDurationNsec() uint32 {
+	if m != nil {
+		return m.DurationNsec
+	}
+	return 0
+}
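+
+// Editor's note: illustrative sketch only, not generated code. Per the comment
+// on OfpPortStats, a counter the datapath does not support is reported as all
+// ones; a reader can treat that value as a sentinel, as sketched here for the
+// rx_errors counter.
+const exampleUnsupportedCounter = ^uint64(0) // all ones marks an unsupported counter
+
+func exampleRxErrorsSupported(s *OfpPortStats) (uint64, bool) {
+	if s == nil || s.RxErrors == exampleUnsupportedCounter {
+		return 0, false
+	}
+	return s.RxErrors, true
+}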
+
+// Body of OFPMP_GROUP request.
+type OfpGroupStatsRequest struct {
+	GroupId              uint32   `protobuf:"varint,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpGroupStatsRequest) Reset()         { *m = OfpGroupStatsRequest{} }
+func (m *OfpGroupStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpGroupStatsRequest) ProtoMessage()    {}
+func (*OfpGroupStatsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{60}
+}
+
+func (m *OfpGroupStatsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpGroupStatsRequest.Unmarshal(m, b)
+}
+func (m *OfpGroupStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpGroupStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpGroupStatsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpGroupStatsRequest.Merge(m, src)
+}
+func (m *OfpGroupStatsRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpGroupStatsRequest.Size(m)
+}
+func (m *OfpGroupStatsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpGroupStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpGroupStatsRequest proto.InternalMessageInfo
+
+func (m *OfpGroupStatsRequest) GetGroupId() uint32 {
+	if m != nil {
+		return m.GroupId
+	}
+	return 0
+}
+
+// Used in group stats replies.
+type OfpBucketCounter struct {
+	PacketCount          uint64   `protobuf:"varint,1,opt,name=packet_count,json=packetCount,proto3" json:"packet_count,omitempty"`
+	ByteCount            uint64   `protobuf:"varint,2,opt,name=byte_count,json=byteCount,proto3" json:"byte_count,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpBucketCounter) Reset()         { *m = OfpBucketCounter{} }
+func (m *OfpBucketCounter) String() string { return proto.CompactTextString(m) }
+func (*OfpBucketCounter) ProtoMessage()    {}
+func (*OfpBucketCounter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{61}
+}
+
+func (m *OfpBucketCounter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpBucketCounter.Unmarshal(m, b)
+}
+func (m *OfpBucketCounter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpBucketCounter.Marshal(b, m, deterministic)
+}
+func (m *OfpBucketCounter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpBucketCounter.Merge(m, src)
+}
+func (m *OfpBucketCounter) XXX_Size() int {
+	return xxx_messageInfo_OfpBucketCounter.Size(m)
+}
+func (m *OfpBucketCounter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpBucketCounter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpBucketCounter proto.InternalMessageInfo
+
+func (m *OfpBucketCounter) GetPacketCount() uint64 {
+	if m != nil {
+		return m.PacketCount
+	}
+	return 0
+}
+
+func (m *OfpBucketCounter) GetByteCount() uint64 {
+	if m != nil {
+		return m.ByteCount
+	}
+	return 0
+}
+
+// Body of reply to OFPMP_GROUP request.
+type OfpGroupStats struct {
+	GroupId              uint32              `protobuf:"varint,1,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+	RefCount             uint32              `protobuf:"varint,2,opt,name=ref_count,json=refCount,proto3" json:"ref_count,omitempty"`
+	PacketCount          uint64              `protobuf:"varint,3,opt,name=packet_count,json=packetCount,proto3" json:"packet_count,omitempty"`
+	ByteCount            uint64              `protobuf:"varint,4,opt,name=byte_count,json=byteCount,proto3" json:"byte_count,omitempty"`
+	DurationSec          uint32              `protobuf:"varint,5,opt,name=duration_sec,json=durationSec,proto3" json:"duration_sec,omitempty"`
+	DurationNsec         uint32              `protobuf:"varint,6,opt,name=duration_nsec,json=durationNsec,proto3" json:"duration_nsec,omitempty"`
+	BucketStats          []*OfpBucketCounter `protobuf:"bytes,7,rep,name=bucket_stats,json=bucketStats,proto3" json:"bucket_stats,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *OfpGroupStats) Reset()         { *m = OfpGroupStats{} }
+func (m *OfpGroupStats) String() string { return proto.CompactTextString(m) }
+func (*OfpGroupStats) ProtoMessage()    {}
+func (*OfpGroupStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{62}
+}
+
+func (m *OfpGroupStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpGroupStats.Unmarshal(m, b)
+}
+func (m *OfpGroupStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpGroupStats.Marshal(b, m, deterministic)
+}
+func (m *OfpGroupStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpGroupStats.Merge(m, src)
+}
+func (m *OfpGroupStats) XXX_Size() int {
+	return xxx_messageInfo_OfpGroupStats.Size(m)
+}
+func (m *OfpGroupStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpGroupStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpGroupStats proto.InternalMessageInfo
+
+func (m *OfpGroupStats) GetGroupId() uint32 {
+	if m != nil {
+		return m.GroupId
+	}
+	return 0
+}
+
+func (m *OfpGroupStats) GetRefCount() uint32 {
+	if m != nil {
+		return m.RefCount
+	}
+	return 0
+}
+
+func (m *OfpGroupStats) GetPacketCount() uint64 {
+	if m != nil {
+		return m.PacketCount
+	}
+	return 0
+}
+
+func (m *OfpGroupStats) GetByteCount() uint64 {
+	if m != nil {
+		return m.ByteCount
+	}
+	return 0
+}
+
+func (m *OfpGroupStats) GetDurationSec() uint32 {
+	if m != nil {
+		return m.DurationSec
+	}
+	return 0
+}
+
+func (m *OfpGroupStats) GetDurationNsec() uint32 {
+	if m != nil {
+		return m.DurationNsec
+	}
+	return 0
+}
+
+func (m *OfpGroupStats) GetBucketStats() []*OfpBucketCounter {
+	if m != nil {
+		return m.BucketStats
+	}
+	return nil
+}
+
+// Body of reply to OFPMP_GROUP_DESC request.
+type OfpGroupDesc struct {
+	Type                 OfpGroupType `protobuf:"varint,1,opt,name=type,proto3,enum=openflow_13.OfpGroupType" json:"type,omitempty"`
+	GroupId              uint32       `protobuf:"varint,2,opt,name=group_id,json=groupId,proto3" json:"group_id,omitempty"`
+	Buckets              []*OfpBucket `protobuf:"bytes,3,rep,name=buckets,proto3" json:"buckets,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *OfpGroupDesc) Reset()         { *m = OfpGroupDesc{} }
+func (m *OfpGroupDesc) String() string { return proto.CompactTextString(m) }
+func (*OfpGroupDesc) ProtoMessage()    {}
+func (*OfpGroupDesc) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{63}
+}
+
+func (m *OfpGroupDesc) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpGroupDesc.Unmarshal(m, b)
+}
+func (m *OfpGroupDesc) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpGroupDesc.Marshal(b, m, deterministic)
+}
+func (m *OfpGroupDesc) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpGroupDesc.Merge(m, src)
+}
+func (m *OfpGroupDesc) XXX_Size() int {
+	return xxx_messageInfo_OfpGroupDesc.Size(m)
+}
+func (m *OfpGroupDesc) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpGroupDesc.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpGroupDesc proto.InternalMessageInfo
+
+func (m *OfpGroupDesc) GetType() OfpGroupType {
+	if m != nil {
+		return m.Type
+	}
+	return OfpGroupType_OFPGT_ALL
+}
+
+func (m *OfpGroupDesc) GetGroupId() uint32 {
+	if m != nil {
+		return m.GroupId
+	}
+	return 0
+}
+
+func (m *OfpGroupDesc) GetBuckets() []*OfpBucket {
+	if m != nil {
+		return m.Buckets
+	}
+	return nil
+}
+
+type OfpGroupEntry struct {
+	Desc                 *OfpGroupDesc  `protobuf:"bytes,1,opt,name=desc,proto3" json:"desc,omitempty"`
+	Stats                *OfpGroupStats `protobuf:"bytes,2,opt,name=stats,proto3" json:"stats,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *OfpGroupEntry) Reset()         { *m = OfpGroupEntry{} }
+func (m *OfpGroupEntry) String() string { return proto.CompactTextString(m) }
+func (*OfpGroupEntry) ProtoMessage()    {}
+func (*OfpGroupEntry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{64}
+}
+
+func (m *OfpGroupEntry) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpGroupEntry.Unmarshal(m, b)
+}
+func (m *OfpGroupEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpGroupEntry.Marshal(b, m, deterministic)
+}
+func (m *OfpGroupEntry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpGroupEntry.Merge(m, src)
+}
+func (m *OfpGroupEntry) XXX_Size() int {
+	return xxx_messageInfo_OfpGroupEntry.Size(m)
+}
+func (m *OfpGroupEntry) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpGroupEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpGroupEntry proto.InternalMessageInfo
+
+func (m *OfpGroupEntry) GetDesc() *OfpGroupDesc {
+	if m != nil {
+		return m.Desc
+	}
+	return nil
+}
+
+func (m *OfpGroupEntry) GetStats() *OfpGroupStats {
+	if m != nil {
+		return m.Stats
+	}
+	return nil
+}
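+
+// Editor's note: illustrative sketch only, not generated code. OfpGroupEntry
+// pairs a group's static description with its statistics; the snippet below
+// assembles one from the messages defined above using placeholder values.
+func exampleGroupEntry() *OfpGroupEntry {
+	return &OfpGroupEntry{
+		Desc: &OfpGroupDesc{
+			Type:    OfpGroupType_OFPGT_ALL,
+			GroupId: 1,
+		},
+		Stats: &OfpGroupStats{
+			GroupId:     1,
+			BucketStats: []*OfpBucketCounter{{PacketCount: 0, ByteCount: 0}},
+		},
+	}
+}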
+
+// Body of reply to OFPMP_GROUP_FEATURES request. Group features.
+type OfpGroupFeatures struct {
+	Types                uint32   `protobuf:"varint,1,opt,name=types,proto3" json:"types,omitempty"`
+	Capabilities         uint32   `protobuf:"varint,2,opt,name=capabilities,proto3" json:"capabilities,omitempty"`
+	MaxGroups            []uint32 `protobuf:"varint,3,rep,packed,name=max_groups,json=maxGroups,proto3" json:"max_groups,omitempty"`
+	Actions              []uint32 `protobuf:"varint,4,rep,packed,name=actions,proto3" json:"actions,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpGroupFeatures) Reset()         { *m = OfpGroupFeatures{} }
+func (m *OfpGroupFeatures) String() string { return proto.CompactTextString(m) }
+func (*OfpGroupFeatures) ProtoMessage()    {}
+func (*OfpGroupFeatures) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{65}
+}
+
+func (m *OfpGroupFeatures) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpGroupFeatures.Unmarshal(m, b)
+}
+func (m *OfpGroupFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpGroupFeatures.Marshal(b, m, deterministic)
+}
+func (m *OfpGroupFeatures) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpGroupFeatures.Merge(m, src)
+}
+func (m *OfpGroupFeatures) XXX_Size() int {
+	return xxx_messageInfo_OfpGroupFeatures.Size(m)
+}
+func (m *OfpGroupFeatures) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpGroupFeatures.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpGroupFeatures proto.InternalMessageInfo
+
+func (m *OfpGroupFeatures) GetTypes() uint32 {
+	if m != nil {
+		return m.Types
+	}
+	return 0
+}
+
+func (m *OfpGroupFeatures) GetCapabilities() uint32 {
+	if m != nil {
+		return m.Capabilities
+	}
+	return 0
+}
+
+func (m *OfpGroupFeatures) GetMaxGroups() []uint32 {
+	if m != nil {
+		return m.MaxGroups
+	}
+	return nil
+}
+
+func (m *OfpGroupFeatures) GetActions() []uint32 {
+	if m != nil {
+		return m.Actions
+	}
+	return nil
+}
+
+// Body of OFPMP_METER and OFPMP_METER_CONFIG requests.
+type OfpMeterMultipartRequest struct {
+	MeterId              uint32   `protobuf:"varint,1,opt,name=meter_id,json=meterId,proto3" json:"meter_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpMeterMultipartRequest) Reset()         { *m = OfpMeterMultipartRequest{} }
+func (m *OfpMeterMultipartRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterMultipartRequest) ProtoMessage()    {}
+func (*OfpMeterMultipartRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{66}
+}
+
+func (m *OfpMeterMultipartRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterMultipartRequest.Unmarshal(m, b)
+}
+func (m *OfpMeterMultipartRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterMultipartRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterMultipartRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterMultipartRequest.Merge(m, src)
+}
+func (m *OfpMeterMultipartRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterMultipartRequest.Size(m)
+}
+func (m *OfpMeterMultipartRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterMultipartRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterMultipartRequest proto.InternalMessageInfo
+
+func (m *OfpMeterMultipartRequest) GetMeterId() uint32 {
+	if m != nil {
+		return m.MeterId
+	}
+	return 0
+}
+
+// Statistics for each meter band
+type OfpMeterBandStats struct {
+	PacketBandCount      uint64   `protobuf:"varint,1,opt,name=packet_band_count,json=packetBandCount,proto3" json:"packet_band_count,omitempty"`
+	ByteBandCount        uint64   `protobuf:"varint,2,opt,name=byte_band_count,json=byteBandCount,proto3" json:"byte_band_count,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpMeterBandStats) Reset()         { *m = OfpMeterBandStats{} }
+func (m *OfpMeterBandStats) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterBandStats) ProtoMessage()    {}
+func (*OfpMeterBandStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{67}
+}
+
+func (m *OfpMeterBandStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterBandStats.Unmarshal(m, b)
+}
+func (m *OfpMeterBandStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterBandStats.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterBandStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterBandStats.Merge(m, src)
+}
+func (m *OfpMeterBandStats) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterBandStats.Size(m)
+}
+func (m *OfpMeterBandStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterBandStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterBandStats proto.InternalMessageInfo
+
+func (m *OfpMeterBandStats) GetPacketBandCount() uint64 {
+	if m != nil {
+		return m.PacketBandCount
+	}
+	return 0
+}
+
+func (m *OfpMeterBandStats) GetByteBandCount() uint64 {
+	if m != nil {
+		return m.ByteBandCount
+	}
+	return 0
+}
+
+// Body of reply to OFPMP_METER request. Meter statistics.
+type OfpMeterStats struct {
+	MeterId              uint32               `protobuf:"varint,1,opt,name=meter_id,json=meterId,proto3" json:"meter_id,omitempty"`
+	FlowCount            uint32               `protobuf:"varint,2,opt,name=flow_count,json=flowCount,proto3" json:"flow_count,omitempty"`
+	PacketInCount        uint64               `protobuf:"varint,3,opt,name=packet_in_count,json=packetInCount,proto3" json:"packet_in_count,omitempty"`
+	ByteInCount          uint64               `protobuf:"varint,4,opt,name=byte_in_count,json=byteInCount,proto3" json:"byte_in_count,omitempty"`
+	DurationSec          uint32               `protobuf:"varint,5,opt,name=duration_sec,json=durationSec,proto3" json:"duration_sec,omitempty"`
+	DurationNsec         uint32               `protobuf:"varint,6,opt,name=duration_nsec,json=durationNsec,proto3" json:"duration_nsec,omitempty"`
+	BandStats            []*OfpMeterBandStats `protobuf:"bytes,7,rep,name=band_stats,json=bandStats,proto3" json:"band_stats,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *OfpMeterStats) Reset()         { *m = OfpMeterStats{} }
+func (m *OfpMeterStats) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterStats) ProtoMessage()    {}
+func (*OfpMeterStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{68}
+}
+
+func (m *OfpMeterStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterStats.Unmarshal(m, b)
+}
+func (m *OfpMeterStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterStats.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterStats.Merge(m, src)
+}
+func (m *OfpMeterStats) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterStats.Size(m)
+}
+func (m *OfpMeterStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterStats proto.InternalMessageInfo
+
+func (m *OfpMeterStats) GetMeterId() uint32 {
+	if m != nil {
+		return m.MeterId
+	}
+	return 0
+}
+
+func (m *OfpMeterStats) GetFlowCount() uint32 {
+	if m != nil {
+		return m.FlowCount
+	}
+	return 0
+}
+
+func (m *OfpMeterStats) GetPacketInCount() uint64 {
+	if m != nil {
+		return m.PacketInCount
+	}
+	return 0
+}
+
+func (m *OfpMeterStats) GetByteInCount() uint64 {
+	if m != nil {
+		return m.ByteInCount
+	}
+	return 0
+}
+
+func (m *OfpMeterStats) GetDurationSec() uint32 {
+	if m != nil {
+		return m.DurationSec
+	}
+	return 0
+}
+
+func (m *OfpMeterStats) GetDurationNsec() uint32 {
+	if m != nil {
+		return m.DurationNsec
+	}
+	return 0
+}
+
+func (m *OfpMeterStats) GetBandStats() []*OfpMeterBandStats {
+	if m != nil {
+		return m.BandStats
+	}
+	return nil
+}
+
+// Body of reply to OFPMP_METER_CONFIG request. Meter configuration.
+type OfpMeterConfig struct {
+	Flags                uint32                `protobuf:"varint,1,opt,name=flags,proto3" json:"flags,omitempty"`
+	MeterId              uint32                `protobuf:"varint,2,opt,name=meter_id,json=meterId,proto3" json:"meter_id,omitempty"`
+	Bands                []*OfpMeterBandHeader `protobuf:"bytes,3,rep,name=bands,proto3" json:"bands,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *OfpMeterConfig) Reset()         { *m = OfpMeterConfig{} }
+func (m *OfpMeterConfig) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterConfig) ProtoMessage()    {}
+func (*OfpMeterConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{69}
+}
+
+func (m *OfpMeterConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterConfig.Unmarshal(m, b)
+}
+func (m *OfpMeterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterConfig.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterConfig.Merge(m, src)
+}
+func (m *OfpMeterConfig) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterConfig.Size(m)
+}
+func (m *OfpMeterConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterConfig proto.InternalMessageInfo
+
+func (m *OfpMeterConfig) GetFlags() uint32 {
+	if m != nil {
+		return m.Flags
+	}
+	return 0
+}
+
+func (m *OfpMeterConfig) GetMeterId() uint32 {
+	if m != nil {
+		return m.MeterId
+	}
+	return 0
+}
+
+func (m *OfpMeterConfig) GetBands() []*OfpMeterBandHeader {
+	if m != nil {
+		return m.Bands
+	}
+	return nil
+}
+
+// Body of reply to OFPMP_METER_FEATURES request. Meter features.
+type OfpMeterFeatures struct {
+	MaxMeter             uint32   `protobuf:"varint,1,opt,name=max_meter,json=maxMeter,proto3" json:"max_meter,omitempty"`
+	BandTypes            uint32   `protobuf:"varint,2,opt,name=band_types,json=bandTypes,proto3" json:"band_types,omitempty"`
+	Capabilities         uint32   `protobuf:"varint,3,opt,name=capabilities,proto3" json:"capabilities,omitempty"`
+	MaxBands             uint32   `protobuf:"varint,4,opt,name=max_bands,json=maxBands,proto3" json:"max_bands,omitempty"`
+	MaxColor             uint32   `protobuf:"varint,5,opt,name=max_color,json=maxColor,proto3" json:"max_color,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpMeterFeatures) Reset()         { *m = OfpMeterFeatures{} }
+func (m *OfpMeterFeatures) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterFeatures) ProtoMessage()    {}
+func (*OfpMeterFeatures) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{70}
+}
+
+func (m *OfpMeterFeatures) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterFeatures.Unmarshal(m, b)
+}
+func (m *OfpMeterFeatures) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterFeatures.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterFeatures) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterFeatures.Merge(m, src)
+}
+func (m *OfpMeterFeatures) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterFeatures.Size(m)
+}
+func (m *OfpMeterFeatures) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterFeatures.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterFeatures proto.InternalMessageInfo
+
+func (m *OfpMeterFeatures) GetMaxMeter() uint32 {
+	if m != nil {
+		return m.MaxMeter
+	}
+	return 0
+}
+
+func (m *OfpMeterFeatures) GetBandTypes() uint32 {
+	if m != nil {
+		return m.BandTypes
+	}
+	return 0
+}
+
+func (m *OfpMeterFeatures) GetCapabilities() uint32 {
+	if m != nil {
+		return m.Capabilities
+	}
+	return 0
+}
+
+func (m *OfpMeterFeatures) GetMaxBands() uint32 {
+	if m != nil {
+		return m.MaxBands
+	}
+	return 0
+}
+
+func (m *OfpMeterFeatures) GetMaxColor() uint32 {
+	if m != nil {
+		return m.MaxColor
+	}
+	return 0
+}
+
+type OfpMeterEntry struct {
+	Config               *OfpMeterConfig `protobuf:"bytes,1,opt,name=config,proto3" json:"config,omitempty"`
+	Stats                *OfpMeterStats  `protobuf:"bytes,2,opt,name=stats,proto3" json:"stats,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *OfpMeterEntry) Reset()         { *m = OfpMeterEntry{} }
+func (m *OfpMeterEntry) String() string { return proto.CompactTextString(m) }
+func (*OfpMeterEntry) ProtoMessage()    {}
+func (*OfpMeterEntry) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{71}
+}
+
+func (m *OfpMeterEntry) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpMeterEntry.Unmarshal(m, b)
+}
+func (m *OfpMeterEntry) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpMeterEntry.Marshal(b, m, deterministic)
+}
+func (m *OfpMeterEntry) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpMeterEntry.Merge(m, src)
+}
+func (m *OfpMeterEntry) XXX_Size() int {
+	return xxx_messageInfo_OfpMeterEntry.Size(m)
+}
+func (m *OfpMeterEntry) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpMeterEntry.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpMeterEntry proto.InternalMessageInfo
+
+func (m *OfpMeterEntry) GetConfig() *OfpMeterConfig {
+	if m != nil {
+		return m.Config
+	}
+	return nil
+}
+
+func (m *OfpMeterEntry) GetStats() *OfpMeterStats {
+	if m != nil {
+		return m.Stats
+	}
+	return nil
+}
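+
+// Editor's note: illustrative sketch only, not generated code. Like the group
+// entry above, OfpMeterEntry joins a meter's configuration with its
+// statistics; each element of BandStats corresponds to one configured band.
+func exampleMeterEntry() *OfpMeterEntry {
+	return &OfpMeterEntry{
+		Config: &OfpMeterConfig{MeterId: 1},
+		Stats: &OfpMeterStats{
+			MeterId:   1,
+			BandStats: []*OfpMeterBandStats{{PacketBandCount: 0, ByteBandCount: 0}},
+		},
+	}
+}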
+
+// Body for ofp_multipart_request/reply of type OFPMP_EXPERIMENTER.
+type OfpExperimenterMultipartHeader struct {
+	Experimenter         uint32   `protobuf:"varint,1,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	ExpType              uint32   `protobuf:"varint,2,opt,name=exp_type,json=expType,proto3" json:"exp_type,omitempty"`
+	Data                 []byte   `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpExperimenterMultipartHeader) Reset()         { *m = OfpExperimenterMultipartHeader{} }
+func (m *OfpExperimenterMultipartHeader) String() string { return proto.CompactTextString(m) }
+func (*OfpExperimenterMultipartHeader) ProtoMessage()    {}
+func (*OfpExperimenterMultipartHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{72}
+}
+
+func (m *OfpExperimenterMultipartHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpExperimenterMultipartHeader.Unmarshal(m, b)
+}
+func (m *OfpExperimenterMultipartHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpExperimenterMultipartHeader.Marshal(b, m, deterministic)
+}
+func (m *OfpExperimenterMultipartHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpExperimenterMultipartHeader.Merge(m, src)
+}
+func (m *OfpExperimenterMultipartHeader) XXX_Size() int {
+	return xxx_messageInfo_OfpExperimenterMultipartHeader.Size(m)
+}
+func (m *OfpExperimenterMultipartHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpExperimenterMultipartHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpExperimenterMultipartHeader proto.InternalMessageInfo
+
+func (m *OfpExperimenterMultipartHeader) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpExperimenterMultipartHeader) GetExpType() uint32 {
+	if m != nil {
+		return m.ExpType
+	}
+	return 0
+}
+
+func (m *OfpExperimenterMultipartHeader) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// Experimenter extension.
+type OfpExperimenterHeader struct {
+	// ofp_header header;  /* Type OFPT_EXPERIMENTER. */
+	Experimenter         uint32   `protobuf:"varint,1,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	ExpType              uint32   `protobuf:"varint,2,opt,name=exp_type,json=expType,proto3" json:"exp_type,omitempty"`
+	Data                 []byte   `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpExperimenterHeader) Reset()         { *m = OfpExperimenterHeader{} }
+func (m *OfpExperimenterHeader) String() string { return proto.CompactTextString(m) }
+func (*OfpExperimenterHeader) ProtoMessage()    {}
+func (*OfpExperimenterHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{73}
+}
+
+func (m *OfpExperimenterHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpExperimenterHeader.Unmarshal(m, b)
+}
+func (m *OfpExperimenterHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpExperimenterHeader.Marshal(b, m, deterministic)
+}
+func (m *OfpExperimenterHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpExperimenterHeader.Merge(m, src)
+}
+func (m *OfpExperimenterHeader) XXX_Size() int {
+	return xxx_messageInfo_OfpExperimenterHeader.Size(m)
+}
+func (m *OfpExperimenterHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpExperimenterHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpExperimenterHeader proto.InternalMessageInfo
+
+func (m *OfpExperimenterHeader) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpExperimenterHeader) GetExpType() uint32 {
+	if m != nil {
+		return m.ExpType
+	}
+	return 0
+}
+
+func (m *OfpExperimenterHeader) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// Common description for a queue.
+type OfpQueuePropHeader struct {
+	Property             uint32   `protobuf:"varint,1,opt,name=property,proto3" json:"property,omitempty"`
+	Len                  uint32   `protobuf:"varint,2,opt,name=len,proto3" json:"len,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpQueuePropHeader) Reset()         { *m = OfpQueuePropHeader{} }
+func (m *OfpQueuePropHeader) String() string { return proto.CompactTextString(m) }
+func (*OfpQueuePropHeader) ProtoMessage()    {}
+func (*OfpQueuePropHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{74}
+}
+
+func (m *OfpQueuePropHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueuePropHeader.Unmarshal(m, b)
+}
+func (m *OfpQueuePropHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueuePropHeader.Marshal(b, m, deterministic)
+}
+func (m *OfpQueuePropHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueuePropHeader.Merge(m, src)
+}
+func (m *OfpQueuePropHeader) XXX_Size() int {
+	return xxx_messageInfo_OfpQueuePropHeader.Size(m)
+}
+func (m *OfpQueuePropHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueuePropHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueuePropHeader proto.InternalMessageInfo
+
+func (m *OfpQueuePropHeader) GetProperty() uint32 {
+	if m != nil {
+		return m.Property
+	}
+	return 0
+}
+
+func (m *OfpQueuePropHeader) GetLen() uint32 {
+	if m != nil {
+		return m.Len
+	}
+	return 0
+}
+
+// Min-Rate queue property description.
+type OfpQueuePropMinRate struct {
+	PropHeader           *OfpQueuePropHeader `protobuf:"bytes,1,opt,name=prop_header,json=propHeader,proto3" json:"prop_header,omitempty"`
+	Rate                 uint32              `protobuf:"varint,2,opt,name=rate,proto3" json:"rate,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *OfpQueuePropMinRate) Reset()         { *m = OfpQueuePropMinRate{} }
+func (m *OfpQueuePropMinRate) String() string { return proto.CompactTextString(m) }
+func (*OfpQueuePropMinRate) ProtoMessage()    {}
+func (*OfpQueuePropMinRate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{75}
+}
+
+func (m *OfpQueuePropMinRate) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueuePropMinRate.Unmarshal(m, b)
+}
+func (m *OfpQueuePropMinRate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueuePropMinRate.Marshal(b, m, deterministic)
+}
+func (m *OfpQueuePropMinRate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueuePropMinRate.Merge(m, src)
+}
+func (m *OfpQueuePropMinRate) XXX_Size() int {
+	return xxx_messageInfo_OfpQueuePropMinRate.Size(m)
+}
+func (m *OfpQueuePropMinRate) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueuePropMinRate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueuePropMinRate proto.InternalMessageInfo
+
+func (m *OfpQueuePropMinRate) GetPropHeader() *OfpQueuePropHeader {
+	if m != nil {
+		return m.PropHeader
+	}
+	return nil
+}
+
+func (m *OfpQueuePropMinRate) GetRate() uint32 {
+	if m != nil {
+		return m.Rate
+	}
+	return 0
+}
+
+// Max-Rate queue property description.
+type OfpQueuePropMaxRate struct {
+	PropHeader           *OfpQueuePropHeader `protobuf:"bytes,1,opt,name=prop_header,json=propHeader,proto3" json:"prop_header,omitempty"`
+	Rate                 uint32              `protobuf:"varint,2,opt,name=rate,proto3" json:"rate,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *OfpQueuePropMaxRate) Reset()         { *m = OfpQueuePropMaxRate{} }
+func (m *OfpQueuePropMaxRate) String() string { return proto.CompactTextString(m) }
+func (*OfpQueuePropMaxRate) ProtoMessage()    {}
+func (*OfpQueuePropMaxRate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{76}
+}
+
+func (m *OfpQueuePropMaxRate) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueuePropMaxRate.Unmarshal(m, b)
+}
+func (m *OfpQueuePropMaxRate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueuePropMaxRate.Marshal(b, m, deterministic)
+}
+func (m *OfpQueuePropMaxRate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueuePropMaxRate.Merge(m, src)
+}
+func (m *OfpQueuePropMaxRate) XXX_Size() int {
+	return xxx_messageInfo_OfpQueuePropMaxRate.Size(m)
+}
+func (m *OfpQueuePropMaxRate) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueuePropMaxRate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueuePropMaxRate proto.InternalMessageInfo
+
+func (m *OfpQueuePropMaxRate) GetPropHeader() *OfpQueuePropHeader {
+	if m != nil {
+		return m.PropHeader
+	}
+	return nil
+}
+
+func (m *OfpQueuePropMaxRate) GetRate() uint32 {
+	if m != nil {
+		return m.Rate
+	}
+	return 0
+}
+
+// Experimenter queue property description.
+type OfpQueuePropExperimenter struct {
+	PropHeader           *OfpQueuePropHeader `protobuf:"bytes,1,opt,name=prop_header,json=propHeader,proto3" json:"prop_header,omitempty"`
+	Experimenter         uint32              `protobuf:"varint,2,opt,name=experimenter,proto3" json:"experimenter,omitempty"`
+	Data                 []byte              `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *OfpQueuePropExperimenter) Reset()         { *m = OfpQueuePropExperimenter{} }
+func (m *OfpQueuePropExperimenter) String() string { return proto.CompactTextString(m) }
+func (*OfpQueuePropExperimenter) ProtoMessage()    {}
+func (*OfpQueuePropExperimenter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{77}
+}
+
+func (m *OfpQueuePropExperimenter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueuePropExperimenter.Unmarshal(m, b)
+}
+func (m *OfpQueuePropExperimenter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueuePropExperimenter.Marshal(b, m, deterministic)
+}
+func (m *OfpQueuePropExperimenter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueuePropExperimenter.Merge(m, src)
+}
+func (m *OfpQueuePropExperimenter) XXX_Size() int {
+	return xxx_messageInfo_OfpQueuePropExperimenter.Size(m)
+}
+func (m *OfpQueuePropExperimenter) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueuePropExperimenter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueuePropExperimenter proto.InternalMessageInfo
+
+func (m *OfpQueuePropExperimenter) GetPropHeader() *OfpQueuePropHeader {
+	if m != nil {
+		return m.PropHeader
+	}
+	return nil
+}
+
+func (m *OfpQueuePropExperimenter) GetExperimenter() uint32 {
+	if m != nil {
+		return m.Experimenter
+	}
+	return 0
+}
+
+func (m *OfpQueuePropExperimenter) GetData() []byte {
+	if m != nil {
+		return m.Data
+	}
+	return nil
+}
+
+// Full description for a queue.
+type OfpPacketQueue struct {
+	QueueId              uint32                `protobuf:"varint,1,opt,name=queue_id,json=queueId,proto3" json:"queue_id,omitempty"`
+	Port                 uint32                `protobuf:"varint,2,opt,name=port,proto3" json:"port,omitempty"`
+	Properties           []*OfpQueuePropHeader `protobuf:"bytes,4,rep,name=properties,proto3" json:"properties,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *OfpPacketQueue) Reset()         { *m = OfpPacketQueue{} }
+func (m *OfpPacketQueue) String() string { return proto.CompactTextString(m) }
+func (*OfpPacketQueue) ProtoMessage()    {}
+func (*OfpPacketQueue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{78}
+}
+
+func (m *OfpPacketQueue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpPacketQueue.Unmarshal(m, b)
+}
+func (m *OfpPacketQueue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpPacketQueue.Marshal(b, m, deterministic)
+}
+func (m *OfpPacketQueue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpPacketQueue.Merge(m, src)
+}
+func (m *OfpPacketQueue) XXX_Size() int {
+	return xxx_messageInfo_OfpPacketQueue.Size(m)
+}
+func (m *OfpPacketQueue) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpPacketQueue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpPacketQueue proto.InternalMessageInfo
+
+func (m *OfpPacketQueue) GetQueueId() uint32 {
+	if m != nil {
+		return m.QueueId
+	}
+	return 0
+}
+
+func (m *OfpPacketQueue) GetPort() uint32 {
+	if m != nil {
+		return m.Port
+	}
+	return 0
+}
+
+func (m *OfpPacketQueue) GetProperties() []*OfpQueuePropHeader {
+	if m != nil {
+		return m.Properties
+	}
+	return nil
+}
+
+// Query for port queue configuration.
+type OfpQueueGetConfigRequest struct {
+	// ofp_header header;
+	Port                 uint32   `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpQueueGetConfigRequest) Reset()         { *m = OfpQueueGetConfigRequest{} }
+func (m *OfpQueueGetConfigRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpQueueGetConfigRequest) ProtoMessage()    {}
+func (*OfpQueueGetConfigRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{79}
+}
+
+func (m *OfpQueueGetConfigRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueueGetConfigRequest.Unmarshal(m, b)
+}
+func (m *OfpQueueGetConfigRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueueGetConfigRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpQueueGetConfigRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueueGetConfigRequest.Merge(m, src)
+}
+func (m *OfpQueueGetConfigRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpQueueGetConfigRequest.Size(m)
+}
+func (m *OfpQueueGetConfigRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueueGetConfigRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueueGetConfigRequest proto.InternalMessageInfo
+
+func (m *OfpQueueGetConfigRequest) GetPort() uint32 {
+	if m != nil {
+		return m.Port
+	}
+	return 0
+}
+
+// Queue configuration for a given port.
+type OfpQueueGetConfigReply struct {
+	//ofp_header header;
+	Port                 uint32            `protobuf:"varint,1,opt,name=port,proto3" json:"port,omitempty"`
+	Queues               []*OfpPacketQueue `protobuf:"bytes,2,rep,name=queues,proto3" json:"queues,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *OfpQueueGetConfigReply) Reset()         { *m = OfpQueueGetConfigReply{} }
+func (m *OfpQueueGetConfigReply) String() string { return proto.CompactTextString(m) }
+func (*OfpQueueGetConfigReply) ProtoMessage()    {}
+func (*OfpQueueGetConfigReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{80}
+}
+
+func (m *OfpQueueGetConfigReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueueGetConfigReply.Unmarshal(m, b)
+}
+func (m *OfpQueueGetConfigReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueueGetConfigReply.Marshal(b, m, deterministic)
+}
+func (m *OfpQueueGetConfigReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueueGetConfigReply.Merge(m, src)
+}
+func (m *OfpQueueGetConfigReply) XXX_Size() int {
+	return xxx_messageInfo_OfpQueueGetConfigReply.Size(m)
+}
+func (m *OfpQueueGetConfigReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueueGetConfigReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueueGetConfigReply proto.InternalMessageInfo
+
+func (m *OfpQueueGetConfigReply) GetPort() uint32 {
+	if m != nil {
+		return m.Port
+	}
+	return 0
+}
+
+func (m *OfpQueueGetConfigReply) GetQueues() []*OfpPacketQueue {
+	if m != nil {
+		return m.Queues
+	}
+	return nil
+}
+
+// OFPAT_SET_QUEUE action struct: send packets to given queue on port.
+type OfpActionSetQueue struct {
+	Type                 uint32   `protobuf:"varint,1,opt,name=type,proto3" json:"type,omitempty"`
+	QueueId              uint32   `protobuf:"varint,3,opt,name=queue_id,json=queueId,proto3" json:"queue_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpActionSetQueue) Reset()         { *m = OfpActionSetQueue{} }
+func (m *OfpActionSetQueue) String() string { return proto.CompactTextString(m) }
+func (*OfpActionSetQueue) ProtoMessage()    {}
+func (*OfpActionSetQueue) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{81}
+}
+
+func (m *OfpActionSetQueue) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpActionSetQueue.Unmarshal(m, b)
+}
+func (m *OfpActionSetQueue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpActionSetQueue.Marshal(b, m, deterministic)
+}
+func (m *OfpActionSetQueue) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpActionSetQueue.Merge(m, src)
+}
+func (m *OfpActionSetQueue) XXX_Size() int {
+	return xxx_messageInfo_OfpActionSetQueue.Size(m)
+}
+func (m *OfpActionSetQueue) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpActionSetQueue.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpActionSetQueue proto.InternalMessageInfo
+
+func (m *OfpActionSetQueue) GetType() uint32 {
+	if m != nil {
+		return m.Type
+	}
+	return 0
+}
+
+func (m *OfpActionSetQueue) GetQueueId() uint32 {
+	if m != nil {
+		return m.QueueId
+	}
+	return 0
+}
+
+type OfpQueueStatsRequest struct {
+	PortNo               uint32   `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	QueueId              uint32   `protobuf:"varint,2,opt,name=queue_id,json=queueId,proto3" json:"queue_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpQueueStatsRequest) Reset()         { *m = OfpQueueStatsRequest{} }
+func (m *OfpQueueStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpQueueStatsRequest) ProtoMessage()    {}
+func (*OfpQueueStatsRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{82}
+}
+
+func (m *OfpQueueStatsRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueueStatsRequest.Unmarshal(m, b)
+}
+func (m *OfpQueueStatsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueueStatsRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpQueueStatsRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueueStatsRequest.Merge(m, src)
+}
+func (m *OfpQueueStatsRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpQueueStatsRequest.Size(m)
+}
+func (m *OfpQueueStatsRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueueStatsRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueueStatsRequest proto.InternalMessageInfo
+
+func (m *OfpQueueStatsRequest) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *OfpQueueStatsRequest) GetQueueId() uint32 {
+	if m != nil {
+		return m.QueueId
+	}
+	return 0
+}
+
+type OfpQueueStats struct {
+	PortNo               uint32   `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	QueueId              uint32   `protobuf:"varint,2,opt,name=queue_id,json=queueId,proto3" json:"queue_id,omitempty"`
+	TxBytes              uint64   `protobuf:"varint,3,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
+	TxPackets            uint64   `protobuf:"varint,4,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
+	TxErrors             uint64   `protobuf:"varint,5,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"`
+	DurationSec          uint32   `protobuf:"varint,6,opt,name=duration_sec,json=durationSec,proto3" json:"duration_sec,omitempty"`
+	DurationNsec         uint32   `protobuf:"varint,7,opt,name=duration_nsec,json=durationNsec,proto3" json:"duration_nsec,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpQueueStats) Reset()         { *m = OfpQueueStats{} }
+func (m *OfpQueueStats) String() string { return proto.CompactTextString(m) }
+func (*OfpQueueStats) ProtoMessage()    {}
+func (*OfpQueueStats) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{83}
+}
+
+func (m *OfpQueueStats) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpQueueStats.Unmarshal(m, b)
+}
+func (m *OfpQueueStats) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpQueueStats.Marshal(b, m, deterministic)
+}
+func (m *OfpQueueStats) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpQueueStats.Merge(m, src)
+}
+func (m *OfpQueueStats) XXX_Size() int {
+	return xxx_messageInfo_OfpQueueStats.Size(m)
+}
+func (m *OfpQueueStats) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpQueueStats.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpQueueStats proto.InternalMessageInfo
+
+func (m *OfpQueueStats) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *OfpQueueStats) GetQueueId() uint32 {
+	if m != nil {
+		return m.QueueId
+	}
+	return 0
+}
+
+func (m *OfpQueueStats) GetTxBytes() uint64 {
+	if m != nil {
+		return m.TxBytes
+	}
+	return 0
+}
+
+func (m *OfpQueueStats) GetTxPackets() uint64 {
+	if m != nil {
+		return m.TxPackets
+	}
+	return 0
+}
+
+func (m *OfpQueueStats) GetTxErrors() uint64 {
+	if m != nil {
+		return m.TxErrors
+	}
+	return 0
+}
+
+func (m *OfpQueueStats) GetDurationSec() uint32 {
+	if m != nil {
+		return m.DurationSec
+	}
+	return 0
+}
+
+func (m *OfpQueueStats) GetDurationNsec() uint32 {
+	if m != nil {
+		return m.DurationNsec
+	}
+	return 0
+}
+
+// Role request and reply message.
+type OfpRoleRequest struct {
+	//ofp_header header;        /* Type OFPT_ROLE_REQUEST/OFPT_ROLE_REPLY. */
+	Role                 OfpControllerRole `protobuf:"varint,1,opt,name=role,proto3,enum=openflow_13.OfpControllerRole" json:"role,omitempty"`
+	GenerationId         uint64            `protobuf:"varint,2,opt,name=generation_id,json=generationId,proto3" json:"generation_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *OfpRoleRequest) Reset()         { *m = OfpRoleRequest{} }
+func (m *OfpRoleRequest) String() string { return proto.CompactTextString(m) }
+func (*OfpRoleRequest) ProtoMessage()    {}
+func (*OfpRoleRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{84}
+}
+
+func (m *OfpRoleRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpRoleRequest.Unmarshal(m, b)
+}
+func (m *OfpRoleRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpRoleRequest.Marshal(b, m, deterministic)
+}
+func (m *OfpRoleRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpRoleRequest.Merge(m, src)
+}
+func (m *OfpRoleRequest) XXX_Size() int {
+	return xxx_messageInfo_OfpRoleRequest.Size(m)
+}
+func (m *OfpRoleRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpRoleRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpRoleRequest proto.InternalMessageInfo
+
+func (m *OfpRoleRequest) GetRole() OfpControllerRole {
+	if m != nil {
+		return m.Role
+	}
+	return OfpControllerRole_OFPCR_ROLE_NOCHANGE
+}
+
+func (m *OfpRoleRequest) GetGenerationId() uint64 {
+	if m != nil {
+		return m.GenerationId
+	}
+	return 0
+}
+
+// Asynchronous message configuration.
+type OfpAsyncConfig struct {
+	//ofp_header header;    /* OFPT_GET_ASYNC_REPLY or OFPT_SET_ASYNC. */
+	PacketInMask         []uint32 `protobuf:"varint,1,rep,packed,name=packet_in_mask,json=packetInMask,proto3" json:"packet_in_mask,omitempty"`
+	PortStatusMask       []uint32 `protobuf:"varint,2,rep,packed,name=port_status_mask,json=portStatusMask,proto3" json:"port_status_mask,omitempty"`
+	FlowRemovedMask      []uint32 `protobuf:"varint,3,rep,packed,name=flow_removed_mask,json=flowRemovedMask,proto3" json:"flow_removed_mask,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OfpAsyncConfig) Reset()         { *m = OfpAsyncConfig{} }
+func (m *OfpAsyncConfig) String() string { return proto.CompactTextString(m) }
+func (*OfpAsyncConfig) ProtoMessage()    {}
+func (*OfpAsyncConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{85}
+}
+
+func (m *OfpAsyncConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OfpAsyncConfig.Unmarshal(m, b)
+}
+func (m *OfpAsyncConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OfpAsyncConfig.Marshal(b, m, deterministic)
+}
+func (m *OfpAsyncConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OfpAsyncConfig.Merge(m, src)
+}
+func (m *OfpAsyncConfig) XXX_Size() int {
+	return xxx_messageInfo_OfpAsyncConfig.Size(m)
+}
+func (m *OfpAsyncConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_OfpAsyncConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OfpAsyncConfig proto.InternalMessageInfo
+
+func (m *OfpAsyncConfig) GetPacketInMask() []uint32 {
+	if m != nil {
+		return m.PacketInMask
+	}
+	return nil
+}
+
+func (m *OfpAsyncConfig) GetPortStatusMask() []uint32 {
+	if m != nil {
+		return m.PortStatusMask
+	}
+	return nil
+}
+
+func (m *OfpAsyncConfig) GetFlowRemovedMask() []uint32 {
+	if m != nil {
+		return m.FlowRemovedMask
+	}
+	return nil
+}
+
+type MeterModUpdate struct {
+	Id                   string       `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	MeterMod             *OfpMeterMod `protobuf:"bytes,2,opt,name=meter_mod,json=meterMod,proto3" json:"meter_mod,omitempty"`
+	Xid                  uint32       `protobuf:"varint,3,opt,name=xid,proto3" json:"xid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *MeterModUpdate) Reset()         { *m = MeterModUpdate{} }
+func (m *MeterModUpdate) String() string { return proto.CompactTextString(m) }
+func (*MeterModUpdate) ProtoMessage()    {}
+func (*MeterModUpdate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{86}
+}
+
+func (m *MeterModUpdate) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MeterModUpdate.Unmarshal(m, b)
+}
+func (m *MeterModUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MeterModUpdate.Marshal(b, m, deterministic)
+}
+func (m *MeterModUpdate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MeterModUpdate.Merge(m, src)
+}
+func (m *MeterModUpdate) XXX_Size() int {
+	return xxx_messageInfo_MeterModUpdate.Size(m)
+}
+func (m *MeterModUpdate) XXX_DiscardUnknown() {
+	xxx_messageInfo_MeterModUpdate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MeterModUpdate proto.InternalMessageInfo
+
+func (m *MeterModUpdate) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *MeterModUpdate) GetMeterMod() *OfpMeterMod {
+	if m != nil {
+		return m.MeterMod
+	}
+	return nil
+}
+
+func (m *MeterModUpdate) GetXid() uint32 {
+	if m != nil {
+		return m.Xid
+	}
+	return 0
+}
+
+type MeterStatsReply struct {
+	MeterStats           []*OfpMeterStats `protobuf:"bytes,1,rep,name=meter_stats,json=meterStats,proto3" json:"meter_stats,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *MeterStatsReply) Reset()         { *m = MeterStatsReply{} }
+func (m *MeterStatsReply) String() string { return proto.CompactTextString(m) }
+func (*MeterStatsReply) ProtoMessage()    {}
+func (*MeterStatsReply) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{87}
+}
+
+func (m *MeterStatsReply) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MeterStatsReply.Unmarshal(m, b)
+}
+func (m *MeterStatsReply) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MeterStatsReply.Marshal(b, m, deterministic)
+}
+func (m *MeterStatsReply) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MeterStatsReply.Merge(m, src)
+}
+func (m *MeterStatsReply) XXX_Size() int {
+	return xxx_messageInfo_MeterStatsReply.Size(m)
+}
+func (m *MeterStatsReply) XXX_DiscardUnknown() {
+	xxx_messageInfo_MeterStatsReply.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MeterStatsReply proto.InternalMessageInfo
+
+func (m *MeterStatsReply) GetMeterStats() []*OfpMeterStats {
+	if m != nil {
+		return m.MeterStats
+	}
+	return nil
+}
+
+type FlowTableUpdate struct {
+	Id                   string      `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	FlowMod              *OfpFlowMod `protobuf:"bytes,2,opt,name=flow_mod,json=flowMod,proto3" json:"flow_mod,omitempty"`
+	Xid                  uint32      `protobuf:"varint,3,opt,name=xid,proto3" json:"xid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *FlowTableUpdate) Reset()         { *m = FlowTableUpdate{} }
+func (m *FlowTableUpdate) String() string { return proto.CompactTextString(m) }
+func (*FlowTableUpdate) ProtoMessage()    {}
+func (*FlowTableUpdate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{88}
+}
+
+func (m *FlowTableUpdate) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowTableUpdate.Unmarshal(m, b)
+}
+func (m *FlowTableUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowTableUpdate.Marshal(b, m, deterministic)
+}
+func (m *FlowTableUpdate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowTableUpdate.Merge(m, src)
+}
+func (m *FlowTableUpdate) XXX_Size() int {
+	return xxx_messageInfo_FlowTableUpdate.Size(m)
+}
+func (m *FlowTableUpdate) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowTableUpdate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowTableUpdate proto.InternalMessageInfo
+
+func (m *FlowTableUpdate) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *FlowTableUpdate) GetFlowMod() *OfpFlowMod {
+	if m != nil {
+		return m.FlowMod
+	}
+	return nil
+}
+
+func (m *FlowTableUpdate) GetXid() uint32 {
+	if m != nil {
+		return m.Xid
+	}
+	return 0
+}
+
+type FlowGroupTableUpdate struct {
+	Id                   string       `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	GroupMod             *OfpGroupMod `protobuf:"bytes,2,opt,name=group_mod,json=groupMod,proto3" json:"group_mod,omitempty"`
+	Xid                  uint32       `protobuf:"varint,3,opt,name=xid,proto3" json:"xid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *FlowGroupTableUpdate) Reset()         { *m = FlowGroupTableUpdate{} }
+func (m *FlowGroupTableUpdate) String() string { return proto.CompactTextString(m) }
+func (*FlowGroupTableUpdate) ProtoMessage()    {}
+func (*FlowGroupTableUpdate) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{89}
+}
+
+func (m *FlowGroupTableUpdate) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowGroupTableUpdate.Unmarshal(m, b)
+}
+func (m *FlowGroupTableUpdate) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowGroupTableUpdate.Marshal(b, m, deterministic)
+}
+func (m *FlowGroupTableUpdate) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowGroupTableUpdate.Merge(m, src)
+}
+func (m *FlowGroupTableUpdate) XXX_Size() int {
+	return xxx_messageInfo_FlowGroupTableUpdate.Size(m)
+}
+func (m *FlowGroupTableUpdate) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowGroupTableUpdate.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowGroupTableUpdate proto.InternalMessageInfo
+
+func (m *FlowGroupTableUpdate) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *FlowGroupTableUpdate) GetGroupMod() *OfpGroupMod {
+	if m != nil {
+		return m.GroupMod
+	}
+	return nil
+}
+
+func (m *FlowGroupTableUpdate) GetXid() uint32 {
+	if m != nil {
+		return m.Xid
+	}
+	return 0
+}
+
+type Flows struct {
+	Items                []*OfpFlowStats `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *Flows) Reset()         { *m = Flows{} }
+func (m *Flows) String() string { return proto.CompactTextString(m) }
+func (*Flows) ProtoMessage()    {}
+func (*Flows) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{90}
+}
+
+func (m *Flows) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Flows.Unmarshal(m, b)
+}
+func (m *Flows) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Flows.Marshal(b, m, deterministic)
+}
+func (m *Flows) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Flows.Merge(m, src)
+}
+func (m *Flows) XXX_Size() int {
+	return xxx_messageInfo_Flows.Size(m)
+}
+func (m *Flows) XXX_DiscardUnknown() {
+	xxx_messageInfo_Flows.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Flows proto.InternalMessageInfo
+
+func (m *Flows) GetItems() []*OfpFlowStats {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type Meters struct {
+	Items                []*OfpMeterEntry `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *Meters) Reset()         { *m = Meters{} }
+func (m *Meters) String() string { return proto.CompactTextString(m) }
+func (*Meters) ProtoMessage()    {}
+func (*Meters) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{91}
+}
+
+func (m *Meters) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Meters.Unmarshal(m, b)
+}
+func (m *Meters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Meters.Marshal(b, m, deterministic)
+}
+func (m *Meters) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Meters.Merge(m, src)
+}
+func (m *Meters) XXX_Size() int {
+	return xxx_messageInfo_Meters.Size(m)
+}
+func (m *Meters) XXX_DiscardUnknown() {
+	xxx_messageInfo_Meters.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Meters proto.InternalMessageInfo
+
+func (m *Meters) GetItems() []*OfpMeterEntry {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type FlowGroups struct {
+	Items                []*OfpGroupEntry `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *FlowGroups) Reset()         { *m = FlowGroups{} }
+func (m *FlowGroups) String() string { return proto.CompactTextString(m) }
+func (*FlowGroups) ProtoMessage()    {}
+func (*FlowGroups) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{92}
+}
+
+func (m *FlowGroups) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowGroups.Unmarshal(m, b)
+}
+func (m *FlowGroups) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowGroups.Marshal(b, m, deterministic)
+}
+func (m *FlowGroups) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowGroups.Merge(m, src)
+}
+func (m *FlowGroups) XXX_Size() int {
+	return xxx_messageInfo_FlowGroups.Size(m)
+}
+func (m *FlowGroups) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowGroups.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowGroups proto.InternalMessageInfo
+
+func (m *FlowGroups) GetItems() []*OfpGroupEntry {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type FlowChanges struct {
+	ToAdd                *Flows   `protobuf:"bytes,1,opt,name=to_add,json=toAdd,proto3" json:"to_add,omitempty"`
+	ToRemove             *Flows   `protobuf:"bytes,2,opt,name=to_remove,json=toRemove,proto3" json:"to_remove,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *FlowChanges) Reset()         { *m = FlowChanges{} }
+func (m *FlowChanges) String() string { return proto.CompactTextString(m) }
+func (*FlowChanges) ProtoMessage()    {}
+func (*FlowChanges) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{93}
+}
+
+func (m *FlowChanges) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowChanges.Unmarshal(m, b)
+}
+func (m *FlowChanges) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowChanges.Marshal(b, m, deterministic)
+}
+func (m *FlowChanges) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowChanges.Merge(m, src)
+}
+func (m *FlowChanges) XXX_Size() int {
+	return xxx_messageInfo_FlowChanges.Size(m)
+}
+func (m *FlowChanges) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowChanges.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowChanges proto.InternalMessageInfo
+
+func (m *FlowChanges) GetToAdd() *Flows {
+	if m != nil {
+		return m.ToAdd
+	}
+	return nil
+}
+
+func (m *FlowChanges) GetToRemove() *Flows {
+	if m != nil {
+		return m.ToRemove
+	}
+	return nil
+}
+
+type FlowGroupChanges struct {
+	ToAdd                *FlowGroups `protobuf:"bytes,1,opt,name=to_add,json=toAdd,proto3" json:"to_add,omitempty"`
+	ToRemove             *FlowGroups `protobuf:"bytes,2,opt,name=to_remove,json=toRemove,proto3" json:"to_remove,omitempty"`
+	ToUpdate             *FlowGroups `protobuf:"bytes,3,opt,name=to_update,json=toUpdate,proto3" json:"to_update,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *FlowGroupChanges) Reset()         { *m = FlowGroupChanges{} }
+func (m *FlowGroupChanges) String() string { return proto.CompactTextString(m) }
+func (*FlowGroupChanges) ProtoMessage()    {}
+func (*FlowGroupChanges) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{94}
+}
+
+func (m *FlowGroupChanges) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowGroupChanges.Unmarshal(m, b)
+}
+func (m *FlowGroupChanges) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowGroupChanges.Marshal(b, m, deterministic)
+}
+func (m *FlowGroupChanges) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowGroupChanges.Merge(m, src)
+}
+func (m *FlowGroupChanges) XXX_Size() int {
+	return xxx_messageInfo_FlowGroupChanges.Size(m)
+}
+func (m *FlowGroupChanges) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowGroupChanges.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowGroupChanges proto.InternalMessageInfo
+
+func (m *FlowGroupChanges) GetToAdd() *FlowGroups {
+	if m != nil {
+		return m.ToAdd
+	}
+	return nil
+}
+
+func (m *FlowGroupChanges) GetToRemove() *FlowGroups {
+	if m != nil {
+		return m.ToRemove
+	}
+	return nil
+}
+
+func (m *FlowGroupChanges) GetToUpdate() *FlowGroups {
+	if m != nil {
+		return m.ToUpdate
+	}
+	return nil
+}
+
+type PacketIn struct {
+	Id                   string       `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	PacketIn             *OfpPacketIn `protobuf:"bytes,2,opt,name=packet_in,json=packetIn,proto3" json:"packet_in,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}     `json:"-"`
+	XXX_unrecognized     []byte       `json:"-"`
+	XXX_sizecache        int32        `json:"-"`
+}
+
+func (m *PacketIn) Reset()         { *m = PacketIn{} }
+func (m *PacketIn) String() string { return proto.CompactTextString(m) }
+func (*PacketIn) ProtoMessage()    {}
+func (*PacketIn) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{95}
+}
+
+func (m *PacketIn) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PacketIn.Unmarshal(m, b)
+}
+func (m *PacketIn) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PacketIn.Marshal(b, m, deterministic)
+}
+func (m *PacketIn) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PacketIn.Merge(m, src)
+}
+func (m *PacketIn) XXX_Size() int {
+	return xxx_messageInfo_PacketIn.Size(m)
+}
+func (m *PacketIn) XXX_DiscardUnknown() {
+	xxx_messageInfo_PacketIn.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketIn proto.InternalMessageInfo
+
+func (m *PacketIn) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *PacketIn) GetPacketIn() *OfpPacketIn {
+	if m != nil {
+		return m.PacketIn
+	}
+	return nil
+}
+
+type PacketOut struct {
+	Id                   string        `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	PacketOut            *OfpPacketOut `protobuf:"bytes,2,opt,name=packet_out,json=packetOut,proto3" json:"packet_out,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *PacketOut) Reset()         { *m = PacketOut{} }
+func (m *PacketOut) String() string { return proto.CompactTextString(m) }
+func (*PacketOut) ProtoMessage()    {}
+func (*PacketOut) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{96}
+}
+
+func (m *PacketOut) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PacketOut.Unmarshal(m, b)
+}
+func (m *PacketOut) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PacketOut.Marshal(b, m, deterministic)
+}
+func (m *PacketOut) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PacketOut.Merge(m, src)
+}
+func (m *PacketOut) XXX_Size() int {
+	return xxx_messageInfo_PacketOut.Size(m)
+}
+func (m *PacketOut) XXX_DiscardUnknown() {
+	xxx_messageInfo_PacketOut.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PacketOut proto.InternalMessageInfo
+
+func (m *PacketOut) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *PacketOut) GetPacketOut() *OfpPacketOut {
+	if m != nil {
+		return m.PacketOut
+	}
+	return nil
+}
+
+type ChangeEvent struct {
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Types that are valid to be assigned to Event:
+	//	*ChangeEvent_PortStatus
+	//	*ChangeEvent_Error
+	//	*ChangeEvent_DeviceStatus
+	Event                isChangeEvent_Event `protobuf_oneof:"event"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *ChangeEvent) Reset()         { *m = ChangeEvent{} }
+func (m *ChangeEvent) String() string { return proto.CompactTextString(m) }
+func (*ChangeEvent) ProtoMessage()    {}
+func (*ChangeEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{97}
+}
+
+func (m *ChangeEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ChangeEvent.Unmarshal(m, b)
+}
+func (m *ChangeEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ChangeEvent.Marshal(b, m, deterministic)
+}
+func (m *ChangeEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ChangeEvent.Merge(m, src)
+}
+func (m *ChangeEvent) XXX_Size() int {
+	return xxx_messageInfo_ChangeEvent.Size(m)
+}
+func (m *ChangeEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_ChangeEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ChangeEvent proto.InternalMessageInfo
+
+func (m *ChangeEvent) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+type isChangeEvent_Event interface {
+	isChangeEvent_Event()
+}
+
+type ChangeEvent_PortStatus struct {
+	PortStatus *OfpPortStatus `protobuf:"bytes,2,opt,name=port_status,json=portStatus,proto3,oneof"`
+}
+
+type ChangeEvent_Error struct {
+	Error *OfpErrorMsg `protobuf:"bytes,3,opt,name=error,proto3,oneof"`
+}
+
+type ChangeEvent_DeviceStatus struct {
+	DeviceStatus *OfpDeviceStatus `protobuf:"bytes,4,opt,name=device_status,json=deviceStatus,proto3,oneof"`
+}
+
+func (*ChangeEvent_PortStatus) isChangeEvent_Event() {}
+
+func (*ChangeEvent_Error) isChangeEvent_Event() {}
+
+func (*ChangeEvent_DeviceStatus) isChangeEvent_Event() {}
+
+func (m *ChangeEvent) GetEvent() isChangeEvent_Event {
+	if m != nil {
+		return m.Event
+	}
+	return nil
+}
+
+func (m *ChangeEvent) GetPortStatus() *OfpPortStatus {
+	if x, ok := m.GetEvent().(*ChangeEvent_PortStatus); ok {
+		return x.PortStatus
+	}
+	return nil
+}
+
+func (m *ChangeEvent) GetError() *OfpErrorMsg {
+	if x, ok := m.GetEvent().(*ChangeEvent_Error); ok {
+		return x.Error
+	}
+	return nil
+}
+
+func (m *ChangeEvent) GetDeviceStatus() *OfpDeviceStatus {
+	if x, ok := m.GetEvent().(*ChangeEvent_DeviceStatus); ok {
+		return x.DeviceStatus
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*ChangeEvent) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*ChangeEvent_PortStatus)(nil),
+		(*ChangeEvent_Error)(nil),
+		(*ChangeEvent_DeviceStatus)(nil),
+	}
+}
+
+// Additional information required to process flow at device adapters
+type FlowMetadata struct {
+	// Meters associated with flow-update to adapter
+	Meters               []*OfpMeterConfig `protobuf:"bytes,1,rep,name=meters,proto3" json:"meters,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *FlowMetadata) Reset()         { *m = FlowMetadata{} }
+func (m *FlowMetadata) String() string { return proto.CompactTextString(m) }
+func (*FlowMetadata) ProtoMessage()    {}
+func (*FlowMetadata) Descriptor() ([]byte, []int) {
+	return fileDescriptor_08e3a4e375aeddc7, []int{98}
+}
+
+func (m *FlowMetadata) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_FlowMetadata.Unmarshal(m, b)
+}
+func (m *FlowMetadata) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_FlowMetadata.Marshal(b, m, deterministic)
+}
+func (m *FlowMetadata) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_FlowMetadata.Merge(m, src)
+}
+func (m *FlowMetadata) XXX_Size() int {
+	return xxx_messageInfo_FlowMetadata.Size(m)
+}
+func (m *FlowMetadata) XXX_DiscardUnknown() {
+	xxx_messageInfo_FlowMetadata.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_FlowMetadata proto.InternalMessageInfo
+
+func (m *FlowMetadata) GetMeters() []*OfpMeterConfig {
+	if m != nil {
+		return m.Meters
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterEnum("openflow_13.OfpPortNo", OfpPortNo_name, OfpPortNo_value)
+	proto.RegisterEnum("openflow_13.OfpType", OfpType_name, OfpType_value)
+	proto.RegisterEnum("openflow_13.OfpHelloElemType", OfpHelloElemType_name, OfpHelloElemType_value)
+	proto.RegisterEnum("openflow_13.OfpConfigFlags", OfpConfigFlags_name, OfpConfigFlags_value)
+	proto.RegisterEnum("openflow_13.OfpTableConfig", OfpTableConfig_name, OfpTableConfig_value)
+	proto.RegisterEnum("openflow_13.OfpTable", OfpTable_name, OfpTable_value)
+	proto.RegisterEnum("openflow_13.OfpCapabilities", OfpCapabilities_name, OfpCapabilities_value)
+	proto.RegisterEnum("openflow_13.OfpPortConfig", OfpPortConfig_name, OfpPortConfig_value)
+	proto.RegisterEnum("openflow_13.OfpPortState", OfpPortState_name, OfpPortState_value)
+	proto.RegisterEnum("openflow_13.OfpPortFeatures", OfpPortFeatures_name, OfpPortFeatures_value)
+	proto.RegisterEnum("openflow_13.OfpPortReason", OfpPortReason_name, OfpPortReason_value)
+	proto.RegisterEnum("openflow_13.OfpDeviceConnection", OfpDeviceConnection_name, OfpDeviceConnection_value)
+	proto.RegisterEnum("openflow_13.OfpMatchType", OfpMatchType_name, OfpMatchType_value)
+	proto.RegisterEnum("openflow_13.OfpOxmClass", OfpOxmClass_name, OfpOxmClass_value)
+	proto.RegisterEnum("openflow_13.OxmOfbFieldTypes", OxmOfbFieldTypes_name, OxmOfbFieldTypes_value)
+	proto.RegisterEnum("openflow_13.OfpVlanId", OfpVlanId_name, OfpVlanId_value)
+	proto.RegisterEnum("openflow_13.OfpIpv6ExthdrFlags", OfpIpv6ExthdrFlags_name, OfpIpv6ExthdrFlags_value)
+	proto.RegisterEnum("openflow_13.OfpActionType", OfpActionType_name, OfpActionType_value)
+	proto.RegisterEnum("openflow_13.OfpControllerMaxLen", OfpControllerMaxLen_name, OfpControllerMaxLen_value)
+	proto.RegisterEnum("openflow_13.OfpInstructionType", OfpInstructionType_name, OfpInstructionType_value)
+	proto.RegisterEnum("openflow_13.OfpFlowModCommand", OfpFlowModCommand_name, OfpFlowModCommand_value)
+	proto.RegisterEnum("openflow_13.OfpFlowModFlags", OfpFlowModFlags_name, OfpFlowModFlags_value)
+	proto.RegisterEnum("openflow_13.OfpGroup", OfpGroup_name, OfpGroup_value)
+	proto.RegisterEnum("openflow_13.OfpGroupModCommand", OfpGroupModCommand_name, OfpGroupModCommand_value)
+	proto.RegisterEnum("openflow_13.OfpGroupType", OfpGroupType_name, OfpGroupType_value)
+	proto.RegisterEnum("openflow_13.OfpPacketInReason", OfpPacketInReason_name, OfpPacketInReason_value)
+	proto.RegisterEnum("openflow_13.OfpFlowRemovedReason", OfpFlowRemovedReason_name, OfpFlowRemovedReason_value)
+	proto.RegisterEnum("openflow_13.OfpMeter", OfpMeter_name, OfpMeter_value)
+	proto.RegisterEnum("openflow_13.OfpMeterBandType", OfpMeterBandType_name, OfpMeterBandType_value)
+	proto.RegisterEnum("openflow_13.OfpMeterModCommand", OfpMeterModCommand_name, OfpMeterModCommand_value)
+	proto.RegisterEnum("openflow_13.OfpMeterFlags", OfpMeterFlags_name, OfpMeterFlags_value)
+	proto.RegisterEnum("openflow_13.OfpErrorType", OfpErrorType_name, OfpErrorType_value)
+	proto.RegisterEnum("openflow_13.OfpHelloFailedCode", OfpHelloFailedCode_name, OfpHelloFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpBadRequestCode", OfpBadRequestCode_name, OfpBadRequestCode_value)
+	proto.RegisterEnum("openflow_13.OfpBadActionCode", OfpBadActionCode_name, OfpBadActionCode_value)
+	proto.RegisterEnum("openflow_13.OfpBadInstructionCode", OfpBadInstructionCode_name, OfpBadInstructionCode_value)
+	proto.RegisterEnum("openflow_13.OfpBadMatchCode", OfpBadMatchCode_name, OfpBadMatchCode_value)
+	proto.RegisterEnum("openflow_13.OfpFlowModFailedCode", OfpFlowModFailedCode_name, OfpFlowModFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpGroupModFailedCode", OfpGroupModFailedCode_name, OfpGroupModFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpPortModFailedCode", OfpPortModFailedCode_name, OfpPortModFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpTableModFailedCode", OfpTableModFailedCode_name, OfpTableModFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpQueueOpFailedCode", OfpQueueOpFailedCode_name, OfpQueueOpFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpSwitchConfigFailedCode", OfpSwitchConfigFailedCode_name, OfpSwitchConfigFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpRoleRequestFailedCode", OfpRoleRequestFailedCode_name, OfpRoleRequestFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpMeterModFailedCode", OfpMeterModFailedCode_name, OfpMeterModFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpTableFeaturesFailedCode", OfpTableFeaturesFailedCode_name, OfpTableFeaturesFailedCode_value)
+	proto.RegisterEnum("openflow_13.OfpMultipartType", OfpMultipartType_name, OfpMultipartType_value)
+	proto.RegisterEnum("openflow_13.OfpMultipartRequestFlags", OfpMultipartRequestFlags_name, OfpMultipartRequestFlags_value)
+	proto.RegisterEnum("openflow_13.OfpMultipartReplyFlags", OfpMultipartReplyFlags_name, OfpMultipartReplyFlags_value)
+	proto.RegisterEnum("openflow_13.OfpTableFeaturePropType", OfpTableFeaturePropType_name, OfpTableFeaturePropType_value)
+	proto.RegisterEnum("openflow_13.OfpGroupCapabilities", OfpGroupCapabilities_name, OfpGroupCapabilities_value)
+	proto.RegisterEnum("openflow_13.OfpQueueProperties", OfpQueueProperties_name, OfpQueueProperties_value)
+	proto.RegisterEnum("openflow_13.OfpControllerRole", OfpControllerRole_name, OfpControllerRole_value)
+	proto.RegisterType((*OfpHeader)(nil), "openflow_13.ofp_header")
+	proto.RegisterType((*OfpHelloElemHeader)(nil), "openflow_13.ofp_hello_elem_header")
+	proto.RegisterType((*OfpHelloElemVersionbitmap)(nil), "openflow_13.ofp_hello_elem_versionbitmap")
+	proto.RegisterType((*OfpHello)(nil), "openflow_13.ofp_hello")
+	proto.RegisterType((*OfpSwitchConfig)(nil), "openflow_13.ofp_switch_config")
+	proto.RegisterType((*OfpTableMod)(nil), "openflow_13.ofp_table_mod")
+	proto.RegisterType((*OfpPort)(nil), "openflow_13.ofp_port")
+	proto.RegisterType((*OfpSwitchFeatures)(nil), "openflow_13.ofp_switch_features")
+	proto.RegisterType((*OfpPortStatus)(nil), "openflow_13.ofp_port_status")
+	proto.RegisterType((*OfpDeviceStatus)(nil), "openflow_13.ofp_device_status")
+	proto.RegisterType((*OfpPortMod)(nil), "openflow_13.ofp_port_mod")
+	proto.RegisterType((*OfpMatch)(nil), "openflow_13.ofp_match")
+	proto.RegisterType((*OfpOxmField)(nil), "openflow_13.ofp_oxm_field")
+	proto.RegisterType((*OfpOxmOfbField)(nil), "openflow_13.ofp_oxm_ofb_field")
+	proto.RegisterType((*OfpOxmExperimenterField)(nil), "openflow_13.ofp_oxm_experimenter_field")
+	proto.RegisterType((*OfpAction)(nil), "openflow_13.ofp_action")
+	proto.RegisterType((*OfpActionOutput)(nil), "openflow_13.ofp_action_output")
+	proto.RegisterType((*OfpActionMplsTtl)(nil), "openflow_13.ofp_action_mpls_ttl")
+	proto.RegisterType((*OfpActionPush)(nil), "openflow_13.ofp_action_push")
+	proto.RegisterType((*OfpActionPopMpls)(nil), "openflow_13.ofp_action_pop_mpls")
+	proto.RegisterType((*OfpActionGroup)(nil), "openflow_13.ofp_action_group")
+	proto.RegisterType((*OfpActionNwTtl)(nil), "openflow_13.ofp_action_nw_ttl")
+	proto.RegisterType((*OfpActionSetField)(nil), "openflow_13.ofp_action_set_field")
+	proto.RegisterType((*OfpActionExperimenter)(nil), "openflow_13.ofp_action_experimenter")
+	proto.RegisterType((*OfpInstruction)(nil), "openflow_13.ofp_instruction")
+	proto.RegisterType((*OfpInstructionGotoTable)(nil), "openflow_13.ofp_instruction_goto_table")
+	proto.RegisterType((*OfpInstructionWriteMetadata)(nil), "openflow_13.ofp_instruction_write_metadata")
+	proto.RegisterType((*OfpInstructionActions)(nil), "openflow_13.ofp_instruction_actions")
+	proto.RegisterType((*OfpInstructionMeter)(nil), "openflow_13.ofp_instruction_meter")
+	proto.RegisterType((*OfpInstructionExperimenter)(nil), "openflow_13.ofp_instruction_experimenter")
+	proto.RegisterType((*OfpFlowMod)(nil), "openflow_13.ofp_flow_mod")
+	proto.RegisterType((*OfpBucket)(nil), "openflow_13.ofp_bucket")
+	proto.RegisterType((*OfpGroupMod)(nil), "openflow_13.ofp_group_mod")
+	proto.RegisterType((*OfpPacketOut)(nil), "openflow_13.ofp_packet_out")
+	proto.RegisterType((*OfpPacketIn)(nil), "openflow_13.ofp_packet_in")
+	proto.RegisterType((*OfpFlowRemoved)(nil), "openflow_13.ofp_flow_removed")
+	proto.RegisterType((*OfpMeterBandHeader)(nil), "openflow_13.ofp_meter_band_header")
+	proto.RegisterType((*OfpMeterBandDrop)(nil), "openflow_13.ofp_meter_band_drop")
+	proto.RegisterType((*OfpMeterBandDscpRemark)(nil), "openflow_13.ofp_meter_band_dscp_remark")
+	proto.RegisterType((*OfpMeterBandExperimenter)(nil), "openflow_13.ofp_meter_band_experimenter")
+	proto.RegisterType((*OfpMeterMod)(nil), "openflow_13.ofp_meter_mod")
+	proto.RegisterType((*OfpErrorMsg)(nil), "openflow_13.ofp_error_msg")
+	proto.RegisterType((*OfpErrorExperimenterMsg)(nil), "openflow_13.ofp_error_experimenter_msg")
+	proto.RegisterType((*OfpMultipartRequest)(nil), "openflow_13.ofp_multipart_request")
+	proto.RegisterType((*OfpMultipartReply)(nil), "openflow_13.ofp_multipart_reply")
+	proto.RegisterType((*OfpDesc)(nil), "openflow_13.ofp_desc")
+	proto.RegisterType((*OfpFlowStatsRequest)(nil), "openflow_13.ofp_flow_stats_request")
+	proto.RegisterType((*OfpFlowStats)(nil), "openflow_13.ofp_flow_stats")
+	proto.RegisterType((*OfpAggregateStatsRequest)(nil), "openflow_13.ofp_aggregate_stats_request")
+	proto.RegisterType((*OfpAggregateStatsReply)(nil), "openflow_13.ofp_aggregate_stats_reply")
+	proto.RegisterType((*OfpTableFeatureProperty)(nil), "openflow_13.ofp_table_feature_property")
+	proto.RegisterType((*OfpTableFeaturePropInstructions)(nil), "openflow_13.ofp_table_feature_prop_instructions")
+	proto.RegisterType((*OfpTableFeaturePropNextTables)(nil), "openflow_13.ofp_table_feature_prop_next_tables")
+	proto.RegisterType((*OfpTableFeaturePropActions)(nil), "openflow_13.ofp_table_feature_prop_actions")
+	proto.RegisterType((*OfpTableFeaturePropOxm)(nil), "openflow_13.ofp_table_feature_prop_oxm")
+	proto.RegisterType((*OfpTableFeaturePropExperimenter)(nil), "openflow_13.ofp_table_feature_prop_experimenter")
+	proto.RegisterType((*OfpTableFeatures)(nil), "openflow_13.ofp_table_features")
+	proto.RegisterType((*OfpTableStats)(nil), "openflow_13.ofp_table_stats")
+	proto.RegisterType((*OfpPortStatsRequest)(nil), "openflow_13.ofp_port_stats_request")
+	proto.RegisterType((*OfpPortStats)(nil), "openflow_13.ofp_port_stats")
+	proto.RegisterType((*OfpGroupStatsRequest)(nil), "openflow_13.ofp_group_stats_request")
+	proto.RegisterType((*OfpBucketCounter)(nil), "openflow_13.ofp_bucket_counter")
+	proto.RegisterType((*OfpGroupStats)(nil), "openflow_13.ofp_group_stats")
+	proto.RegisterType((*OfpGroupDesc)(nil), "openflow_13.ofp_group_desc")
+	proto.RegisterType((*OfpGroupEntry)(nil), "openflow_13.ofp_group_entry")
+	proto.RegisterType((*OfpGroupFeatures)(nil), "openflow_13.ofp_group_features")
+	proto.RegisterType((*OfpMeterMultipartRequest)(nil), "openflow_13.ofp_meter_multipart_request")
+	proto.RegisterType((*OfpMeterBandStats)(nil), "openflow_13.ofp_meter_band_stats")
+	proto.RegisterType((*OfpMeterStats)(nil), "openflow_13.ofp_meter_stats")
+	proto.RegisterType((*OfpMeterConfig)(nil), "openflow_13.ofp_meter_config")
+	proto.RegisterType((*OfpMeterFeatures)(nil), "openflow_13.ofp_meter_features")
+	proto.RegisterType((*OfpMeterEntry)(nil), "openflow_13.ofp_meter_entry")
+	proto.RegisterType((*OfpExperimenterMultipartHeader)(nil), "openflow_13.ofp_experimenter_multipart_header")
+	proto.RegisterType((*OfpExperimenterHeader)(nil), "openflow_13.ofp_experimenter_header")
+	proto.RegisterType((*OfpQueuePropHeader)(nil), "openflow_13.ofp_queue_prop_header")
+	proto.RegisterType((*OfpQueuePropMinRate)(nil), "openflow_13.ofp_queue_prop_min_rate")
+	proto.RegisterType((*OfpQueuePropMaxRate)(nil), "openflow_13.ofp_queue_prop_max_rate")
+	proto.RegisterType((*OfpQueuePropExperimenter)(nil), "openflow_13.ofp_queue_prop_experimenter")
+	proto.RegisterType((*OfpPacketQueue)(nil), "openflow_13.ofp_packet_queue")
+	proto.RegisterType((*OfpQueueGetConfigRequest)(nil), "openflow_13.ofp_queue_get_config_request")
+	proto.RegisterType((*OfpQueueGetConfigReply)(nil), "openflow_13.ofp_queue_get_config_reply")
+	proto.RegisterType((*OfpActionSetQueue)(nil), "openflow_13.ofp_action_set_queue")
+	proto.RegisterType((*OfpQueueStatsRequest)(nil), "openflow_13.ofp_queue_stats_request")
+	proto.RegisterType((*OfpQueueStats)(nil), "openflow_13.ofp_queue_stats")
+	proto.RegisterType((*OfpRoleRequest)(nil), "openflow_13.ofp_role_request")
+	proto.RegisterType((*OfpAsyncConfig)(nil), "openflow_13.ofp_async_config")
+	proto.RegisterType((*MeterModUpdate)(nil), "openflow_13.MeterModUpdate")
+	proto.RegisterType((*MeterStatsReply)(nil), "openflow_13.MeterStatsReply")
+	proto.RegisterType((*FlowTableUpdate)(nil), "openflow_13.FlowTableUpdate")
+	proto.RegisterType((*FlowGroupTableUpdate)(nil), "openflow_13.FlowGroupTableUpdate")
+	proto.RegisterType((*Flows)(nil), "openflow_13.Flows")
+	proto.RegisterType((*Meters)(nil), "openflow_13.Meters")
+	proto.RegisterType((*FlowGroups)(nil), "openflow_13.FlowGroups")
+	proto.RegisterType((*FlowChanges)(nil), "openflow_13.FlowChanges")
+	proto.RegisterType((*FlowGroupChanges)(nil), "openflow_13.FlowGroupChanges")
+	proto.RegisterType((*PacketIn)(nil), "openflow_13.PacketIn")
+	proto.RegisterType((*PacketOut)(nil), "openflow_13.PacketOut")
+	proto.RegisterType((*ChangeEvent)(nil), "openflow_13.ChangeEvent")
+	proto.RegisterType((*FlowMetadata)(nil), "openflow_13.FlowMetadata")
+}
+
+func init() { proto.RegisterFile("voltha_protos/openflow_13.proto", fileDescriptor_08e3a4e375aeddc7) }
+
+var fileDescriptor_08e3a4e375aeddc7 = []byte{
+	// 8566 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x7d, 0x5b, 0x8c, 0x1b, 0x49,
+	0x92, 0x98, 0xf8, 0x68, 0x36, 0x99, 0xec, 0x6e, 0x95, 0x4a, 0x2f, 0x4a, 0x2d, 0x8d, 0x24, 0xee,
+	0xcc, 0xee, 0x2c, 0xd7, 0x37, 0x9a, 0xd1, 0x68, 0xb4, 0x7b, 0xfb, 0x38, 0xab, 0x48, 0x16, 0x9b,
+	0x1c, 0xf1, 0xa5, 0xaa, 0xea, 0x96, 0xb4, 0x86, 0x5d, 0xa0, 0xc8, 0x52, 0x37, 0x6f, 0x48, 0x16,
+	0xb7, 0xaa, 0xba, 0xd5, 0x3a, 0xef, 0x19, 0xb2, 0x0f, 0x86, 0x01, 0xdb, 0x77, 0x67, 0xe3, 0x3e,
+	0x16, 0x30, 0xce, 0x80, 0x0f, 0xb6, 0x3f, 0x0c, 0x03, 0xfe, 0x30, 0x60, 0xc0, 0x80, 0xbf, 0x0f,
+	0xb0, 0x01, 0xc3, 0x06, 0x0e, 0x30, 0xee, 0xe7, 0xee, 0xc7, 0x38, 0xff, 0x18, 0xb8, 0x7f, 0xfb,
+	0xbc, 0x5e, 0x19, 0x91, 0x11, 0x99, 0x95, 0xc5, 0x47, 0x4f, 0xef, 0x7a, 0xd6, 0x1f, 0xfe, 0x12,
+	0x2b, 0x5e, 0x19, 0x19, 0x19, 0x11, 0x19, 0x19, 0x95, 0xd5, 0x62, 0x77, 0x4e, 0xfc, 0x49, 0x74,
+	0x34, 0x70, 0xe7, 0x81, 0x1f, 0xf9, 0xe1, 0x7d, 0x7f, 0xee, 0xcd, 0x5e, 0x4d, 0xfc, 0xd7, 0xee,
+	0x27, 0x9f, 0x7e, 0xc4, 0x41, 0x7a, 0x51, 0x01, 0xdd, 0xbc, 0x75, 0xe8, 0xfb, 0x87, 0x13, 0xef,
+	0xfe, 0x60, 0x3e, 0xbe, 0x3f, 0x98, 0xcd, 0xfc, 0x68, 0x10, 0x8d, 0xfd, 0x59, 0x88, 0xa4, 0xe5,
+	0x21, 0x63, 0xfe, 0xab, 0xb9, 0x7b, 0xe4, 0x0d, 0x46, 0x5e, 0xa0, 0x97, 0xd8, 0xe6, 0x89, 0x17,
+	0x84, 0x63, 0x7f, 0x56, 0x4a, 0xdd, 0x4d, 0x7d, 0xb8, 0x6d, 0x89, 0x47, 0xfd, 0x9b, 0x2c, 0x1b,
+	0xbd, 0x99, 0x7b, 0xa5, 0xf4, 0xdd, 0xd4, 0x87, 0x3b, 0x0f, 0xae, 0x7e, 0xa4, 0x0e, 0x0a, 0x02,
+	0x00, 0x69, 0x71, 0x12, 0x5d, 0x63, 0x99, 0xd3, 0xf1, 0xa8, 0x94, 0xe1, 0x02, 0xe0, 0x67, 0xf9,
+	0x5f, 0xa6, 0xd8, 0x55, 0x1c, 0x65, 0x32, 0xf1, 0x5d, 0x6f, 0xe2, 0x4d, 0xc5, 0x80, 0x0f, 0x49,
+	0x6c, 0x8a, 0x8b, 0xbd, 0xbb, 0x24, 0x56, 0xe1, 0x50, 0x46, 0x78, 0xca, 0xb6, 0x49, 0xaf, 0x97,
+	0xe3, 0x68, 0x3a, 0x98, 0x73, 0xad, 0x8a, 0x0f, 0xbe, 0x79, 0x16, 0x7b, 0x82, 0xa1, 0x79, 0xc1,
+	0x4a, 0x4a, 0xa8, 0x16, 0xd8, 0x26, 0x90, 0x79, 0xb3, 0xa8, 0xfc, 0x1d, 0x76, 0xeb, 0x2c, 0x5e,
+	0x30, 0x12, 0xfe, 0x0a, 0x4b, 0xe9, 0xbb, 0x19, 0x30, 0x12, 0x3d, 0x96, 0x9f, 0xb0, 0x82, 0xe4,
+	0xd4, 0x7f, 0x8d, 0xe5, 0x49, 0x62, 0x58, 0x4a, 0xdd, 0xcd, 0x7c, 0x58, 0x7c, 0x50, 0x3e, 0x4b,
+	0x3f, 0x34, 0x88, 0x25, 0x79, 0xca, 0x1d, 0x76, 0x09, 0x48, 0xc2, 0xd7, 0xe3, 0x68, 0x78, 0xe4,
+	0x0e, 0xfd, 0xd9, 0xab, 0xf1, 0xa1, 0x7e, 0x85, 0x6d, 0xbc, 0x9a, 0x0c, 0x0e, 0x43, 0x5a, 0x1e,
+	0x7c, 0xd0, 0xcb, 0x6c, 0x7b, 0x3a, 0x0e, 0x43, 0x37, 0xf4, 0x66, 0x23, 0x77, 0xe2, 0xcd, 0xb8,
+	0x3d, 0xb6, 0xad, 0x22, 0x00, 0x6d, 0x6f, 0x36, 0x6a, 0x7b, 0xb3, 0x72, 0x95, 0x6d, 0xf3, 0x75,
+	0x1a, 0xbc, 0x9c, 0x78, 0xee, 0xd4, 0x1f, 0xe9, 0x37, 0x58, 0x1e, 0x1f, 0xc6, 0x23, 0xb1, 0xd8,
+	0xfc, 0xb9, 0x35, 0xd2, 0xaf, 0xb1, 0x1c, 0x8e, 0x47, 0x82, 0xe8, 0xa9, 0xfc, 0x4f, 0xd2, 0x2c,
+	0x0f, 0x42, 0xe6, 0x7e, 0x10, 0xe9, 0xd7, 0xd9, 0x26, 0xfc, 0xeb, 0xce, 0x7c, 0x62, 0xcf, 0xc1,
+	0x63, 0xd7, 0x07, 0xc4, 0xd1, 0x6b, 0x77, 0x30, 0x1a, 0x05, 0x64, 0x9f, 0xdc, 0xd1, 0x6b, 0x63,
+	0x34, 0x0a, 0x74, 0x9d, 0x65, 0x67, 0x83, 0xa9, 0xc7, 0x3d, 0xa3, 0x60, 0xf1, 0xdf, 0xca, 0x50,
+	0x59, 0x75, 0x28, 0x98, 0x68, 0x18, 0x0d, 0x22, 0xaf, 0xb4, 0x81, 0x13, 0xe5, 0x0f, 0x20, 0x61,
+	0x78, 0x1c, 0x04, 0xa5, 0x1c, 0x07, 0xf2, 0xdf, 0xfa, 0x7b, 0x8c, 0x0d, 0x46, 0x27, 0x5e, 0x10,
+	0x8d, 0x43, 0x6f, 0x54, 0xda, 0xe4, 0x18, 0x05, 0xa2, 0xdf, 0x62, 0x85, 0xf0, 0x78, 0x0e, 0xba,
+	0x79, 0xa3, 0x52, 0x9e, 0xa3, 0x63, 0x00, 0x48, 0x9c, 0x7b, 0x5e, 0x50, 0x2a, 0xa0, 0x44, 0xf8,
+	0xad, 0xdf, 0x66, 0x0c, 0x24, 0xbb, 0xe1, 0xdc, 0xf3, 0x46, 0x25, 0x86, 0x2c, 0x00, 0xb1, 0x01,
+	0xa0, 0xef, 0xb2, 0xc2, 0x74, 0x70, 0x4a, 0xd8, 0x22, 0xc7, 0xe6, 0xa7, 0x83, 0x53, 0x8e, 0x2c,
+	0xff, 0x9b, 0x14, 0xbb, 0xac, 0x2c, 0xdb, 0x2b, 0x6f, 0x10, 0x1d, 0x07, 0x5e, 0xa8, 0xdf, 0x61,
+	0xc5, 0xd1, 0x20, 0x1a, 0xcc, 0x07, 0xd1, 0x91, 0x30, 0x78, 0xd6, 0x62, 0x02, 0xd4, 0xe2, 0x52,
+	0x67, 0xee, 0xcb, 0xe3, 0x57, 0xaf, 0xbc, 0x20, 0x24, 0xb3, 0xe7, 0x67, 0x55, 0x7c, 0x86, 0xb5,
+	0x9a, 0xe1, 0xd2, 0x85, 0x14, 0x57, 0x9b, 0x33, 0x87, 0x3f, 0xea, 0xf7, 0xd8, 0xd6, 0xe0, 0xf8,
+	0x74, 0x3c, 0x19, 0x0f, 0x82, 0x37, 0x20, 0x19, 0xcd, 0x58, 0x94, 0xb0, 0xd6, 0x48, 0x2f, 0xb3,
+	0xad, 0xe1, 0x60, 0x3e, 0x78, 0x39, 0x9e, 0x8c, 0xa3, 0xb1, 0x17, 0x92, 0x49, 0x13, 0xb0, 0x72,
+	0xc0, 0x2e, 0x8a, 0x95, 0x75, 0xc1, 0xd6, 0xc7, 0xa1, 0xfe, 0x90, 0xe5, 0x02, 0x6f, 0x10, 0x52,
+	0x2e, 0xd8, 0x79, 0x70, 0x6b, 0xc9, 0x7d, 0x39, 0x35, 0xd2, 0x58, 0x44, 0x0b, 0x89, 0x62, 0xe4,
+	0x85, 0x43, 0x0a, 0xc9, 0xab, 0x2b, 0x79, 0x2c, 0x4e, 0x52, 0xee, 0xa1, 0x87, 0x8f, 0xbc, 0x93,
+	0xf1, 0xd0, 0x13, 0xa3, 0x7e, 0x97, 0xe5, 0xf0, 0x17, 0x8d, 0xba, 0x1c, 0x34, 0x44, 0x3f, 0xf4,
+	0x67, 0x33, 0x6f, 0x08, 0xb9, 0xcc, 0x22, 0x8e, 0xf2, 0xdf, 0x4d, 0xb1, 0x2d, 0xa9, 0x17, 0xf8,
+	0xf8, 0xcf, 0xef, 0xa3, 0xb1, 0x3f, 0x66, 0x12, 0xfe, 0xa8, 0xb3, 0xec, 0x74, 0x10, 0x7e, 0x41,
+	0xe6, 0xe5, 0xbf, 0xc1, 0xb3, 0xa4, 0x9f, 0x91, 0x51, 0x63, 0x40, 0xf9, 0x35, 0x26, 0x83, 0xe9,
+	0x20, 0x1a, 0x1e, 0xe9, 0xf7, 0x13, 0x79, 0x6e, 0x77, 0x69, 0x4e, 0x9c, 0x4a, 0x4d, 0x71, 0xbf,
+	0xca, 0x98, 0x7f, 0x3a, 0x75, 0x5f, 0x8d, 0xbd, 0xc9, 0x08, 0xf3, 0x4c, 0xf1, 0xc1, 0xcd, 0x25,
+	0x36, 0x49, 0x62, 0x15, 0xfc, 0xd3, 0x69, 0x83, 0x13, 0x97, 0xff, 0x7b, 0x0a, 0x43, 0x5d, 0x22,
+	0xf5, 0x6f, 0x33, 0x40, 0xbb, 0xc3, 0xc9, 0x20, 0x14, 0x66, 0x5d, 0x2d, 0x8b, 0x53, 0x58, 0x79,
+	0xff, 0x74, 0x5a, 0x83, 0x5f, 0xfa, 0x0f, 0x60, 0x0e, 0x2f, 0x51, 0x0a, 0x9f, 0x7a, 0xf1, 0xc1,
+	0x7b, 0x2b, 0x19, 0x25, 0x55, 0xf3, 0x82, 0x95, 0xf7, 0x5f, 0xbd, 0xe4, 0xaa, 0xe8, 0xcf, 0x99,
+	0xee, 0x9d, 0xce, 0xbd, 0x60, 0x0c, 0x19, 0xcd, 0x0b, 0x48, 0xce, 0x06, 0x97, 0xf3, 0x8d, 0x95,
+	0x72, 0x96, 0xc9, 0x9b, 0x17, 0xac, 0x4b, 0x2a, 0x94, 0x4b, 0xae, 0x6e, 0xb2, 0x0d, 0x8e, 0x2d,
+	0xff, 0xf1, 0x0e, 0x3a, 0x51, 0x42, 0x89, 0xb3, 0xb7, 0x15, 0x95, 0x92, 0x9b, 0x3c, 0x24, 0x9b,
+	0xdf, 0x60, 0xf9, 0xa3, 0x41, 0xe8, 0xf2, 0x75, 0x06, 0xf7, 0xcd, 0x5b, 0x9b, 0x47, 0x83, 0xb0,
+	0x03, 0x4b, 0x7d, 0x85, 0x65, 0xc1, 0x73, 0xd0, 0x29, 0x9a, 0x17, 0x2c, 0xfe, 0xa4, 0x7f, 0xc0,
+	0xb6, 0xe7, 0x47, 0x6f, 0xc2, 0xf1, 0x70, 0x30, 0xe1, 0x3e, 0x87, 0xde, 0xd1, 0xbc, 0x60, 0x6d,
+	0x09, 0x70, 0x1f, 0xc8, 0xbe, 0xc1, 0x76, 0x28, 0xed, 0x7a, 0xd1, 0x00, 0x42, 0x9e, 0x9b, 0x20,
+	0x0b, 0x9b, 0x10, 0x87, 0x77, 0x08, 0xac, 0xdf, 0x60, 0x9b, 0x5e, 0x74, 0xe4, 0x8e, 0xc2, 0x88,
+	0x67, 0xb8, 0xad, 0xe6, 0x05, 0x2b, 0xe7, 0x45, 0x47, 0xf5, 0x30, 0x12, 0xa8, 0x30, 0x18, 0xf2,
+	0x14, 0x27, 0x50, 0x76, 0x30, 0xd4, 0x77, 0x59, 0x1e, 0x50, 0x7c, 0xc2, 0x79, 0x52, 0x00, 0x88,
+	0x1d, 0x98, 0xd3, 0x2e, 0xcb, 0x9f, 0x4c, 0x06, 0x33, 0xf7, 0x64, 0x3c, 0xc2, 0x1c, 0x07, 0x48,
+	0x80, 0x1c, 0x8c, 0x47, 0x12, 0x39, 0x1f, 0xce, 0x31, 0xcd, 0x09, 0x64, 0x7f, 0x38, 0x87, 0x11,
+	0xc7, 0x73, 0x77, 0x14, 0x0e, 0xe7, 0x98, 0xe4, 0x60, 0xc4, 0xf1, 0xbc, 0x1e, 0x0e, 0xe7, 0xfa,
+	0x75, 0x96, 0x1b, 0xcf, 0x5d, 0x6f, 0x38, 0x2b, 0x6d, 0x11, 0x66, 0x63, 0x3c, 0x37, 0x87, 0x33,
+	0x10, 0x38, 0x9e, 0x63, 0x5d, 0x52, 0xda, 0x16, 0x02, 0xc7, 0xf3, 0x3e, 0xaf, 0x4a, 0x38, 0xf2,
+	0xe4, 0x21, 0x9f, 0xc3, 0x4e, 0x8c, 0x3c, 0x79, 0x48, 0x93, 0xe0, 0x48, 0x98, 0xfb, 0x45, 0x15,
+	0x49, 0x93, 0x8f, 0x86, 0x73, 0xce, 0xa8, 0x09, 0x55, 0xa2, 0xe1, 0x1c, 0xf8, 0x08, 0x05, 0x6c,
+	0x97, 0x14, 0x14, 0x71, 0x1d, 0x8f, 0x90, 0x4b, 0x17, 0xa8, 0xe3, 0x91, 0xe0, 0x02, 0x14, 0x70,
+	0x5d, 0x56, 0x50, 0xc0, 0xb5, 0xcb, 0xf2, 0xe1, 0x30, 0x42, 0xb6, 0x2b, 0x42, 0x11, 0x80, 0x90,
+	0x96, 0x1c, 0x09, 0x8c, 0x57, 0x55, 0x24, 0x70, 0xde, 0x63, 0xc5, 0xf1, 0x70, 0x0a, 0x93, 0xe0,
+	0x4b, 0x71, 0x8d, 0xf0, 0x0c, 0x81, 0x7c, 0x35, 0x62, 0x92, 0xa1, 0x3f, 0xf2, 0x4a, 0xd7, 0x93,
+	0x24, 0x35, 0x7f, 0xe4, 0x81, 0x6d, 0x07, 0xc1, 0xdc, 0xf5, 0xe7, 0xa5, 0x92, 0xb0, 0xed, 0x20,
+	0x98, 0xf7, 0xf8, 0x7a, 0x00, 0x22, 0x9c, 0x0f, 0x4a, 0x37, 0x84, 0xce, 0x83, 0x60, 0x6e, 0xcf,
+	0x07, 0x02, 0x15, 0xcd, 0x07, 0xa5, 0x9b, 0x0a, 0xca, 0x89, 0x51, 0xe1, 0xd1, 0xa0, 0xb4, 0x2b,
+	0xfc, 0x06, 0xb8, 0x8e, 0x62, 0xae, 0xa3, 0x41, 0xe9, 0x96, 0x82, 0x72, 0x8e, 0x06, 0xb4, 0x1a,
+	0x8f, 0xb8, 0x11, 0x6e, 0x13, 0x0e, 0x56, 0xe3, 0x51, 0xbc, 0x54, 0x8f, 0xb8, 0x11, 0xde, 0x53,
+	0x91, 0xc2, 0x08, 0x80, 0x7c, 0x35, 0x19, 0xbc, 0xf4, 0x26, 0xa5, 0x3b, 0x72, 0x86, 0xf3, 0x93,
+	0x47, 0x0d, 0x0e, 0x93, 0x46, 0x78, 0x84, 0x76, 0xba, 0x9b, 0x30, 0xc2, 0xa3, 0x84, 0x9d, 0x1e,
+	0xa1, 0x9d, 0xee, 0x25, 0x49, 0xb8, 0x9d, 0xbe, 0xce, 0x76, 0xf8, 0x40, 0xb3, 0x91, 0x1b, 0x0d,
+	0x82, 0x43, 0x2f, 0x2a, 0x95, 0x49, 0x97, 0x2d, 0x80, 0x77, 0x47, 0x0e, 0x87, 0xea, 0x77, 0x49,
+	0xa1, 0xd9, 0xc8, 0x0d, 0xc3, 0x49, 0xe9, 0x6b, 0x44, 0x54, 0x40, 0x22, 0x3b, 0x9c, 0xa8, 0x14,
+	0xd1, 0x64, 0x52, 0x7a, 0x3f, 0x49, 0xe1, 0x4c, 0x26, 0xfa, 0x1d, 0xc6, 0xa6, 0xf3, 0x49, 0xe8,
+	0xe2, 0x9c, 0x3e, 0x20, 0x6d, 0x0a, 0x00, 0x6b, 0xf3, 0x29, 0xdd, 0x60, 0x9b, 0x9c, 0x20, 0x1a,
+	0x96, 0xbe, 0x2e, 0x16, 0x00, 0x00, 0x0e, 0xb7, 0x16, 0x47, 0xbd, 0xf4, 0xc3, 0xd2, 0x37, 0x84,
+	0xcb, 0x00, 0xa4, 0xea, 0x87, 0x80, 0x9c, 0xbf, 0x7c, 0xe9, 0x8e, 0xc3, 0xf1, 0xa8, 0xf4, 0xa1,
+	0x40, 0xce, 0x5f, 0xbe, 0x6c, 0x85, 0xe3, 0x91, 0x7e, 0x9b, 0x15, 0xa2, 0xe3, 0xd9, 0xcc, 0x9b,
+	0xc0, 0xb6, 0xfe, 0x4d, 0xca, 0x18, 0x79, 0x04, 0xb5, 0x46, 0xd2, 0xd2, 0xde, 0x69, 0x74, 0x34,
+	0x0a, 0x4a, 0x15, 0xd5, 0xd2, 0x26, 0x87, 0xe9, 0x1f, 0xb3, 0xcb, 0xc9, 0xc4, 0x83, 0xb9, 0x6d,
+	0xcc, 0x65, 0xa5, 0xac, 0x4b, 0x89, 0xec, 0xc3, 0xf3, 0x5c, 0x99, 0x6d, 0x51, 0x06, 0x42, 0xd2,
+	0x5f, 0xe7, 0xc6, 0x48, 0x59, 0x0c, 0xd3, 0x90, 0x4a, 0x13, 0x06, 0x43, 0xa4, 0xf9, 0x42, 0xa1,
+	0xb1, 0x83, 0x21, 0xa7, 0x79, 0x9f, 0x6d, 0x8b, 0xb4, 0x83, 0x44, 0x53, 0xae, 0x5e, 0xca, 0x2a,
+	0x52, 0xee, 0x11, 0x54, 0x22, 0x23, 0x20, 0x55, 0x20, 0xa8, 0x28, 0x2d, 0x24, 0xa8, 0xa4, 0x52,
+	0xa1, 0x4a, 0xa5, 0x68, 0x45, 0xe1, 0x81, 0x44, 0xbf, 0x49, 0x44, 0x0c, 0x63, 0x44, 0xa5, 0x89,
+	0x04, 0xcd, 0xdf, 0x50, 0x68, 0x1c, 0xa2, 0xf9, 0x80, 0x8f, 0xf6, 0x28, 0xd6, 0xe9, 0x6f, 0xa6,
+	0x68, 0x7e, 0x45, 0x0a, 0x80, 0x04, 0x99, 0x54, 0xea, 0x6f, 0x25, 0xc8, 0x84, 0x56, 0xdf, 0x62,
+	0x9a, 0x12, 0x0e, 0x48, 0xf9, 0x5b, 0x29, 0x1a, 0x76, 0x27, 0x0e, 0x0a, 0x21, 0x53, 0x78, 0x03,
+	0x52, 0xfe, 0x7d, 0x41, 0x59, 0x24, 0x9f, 0xe0, 0x64, 0xb0, 0x9d, 0x08, 0xbf, 0x40, 0xba, 0xdf,
+	0x4e, 0xd1, 0x8a, 0x6e, 0x09, 0xef, 0x48, 0x0c, 0x8e, 0x1e, 0x82, 0xa4, 0xbf, 0x93, 0x18, 0x1c,
+	0xfd, 0x04, 0x88, 0x61, 0x47, 0x3d, 0x19, 0x4c, 0x8e, 0xbd, 0x6a, 0x0e, 0x2b, 0x9d, 0xb2, 0xcb,
+	0x6e, 0xae, 0xdf, 0x95, 0xa1, 0x46, 0x06, 0x0c, 0x9e, 0x5a, 0xa8, 0xb8, 0x82, 0x22, 0xa3, 0x89,
+	0xe7, 0x3a, 0xf0, 0x11, 0x85, 0x89, 0x0a, 0xda, 0x04, 0xac, 0xfc, 0xaf, 0xb3, 0x78, 0xf6, 0x1c,
+	0xf0, 0x22, 0x4e, 0xff, 0x38, 0xb1, 0x67, 0x2f, 0x17, 0x9b, 0x48, 0xa6, 0xd6, 0x48, 0xdf, 0x61,
+	0x39, 0xff, 0x38, 0x9a, 0x1f, 0x47, 0x54, 0x6c, 0xbe, 0xb7, 0x8e, 0x07, 0xa9, 0x20, 0x28, 0xf1,
+	0x97, 0xfe, 0x03, 0x0a, 0xca, 0x28, 0x9a, 0xf0, 0x2d, 0xbd, 0xb8, 0xe2, 0xe8, 0x49, 0xbc, 0x82,
+	0x4e, 0x84, 0xad, 0x13, 0x4d, 0xf4, 0x07, 0x2c, 0x3b, 0x3f, 0x0e, 0x8f, 0xa8, 0x22, 0x5a, 0xab,
+	0x2a, 0xd0, 0xf0, 0x5a, 0xe1, 0x38, 0x3c, 0x82, 0x21, 0xe7, 0xfe, 0x9c, 0x8b, 0xa3, 0x0a, 0x68,
+	0xed, 0x90, 0x82, 0x8e, 0x27, 0x03, 0x7f, 0xde, 0x99, 0x4f, 0x42, 0xfd, 0x33, 0xb6, 0x71, 0x18,
+	0xf8, 0xc7, 0x73, 0x5e, 0x18, 0x14, 0x1f, 0xdc, 0x5e, 0xc7, 0xcb, 0x89, 0x60, 0xd3, 0xe0, 0x3f,
+	0xf4, 0x6f, 0xb3, 0xdc, 0xec, 0x35, 0x9f, 0xe6, 0xe6, 0xd9, 0x26, 0x42, 0x2a, 0x60, 0x9c, 0xbd,
+	0x86, 0x29, 0x3e, 0x66, 0x85, 0xd0, 0x8b, 0xa8, 0x62, 0xcb, 0x73, 0xde, 0x7b, 0xeb, 0x78, 0x25,
+	0x21, 0xe4, 0xa7, 0xd0, 0x8b, 0xb0, 0xf8, 0xfb, 0x7c, 0xc1, 0x05, 0x0a, 0x5c, 0xc8, 0xfb, 0xeb,
+	0x84, 0xa8, 0xb4, 0x90, 0xc4, 0xd5, 0xe7, 0x6a, 0x9e, 0xe5, 0x90, 0xac, 0xfc, 0x18, 0xcb, 0xbd,
+	0xc4, 0xc2, 0xf2, 0x43, 0x1c, 0x94, 0x5f, 0x29, 0x3a, 0xc4, 0xd1, 0xf1, 0x14, 0x4e, 0x69, 0xf1,
+	0x69, 0x38, 0x37, 0x1d, 0x9c, 0xc2, 0x41, 0xf8, 0x63, 0x3c, 0xa0, 0x2d, 0x2c, 0x2f, 0x14, 0x7f,
+	0xd2, 0x25, 0xe8, 0x38, 0x4c, 0xcb, 0x5d, 0xbe, 0x8f, 0x67, 0x23, 0x65, 0x55, 0xa1, 0xf4, 0xf7,
+	0xa2, 0x23, 0x2f, 0x90, 0x1e, 0xbb, 0x6d, 0xc5, 0x80, 0xf2, 0xa7, 0x89, 0x21, 0xc4, 0x72, 0x7e,
+	0x09, 0xd3, 0xaf, 0x30, 0x6d, 0x71, 0x1d, 0x41, 0x29, 0xfe, 0x43, 0x39, 0xa3, 0xf3, 0xe7, 0xd6,
+	0xa8, 0x5c, 0x49, 0x18, 0x02, 0x97, 0x4f, 0xbf, 0x2a, 0x97, 0x9b, 0xfa, 0x03, 0x7c, 0x31, 0xcb,
+	0x4d, 0x76, 0x65, 0xd5, 0x72, 0xe9, 0x1f, 0x53, 0x15, 0xcd, 0xa9, 0xcf, 0x3e, 0x5f, 0x50, 0xb9,
+	0xfd, 0x94, 0x5d, 0x5f, 0xb3, 0x66, 0x4b, 0x21, 0x9f, 0x5a, 0x0e, 0x79, 0x58, 0x28, 0x5e, 0xff,
+	0xc2, 0x8a, 0x6c, 0x59, 0xfc, 0x77, 0xf9, 0xf7, 0x33, 0x68, 0xde, 0xf1, 0x2c, 0x8c, 0x82, 0x63,
+	0xcc, 0x05, 0xba, 0x92, 0x0b, 0xb6, 0x29, 0xda, 0x9b, 0x8c, 0x1d, 0xfa, 0x91, 0x8f, 0xc7, 0x60,
+	0x8a, 0xf8, 0xe5, 0x43, 0x84, 0x22, 0xc5, 0x8d, 0xc9, 0x61, 0xb7, 0x86, 0x27, 0x7e, 0x66, 0xd6,
+	0x1d, 0xb6, 0xf3, 0x3a, 0x18, 0x47, 0x4a, 0x3d, 0x8e, 0x39, 0xe0, 0x5b, 0x67, 0x4a, 0x4b, 0xb2,
+	0x40, 0xf1, 0xce, 0x21, 0xb2, 0x78, 0x7f, 0xcc, 0x36, 0xd1, 0x2c, 0x21, 0xe5, 0x85, 0xf7, 0xcf,
+	0x14, 0x47, 0xb4, 0x10, 0xe3, 0xf4, 0x53, 0xff, 0x2e, 0xdb, 0x98, 0x7a, 0x60, 0x3a, 0xcc, 0x0f,
+	0xe5, 0x33, 0xf9, 0x39, 0x25, 0xc4, 0x2b, 0xff, 0xa1, 0xf7, 0x16, 0xac, 0x9f, 0x5b, 0xd3, 0x11,
+	0x53, 0x45, 0x9c, 0x19, 0x72, 0x39, 0x5c, 0xaa, 0xf2, 0xb7, 0x71, 0x1b, 0x58, 0x6d, 0xd7, 0x33,
+	0x9a, 0x48, 0xe5, 0x01, 0x7b, 0xef, 0x6c, 0x13, 0xea, 0x37, 0x59, 0x5e, 0xae, 0x00, 0x36, 0x44,
+	0xe4, 0xb3, 0xfe, 0x35, 0xb6, 0x9d, 0x2c, 0x5a, 0xd2, 0x9c, 0x60, 0x6b, 0xaa, 0x54, 0x2b, 0xe5,
+	0x36, 0x7a, 0xe3, 0x0a, 0xb3, 0xea, 0x9f, 0xc4, 0xab, 0x81, 0xcd, 0xb7, 0xeb, 0x6b, 0x12, 0x8f,
+	0x34, 0x7f, 0xf9, 0x01, 0x36, 0x29, 0x97, 0x8c, 0xcc, 0x53, 0x03, 0xfc, 0x50, 0x26, 0xc9, 0x9f,
+	0x5b, 0xa3, 0xf2, 0x01, 0xf6, 0x0a, 0xd7, 0x59, 0xf5, 0x17, 0x0e, 0x8a, 0x3f, 0xc9, 0x60, 0x27,
+	0x83, 0xeb, 0x3b, 0xf5, 0xa9, 0x25, 0xe7, 0x7f, 0x31, 0xf6, 0xc8, 0x52, 0xf4, 0xa4, 0xdf, 0x61,
+	0x45, 0xfc, 0xa5, 0x5a, 0x89, 0x21, 0x88, 0x17, 0x01, 0xea, 0x0a, 0x65, 0x92, 0x6d, 0xbe, 0xef,
+	0xb1, 0xcd, 0xa1, 0x3f, 0x9d, 0x0e, 0x66, 0x78, 0xb6, 0xdf, 0x59, 0x91, 0xe1, 0xc5, 0xf8, 0x2e,
+	0x11, 0x5a, 0x82, 0x43, 0xbf, 0xc7, 0xb6, 0xc6, 0xa3, 0x89, 0xe7, 0x46, 0xe3, 0xa9, 0xe7, 0x1f,
+	0x47, 0xd4, 0xff, 0x28, 0x02, 0xcc, 0x41, 0x10, 0x90, 0x1c, 0x0d, 0x82, 0x91, 0x24, 0xc1, 0xae,
+	0x5d, 0x11, 0x60, 0x82, 0xe4, 0x26, 0xcb, 0xcf, 0x83, 0xb1, 0x1f, 0x8c, 0xa3, 0x37, 0xd4, 0xba,
+	0x93, 0xcf, 0xfa, 0x2e, 0x2b, 0x60, 0x3f, 0x0c, 0x54, 0xc7, 0xc6, 0x5d, 0x1e, 0x01, 0x2d, 0xde,
+	0xbd, 0xf4, 0x8f, 0x23, 0x3c, 0x75, 0x63, 0xef, 0x6e, 0xd3, 0x3f, 0x8e, 0xf8, 0x71, 0x7b, 0x97,
+	0x15, 0x00, 0x85, 0xdb, 0x25, 0x76, 0xef, 0x80, 0x76, 0x8f, 0x67, 0x54, 0xd9, 0x40, 0x2d, 0xaa,
+	0x0d, 0xd4, 0xbf, 0xc4, 0x36, 0x78, 0x07, 0x86, 0x9f, 0x67, 0x8b, 0x0f, 0xae, 0xad, 0xee, 0xcf,
+	0x58, 0x48, 0xa4, 0x3f, 0x66, 0x5b, 0xca, 0x82, 0x87, 0xa5, 0x6d, 0xee, 0x60, 0xb7, 0xce, 0x8a,
+	0x35, 0x2b, 0xc1, 0x51, 0xfe, 0x49, 0x0a, 0x4b, 0x9f, 0x97, 0xc7, 0xc3, 0x2f, 0xbc, 0x08, 0x16,
+	0xf7, 0xb5, 0x37, 0x3e, 0x3c, 0x12, 0x3b, 0x18, 0x3d, 0x41, 0x91, 0xf5, 0x9a, 0x37, 0x86, 0xf8,
+	0x34, 0x71, 0x1b, 0x2b, 0x70, 0x08, 0x9f, 0xe8, 0x1d, 0x56, 0x44, 0x34, 0x4e, 0x15, 0x57, 0x17,
+	0x39, 0x70, 0xb2, 0x9f, 0xa8, 0x29, 0xe9, 0x7c, 0x41, 0xf0, 0x1f, 0xa9, 0x79, 0x84, 0xdb, 0x0e,
+	0x78, 0xde, 0xf7, 0x63, 0x2f, 0x59, 0xd7, 0x91, 0x93, 0xc4, 0xcb, 0x6e, 0x72, 0x3f, 0xf1, 0xde,
+	0x60, 0x77, 0x0d, 0xab, 0x52, 0xd4, 0xa9, 0x5b, 0x5e, 0x26, 0xb1, 0xe5, 0xc1, 0x74, 0xd0, 0x60,
+	0xeb, 0xa7, 0x83, 0x78, 0x4b, 0xd0, 0x95, 0x7f, 0x3b, 0xc5, 0x76, 0x78, 0x47, 0x70, 0x00, 0xcf,
+	0x50, 0x2f, 0x24, 0xdd, 0x2a, 0xb5, 0xe0, 0x56, 0xd7, 0xd9, 0xe6, 0x78, 0xa6, 0x9a, 0x3b, 0x37,
+	0x9e, 0x71, 0x5b, 0x2b, 0xa6, 0xcc, 0x9c, 0xcf, 0x94, 0x32, 0xae, 0xb3, 0x6a, 0x5c, 0x93, 0x79,
+	0x49, 0x9f, 0xf1, 0xec, 0x6c, 0x75, 0x7e, 0x55, 0xb6, 0x60, 0xd3, 0x6b, 0x02, 0x54, 0x0a, 0x5a,
+	0xec, 0xc3, 0x9e, 0x11, 0xf7, 0x71, 0x2e, 0xc9, 0x26, 0x72, 0x89, 0x8c, 0x82, 0x8d, 0xf3, 0x44,
+	0x81, 0x98, 0x5e, 0x4e, 0x99, 0xde, 0x3f, 0xce, 0x60, 0x11, 0xc3, 0x99, 0x02, 0x6f, 0xea, 0x9f,
+	0x78, 0xeb, 0x53, 0x97, 0x1a, 0xfb, 0xe9, 0x85, 0xd8, 0xff, 0xbe, 0x9c, 0x78, 0x86, 0x4f, 0xfc,
+	0xfd, 0xd5, 0x99, 0x89, 0x86, 0x38, 0x6b, 0xee, 0xd9, 0xe4, 0xdc, 0xef, 0xb1, 0xad, 0xd1, 0x71,
+	0x30, 0xa0, 0x42, 0x68, 0x28, 0xd2, 0x96, 0x80, 0xd9, 0xde, 0x10, 0xb6, 0x1e, 0x49, 0x32, 0x03,
+	0x1a, 0xcc, 0x5b, 0x92, 0xaf, 0x1b, 0x7a, 0xc3, 0xa5, 0xf4, 0xb7, 0xf9, 0xe5, 0xe9, 0x2f, 0xbf,
+	0x9c, 0xfe, 0xee, 0xb1, 0x2d, 0x5a, 0xc0, 0xa1, 0x7f, 0x3c, 0xc3, 0x4c, 0x96, 0xb5, 0x8a, 0x08,
+	0xab, 0x01, 0x08, 0x72, 0xc0, 0xcb, 0x37, 0x91, 0x47, 0x04, 0x8c, 0x13, 0x14, 0x00, 0x82, 0x68,
+	0xb9, 0x66, 0x6f, 0xce, 0xb1, 0x66, 0xe5, 0x3f, 0x49, 0xe3, 0x1e, 0x87, 0xdb, 0xd9, 0xcb, 0xc1,
+	0x6c, 0x74, 0xde, 0x17, 0x71, 0x0a, 0x87, 0x12, 0xac, 0x3a, 0xcb, 0x06, 0x83, 0xc8, 0xa3, 0xe5,
+	0xe3, 0xbf, 0xb9, 0xc2, 0xc7, 0x41, 0x18, 0xb9, 0xe1, 0xf8, 0x37, 0x3c, 0x72, 0xbd, 0x02, 0x87,
+	0xd8, 0xe3, 0xdf, 0xf0, 0xf4, 0x47, 0x2c, 0x3b, 0x0a, 0xfc, 0x39, 0xd5, 0x48, 0x67, 0x0e, 0x04,
+	0x74, 0x70, 0x7e, 0x82, 0x7f, 0xf5, 0xcf, 0x59, 0x71, 0x14, 0x0e, 0xe7, 0xb0, 0xe4, 0x83, 0xe0,
+	0x8b, 0xb5, 0x4d, 0x64, 0x95, 0x3d, 0x26, 0x6f, 0x5e, 0xb0, 0x18, 0x3c, 0x5a, 0xfc, 0x49, 0xef,
+	0xae, 0x2c, 0x96, 0x3e, 0x3c, 0x4b, 0xd8, 0xb9, 0x6a, 0xa5, 0xab, 0x58, 0xf7, 0x2f, 0x4c, 0xa1,
+	0xfc, 0x3d, 0x2c, 0xa1, 0x56, 0xab, 0x06, 0xf6, 0x9a, 0x07, 0xde, 0xd0, 0x9d, 0x78, 0x27, 0x9e,
+	0xa8, 0xdb, 0x0b, 0x00, 0x69, 0x03, 0xa0, 0x6c, 0xb0, 0xdd, 0x33, 0x54, 0x39, 0x4f, 0x81, 0x51,
+	0xfe, 0xb7, 0x94, 0x74, 0x50, 0xc6, 0x39, 0x73, 0xba, 0x24, 0x5e, 0xce, 0xe9, 0x72, 0x0f, 0x4d,
+	0xab, 0x7b, 0xa8, 0x5a, 0x25, 0x65, 0x12, 0x55, 0x92, 0xfe, 0x1d, 0xb6, 0x01, 0x9a, 0x8b, 0xb4,
+	0x5d, 0x3e, 0xcb, 0xd0, 0xf4, 0x1e, 0x14, 0x19, 0xca, 0x3f, 0x46, 0xcd, 0xbd, 0x20, 0xf0, 0x03,
+	0x77, 0x1a, 0x1e, 0xea, 0xf7, 0x59, 0x4e, 0xe9, 0x39, 0xac, 0x4a, 0xc3, 0x24, 0x80, 0xc8, 0xe4,
+	0x51, 0x22, 0xad, 0x1c, 0x25, 0x74, 0x96, 0xe5, 0x7d, 0xc5, 0x0c, 0xbd, 0x46, 0xf4, 0x47, 0xde,
+	0xca, 0x6c, 0xfd, 0x5b, 0x29, 0x5c, 0x39, 0x1c, 0x3e, 0xd1, 0x05, 0x01, 0x5d, 0x56, 0x9d, 0x52,
+	0x6e, 0xb0, 0xbc, 0x77, 0x8a, 0x1b, 0x1a, 0x0d, 0xb9, 0xe9, 0x9d, 0xce, 0x79, 0x53, 0x73, 0x71,
+	0xa9, 0x32, 0x67, 0xd4, 0x82, 0xaa, 0x16, 0x27, 0x14, 0xb3, 0xc7, 0x93, 0x68, 0x3c, 0x1f, 0xf0,
+	0x37, 0x6e, 0x3f, 0x3a, 0xf6, 0xc2, 0x48, 0xff, 0x34, 0x11, 0xb3, 0x77, 0x96, 0xad, 0x2a, 0x39,
+	0x94, 0x90, 0x5d, 0xbd, 0x78, 0x3a, 0xcb, 0xbe, 0xf4, 0x47, 0x6f, 0xb8, 0x4e, 0x5b, 0x16, 0xff,
+	0x5d, 0x8e, 0xc8, 0x9b, 0x95, 0x71, 0xe7, 0x93, 0x37, 0xbf, 0xec, 0x51, 0x7f, 0x37, 0x85, 0xef,
+	0x98, 0x47, 0x5e, 0x38, 0xe4, 0x3e, 0xf5, 0x2a, 0xe0, 0xbf, 0xf9, 0x78, 0x05, 0x6b, 0x73, 0xfa,
+	0x2a, 0xa8, 0x03, 0x0a, 0xdf, 0xe0, 0xc9, 0x57, 0x8d, 0x05, 0x2b, 0x77, 0xf4, 0x5a, 0x20, 0x42,
+	0x42, 0xe0, 0x8b, 0xe6, 0x5c, 0x88, 0x88, 0xdb, 0x8c, 0x85, 0x5e, 0x30, 0x1e, 0x4c, 0xdc, 0xd9,
+	0xf1, 0x94, 0x5b, 0xb8, 0x60, 0x15, 0x10, 0xd2, 0x3d, 0x9e, 0x02, 0xdf, 0x08, 0x87, 0xe5, 0xc9,
+	0xa5, 0x60, 0xe5, 0x46, 0x73, 0xe0, 0x2b, 0xff, 0x51, 0x8a, 0x5d, 0x93, 0x3b, 0x4e, 0x18, 0x0d,
+	0xa2, 0x50, 0xae, 0xc0, 0x19, 0xef, 0xd0, 0xd5, 0x02, 0x35, 0x7d, 0x46, 0x81, 0x9a, 0x59, 0x28,
+	0x50, 0xd7, 0x6d, 0xce, 0x0b, 0x85, 0xfe, 0xc6, 0x52, 0xa1, 0x2f, 0x77, 0x82, 0xdc, 0x79, 0x76,
+	0x82, 0x3f, 0xcc, 0x60, 0x61, 0x14, 0x4f, 0x4a, 0xdf, 0x61, 0xe9, 0xf1, 0x88, 0xbf, 0x99, 0xc9,
+	0x5a, 0xe9, 0xf1, 0x99, 0x17, 0x04, 0x16, 0x77, 0xd1, 0xf4, 0x39, 0x76, 0xd1, 0xcc, 0x8a, 0x5d,
+	0x54, 0x2d, 0x01, 0xb2, 0x0b, 0x25, 0xc0, 0x57, 0x73, 0xc0, 0x90, 0x8e, 0xb7, 0xa9, 0x3a, 0x5e,
+	0x6c, 0xe4, 0x7c, 0xc2, 0xc8, 0x5f, 0xe1, 0x7e, 0xfc, 0xff, 0xe8, 0x24, 0xf1, 0xc7, 0x29, 0xdc,
+	0x1f, 0x06, 0x87, 0x87, 0x81, 0x77, 0x38, 0x88, 0xbc, 0xff, 0x6f, 0x3c, 0xf4, 0xc7, 0xec, 0xc6,
+	0xea, 0x89, 0x41, 0x12, 0x5a, 0x5c, 0xa8, 0xd4, 0x97, 0x2d, 0x54, 0x7a, 0x71, 0xa1, 0x6e, 0x33,
+	0xc6, 0x87, 0x46, 0x34, 0x95, 0x29, 0x00, 0xe1, 0xe8, 0xf2, 0x9f, 0x67, 0x30, 0xf5, 0xa3, 0xf1,
+	0xe8, 0x1a, 0x87, 0x3b, 0x0f, 0xfc, 0xb9, 0x17, 0xf0, 0xfa, 0x54, 0x4d, 0x82, 0xcb, 0x95, 0xc3,
+	0x32, 0x9b, 0x9a, 0x0d, 0x0f, 0x16, 0x96, 0x1d, 0x9b, 0x59, 0x1f, 0x9f, 0x47, 0x8a, 0xca, 0xc7,
+	0xdf, 0x75, 0x29, 0xcf, 0xba, 0xc5, 0x8a, 0x33, 0xef, 0x34, 0x52, 0x6f, 0x8a, 0x14, 0x1f, 0xdc,
+	0x3f, 0x8f, 0x58, 0x85, 0x0d, 0x6a, 0x25, 0x78, 0xa4, 0xfb, 0x25, 0x7b, 0x8b, 0x6d, 0xad, 0x6f,
+	0x9d, 0x47, 0xde, 0x8a, 0xee, 0xd6, 0xf7, 0x58, 0xc6, 0x3f, 0x9d, 0xae, 0x2d, 0xdc, 0x56, 0x08,
+	0xf1, 0x4f, 0xa7, 0xcd, 0x0b, 0x16, 0x70, 0x81, 0xc5, 0x56, 0x54, 0x6c, 0xe7, 0xb2, 0xd8, 0x99,
+	0x95, 0x9b, 0x78, 0xeb, 0x51, 0x3e, 0x64, 0x5f, 0x3b, 0x87, 0xc5, 0x97, 0x02, 0x36, 0xf5, 0x73,
+	0x07, 0xec, 0xe7, 0xac, 0xfc, 0xe5, 0x6b, 0xa0, 0xbf, 0xcf, 0x76, 0xe2, 0x47, 0x77, 0x3c, 0xc2,
+	0x91, 0xb6, 0xad, 0x2d, 0xb9, 0x32, 0xad, 0x51, 0x58, 0xb6, 0xb1, 0xc5, 0xb6, 0xde, 0xfe, 0xbf,
+	0x48, 0x1b, 0xec, 0xb3, 0x75, 0x8e, 0x0f, 0xeb, 0x01, 0xbb, 0xa4, 0x7f, 0x3a, 0xe5, 0x1a, 0x65,
+	0xf0, 0xe2, 0x8c, 0x7f, 0x3a, 0x05, 0x5d, 0xfe, 0x61, 0x6a, 0xad, 0x05, 0xcf, 0x2c, 0x58, 0x57,
+	0xbc, 0x19, 0x4a, 0x14, 0x51, 0x99, 0x64, 0x11, 0xf5, 0x2d, 0x96, 0xb8, 0x0d, 0xe2, 0x52, 0xb5,
+	0x04, 0x9a, 0x68, 0x2a, 0xa2, 0x0e, 0x95, 0xd3, 0xef, 0xa5, 0x99, 0xbe, 0xa4, 0x53, 0x78, 0x56,
+	0x4e, 0x14, 0x57, 0xd4, 0xd2, 0xca, 0x15, 0xb5, 0x0f, 0xd8, 0x8e, 0xd2, 0x8a, 0x84, 0xfc, 0x95,
+	0xe1, 0xc9, 0x64, 0x3b, 0xee, 0x45, 0x42, 0x2e, 0x57, 0xc9, 0x78, 0xa3, 0x93, 0xd2, 0xa3, 0x24,
+	0x7b, 0x06, 0x40, 0xe5, 0x82, 0xd1, 0x46, 0xe2, 0x82, 0xd1, 0x1d, 0x56, 0x9c, 0x0e, 0x4e, 0x5d,
+	0x6f, 0x16, 0x05, 0x63, 0x2f, 0xa4, 0xad, 0x8c, 0x4d, 0x07, 0xa7, 0x26, 0x42, 0xf4, 0x3d, 0x38,
+	0x27, 0xf0, 0xf4, 0x03, 0xf8, 0x4d, 0xbe, 0x9a, 0xe7, 0x09, 0x23, 0xc8, 0x57, 0x96, 0xc2, 0x5a,
+	0xfe, 0x49, 0x0a, 0x1b, 0xee, 0x48, 0x8a, 0x7b, 0xff, 0xd9, 0x7b, 0x3d, 0xb8, 0xc6, 0x89, 0x9a,
+	0x49, 0xb7, 0xad, 0x22, 0xc2, 0x30, 0x97, 0xde, 0x63, 0x5b, 0x13, 0xdf, 0xff, 0xe2, 0x78, 0xae,
+	0x64, 0xd3, 0xac, 0x55, 0x44, 0x18, 0x92, 0x7c, 0x8d, 0x6d, 0x73, 0xdb, 0x79, 0x23, 0xa2, 0xc9,
+	0x52, 0x3f, 0x17, 0x81, 0x98, 0x74, 0x3f, 0xc1, 0x42, 0x4b, 0x5e, 0x42, 0x8b, 0xb7, 0xb1, 0x75,
+	0x17, 0xb9, 0xca, 0x7f, 0x4a, 0x75, 0x4c, 0xcc, 0xb3, 0xfe, 0xd2, 0xd7, 0x6d, 0xc6, 0x82, 0x53,
+	0xea, 0x98, 0x84, 0x62, 0x47, 0x08, 0x4e, 0xfb, 0x08, 0x00, 0x74, 0x14, 0xa3, 0x71, 0x0e, 0x85,
+	0x48, 0xa2, 0x6f, 0xb0, 0x7c, 0x70, 0xea, 0xc2, 0x06, 0x12, 0x92, 0xf2, 0x9b, 0xc1, 0x69, 0x15,
+	0x1e, 0xb9, 0xf5, 0x04, 0x0a, 0xb7, 0xbd, 0xcd, 0x88, 0x50, 0x38, 0x26, 0x1c, 0x03, 0xe7, 0xde,
+	0x88, 0xaf, 0x2a, 0x1f, 0xb3, 0x8e, 0x00, 0x1a, 0x53, 0xa0, 0x37, 0xc5, 0x98, 0x02, 0xbd, 0xcb,
+	0x0a, 0xc1, 0x29, 0x1e, 0x3f, 0x42, 0x2a, 0x55, 0xf2, 0xc1, 0xa9, 0xc9, 0x9f, 0x01, 0x19, 0x49,
+	0x24, 0x56, 0x2a, 0xf9, 0x48, 0x20, 0xef, 0xb2, 0xad, 0xe0, 0xd4, 0x7d, 0x15, 0x0c, 0xa6, 0x1e,
+	0x90, 0x50, 0xa1, 0xc2, 0x82, 0xd3, 0x06, 0x80, 0x4c, 0x7e, 0x6f, 0xb2, 0x18, 0x9c, 0xba, 0xfe,
+	0x89, 0x17, 0x70, 0x82, 0xa2, 0x50, 0xad, 0x77, 0xe2, 0x05, 0x80, 0xbf, 0xc5, 0x35, 0x1f, 0x06,
+	0x43, 0x8e, 0xde, 0x12, 0x83, 0xd7, 0x82, 0x21, 0x72, 0xb3, 0xa1, 0x3f, 0x99, 0x8c, 0x43, 0xaa,
+	0x5b, 0x68, 0xaf, 0x17, 0x90, 0xa5, 0x0a, 0x71, 0xe7, 0x1c, 0x15, 0xe2, 0xc5, 0xe5, 0x0a, 0xb1,
+	0xfc, 0x10, 0x5b, 0xfc, 0xd8, 0x12, 0x5c, 0x2a, 0x6d, 0xd6, 0xbd, 0x1c, 0x3b, 0xc0, 0xb8, 0xc7,
+	0x2e, 0x20, 0x3a, 0x9c, 0x17, 0xfc, 0xdf, 0x17, 0x0d, 0xe5, 0x9f, 0xa4, 0x31, 0x74, 0x14, 0x75,
+	0xce, 0x50, 0x83, 0x2f, 0x9f, 0xf7, 0x2a, 0x11, 0x37, 0xf9, 0xc0, 0x7b, 0x25, 0x83, 0x26, 0xa1,
+	0x4d, 0xe6, 0xcb, 0xb4, 0xc9, 0x2e, 0x96, 0x30, 0x5f, 0x55, 0x2f, 0xab, 0xca, 0xb6, 0xc8, 0x52,
+	0x7c, 0x46, 0x94, 0x5b, 0xee, 0xac, 0x69, 0xae, 0x0a, 0x73, 0x5a, 0x45, 0x7c, 0xb6, 0x81, 0x07,
+	0x8e, 0x6d, 0x3b, 0xb1, 0x65, 0xf8, 0xe1, 0xed, 0xcb, 0xee, 0x3c, 0x9e, 0xd9, 0xfa, 0x4d, 0xaf,
+	0x6d, 0xfd, 0x66, 0xce, 0xd9, 0xfa, 0x3d, 0x51, 0x97, 0x0a, 0xd2, 0xea, 0x1b, 0xd0, 0x48, 0x1e,
+	0x25, 0x8b, 0x6b, 0x35, 0x02, 0x12, 0xbc, 0xa1, 0xaa, 0x3f, 0xc0, 0x5b, 0xc8, 0xa2, 0x42, 0xbb,
+	0xb5, 0x86, 0x83, 0xd3, 0xe0, 0x1d, 0xe5, 0xb0, 0xfc, 0x77, 0x52, 0xe8, 0x7c, 0x88, 0x92, 0x9b,
+	0xce, 0x15, 0xb6, 0xc1, 0xef, 0x1a, 0x8a, 0x37, 0xb3, 0xfc, 0x61, 0xe9, 0x6a, 0x6e, 0x7a, 0xf9,
+	0x6a, 0x2e, 0x78, 0x01, 0xec, 0x0c, 0x5c, 0x9e, 0xd8, 0x75, 0x0b, 0xd3, 0xc1, 0x29, 0xaf, 0xc6,
+	0x43, 0xbd, 0x94, 0x6c, 0xf2, 0x6f, 0xc7, 0x3b, 0xf9, 0x77, 0xd4, 0xd6, 0xd1, 0x72, 0xfb, 0xe0,
+	0x8c, 0xd7, 0x5a, 0xbf, 0x8e, 0x2f, 0x8c, 0x95, 0xb6, 0x0c, 0xfa, 0x7a, 0x85, 0x5d, 0x22, 0x9f,
+	0xe5, 0x40, 0x35, 0x8c, 0x2e, 0x22, 0xa2, 0x3a, 0x98, 0x61, 0x32, 0xd7, 0xbf, 0xce, 0x2e, 0x72,
+	0xe7, 0x55, 0x28, 0x31, 0x9e, 0xb6, 0x01, 0x2c, 0xe9, 0xca, 0x7f, 0x40, 0x31, 0x85, 0x83, 0xc9,
+	0x98, 0x5a, 0xa3, 0xda, 0x42, 0xdd, 0x9e, 0x5e, 0xa8, 0xdb, 0x61, 0xd4, 0xb8, 0x25, 0xae, 0x06,
+	0xd6, 0x36, 0x82, 0x5b, 0x33, 0xa4, 0x2b, 0x33, 0xae, 0x46, 0x4c, 0x85, 0xd1, 0x55, 0x04, 0xa0,
+	0xa0, 0xf9, 0xaa, 0xe2, 0xeb, 0x31, 0x63, 0xb1, 0x0d, 0x29, 0xba, 0xee, 0x9d, 0xd5, 0x03, 0x43,
+	0x7f, 0x2a, 0xc0, 0x6f, 0x8c, 0xae, 0xdf, 0xc4, 0xb6, 0x3a, 0x92, 0x9c, 0xf9, 0x29, 0x80, 0x6a,
+	0xb9, 0xf4, 0x9a, 0x2e, 0x5c, 0xe6, 0xe7, 0xed, 0xc2, 0xfd, 0x2b, 0x72, 0x69, 0x24, 0x90, 0x2e,
+	0x4d, 0x17, 0xe1, 0xf1, 0x9d, 0x75, 0x4a, 0x5e, 0x84, 0xef, 0xf0, 0x97, 0xa6, 0xb7, 0x69, 0xd2,
+	0xe8, 0xf4, 0xb4, 0x4e, 0x00, 0x71, 0x56, 0x3a, 0x7e, 0x66, 0x85, 0xe3, 0x93, 0x7c, 0xd1, 0x3a,
+	0x14, 0xf2, 0xc1, 0x75, 0x24, 0x72, 0xe8, 0x4f, 0xfc, 0x80, 0x56, 0x06, 0x90, 0x35, 0x78, 0x2e,
+	0xff, 0x58, 0x75, 0x29, 0x8c, 0xfd, 0xcf, 0x64, 0xdd, 0x95, 0x5a, 0x73, 0x83, 0x46, 0xb5, 0xae,
+	0x2c, 0xcb, 0xbe, 0x34, 0x03, 0x28, 0x6e, 0x2b, 0x32, 0xc0, 0x09, 0xbb, 0xc7, 0xbb, 0x86, 0x89,
+	0x7e, 0xa1, 0x0c, 0xbf, 0xa3, 0xd5, 0x37, 0xa4, 0x52, 0x5f, 0x52, 0x07, 0x2f, 0x34, 0x13, 0x45,
+	0xa3, 0x30, 0xa3, 0x34, 0x0a, 0x27, 0xb8, 0x57, 0x26, 0xc6, 0xfd, 0xe5, 0x8d, 0x66, 0x62, 0x5b,
+	0xf2, 0x47, 0xc7, 0xde, 0x31, 0xd5, 0xf9, 0x34, 0x16, 0x6f, 0xea, 0x60, 0xdd, 0x29, 0xbc, 0x42,
+	0x9e, 0x9b, 0x35, 0x96, 0x89, 0x6f, 0xe4, 0xc0, 0xcf, 0x72, 0x80, 0x4a, 0x2b, 0x62, 0xa6, 0xe3,
+	0x99, 0xcb, 0xdf, 0x24, 0xd4, 0x58, 0x51, 0x91, 0x4b, 0xeb, 0xb6, 0xec, 0xb6, 0x4b, 0x1a, 0x60,
+	0xb5, 0xdb, 0x94, 0xfd, 0xdf, 0xc5, 0x57, 0x14, 0xab, 0xc6, 0x1c, 0x9c, 0xfe, 0x92, 0xc7, 0xfc,
+	0x47, 0xd4, 0xa8, 0x51, 0x38, 0x13, 0xd6, 0xff, 0x4a, 0x06, 0x3e, 0xcf, 0xe1, 0x6a, 0xd5, 0x5a,
+	0xfe, 0xed, 0x14, 0x26, 0x18, 0x4a, 0x9d, 0x7c, 0x10, 0xf0, 0x07, 0x1c, 0x2d, 0x4e, 0xc2, 0xfc,
+	0x19, 0x8f, 0x49, 0x4a, 0xdb, 0x08, 0x2f, 0x5c, 0x55, 0x13, 0xe7, 0x93, 0x75, 0x9d, 0xfe, 0x35,
+	0xfa, 0xd3, 0xd1, 0xe4, 0x01, 0x5e, 0xa7, 0x40, 0xa2, 0x43, 0x5e, 0x6b, 0x40, 0x14, 0xca, 0x2d,
+	0x6b, 0xc5, 0x45, 0xaf, 0xf2, 0x21, 0x9e, 0x57, 0x57, 0xf0, 0xcc, 0x27, 0x6f, 0x56, 0x5e, 0x0d,
+	0xfb, 0x8c, 0xe5, 0x38, 0xb5, 0xf8, 0xae, 0xe2, 0xf6, 0xba, 0xb7, 0xaa, 0x9c, 0xca, 0x22, 0xe2,
+	0xb2, 0xb9, 0x74, 0x8b, 0x0a, 0xed, 0xb4, 0xe6, 0x35, 0x80, 0xb4, 0x5d, 0x26, 0x61, 0xbb, 0x72,
+	0x47, 0x75, 0xbe, 0xf3, 0x9d, 0x72, 0x12, 0xe2, 0xd2, 0x49, 0x71, 0x7f, 0x46, 0xa7, 0x39, 0x45,
+	0xde, 0x2f, 0x22, 0x27, 0x71, 0x86, 0xc9, 0x2c, 0x9d, 0x61, 0x94, 0x83, 0x51, 0x76, 0xf1, 0x60,
+	0x94, 0x38, 0x87, 0x6c, 0x2c, 0x9c, 0x43, 0x16, 0xf7, 0xd0, 0xdc, 0x39, 0xf6, 0xd0, 0xcd, 0x15,
+	0xe7, 0x80, 0x29, 0x3a, 0x68, 0xe0, 0x4f, 0x3c, 0x69, 0xae, 0x87, 0x2c, 0x0b, 0xcf, 0x6b, 0xdf,
+	0x59, 0x0e, 0xfd, 0x59, 0x14, 0xf8, 0x93, 0x89, 0x17, 0x70, 0x3e, 0x8b, 0x53, 0xc3, 0x70, 0x87,
+	0xde, 0xcc, 0xa3, 0x01, 0xc9, 0x10, 0x59, 0x6b, 0x2b, 0x06, 0xb6, 0x46, 0xe5, 0xdf, 0xa1, 0x80,
+	0x18, 0x84, 0x6f, 0x66, 0x43, 0xb1, 0xe3, 0xbe, 0xcf, 0x76, 0xe2, 0xda, 0x82, 0xf7, 0x38, 0xa9,
+	0x29, 0x23, 0x4a, 0x0b, 0xde, 0xe5, 0xfc, 0x90, 0x69, 0xca, 0x57, 0x54, 0xe2, 0x5a, 0x0e, 0xd0,
+	0xed, 0x00, 0xdc, 0xe6, 0x60, 0x4e, 0x59, 0x61, 0x97, 0x12, 0x6f, 0xb1, 0x39, 0x29, 0xd6, 0x77,
+	0x17, 0x01, 0x61, 0x21, 0x9c, 0x5f, 0x75, 0xfa, 0x82, 0xed, 0xf0, 0x7d, 0xb5, 0xe3, 0x8f, 0xf6,
+	0xe7, 0x23, 0xc8, 0x54, 0xd8, 0xae, 0xc7, 0xb7, 0x22, 0xe9, 0x31, 0xff, 0xc8, 0x47, 0xbe, 0xb3,
+	0xa3, 0xdd, 0xea, 0xe6, 0xfa, 0xb7, 0x7a, 0x16, 0x96, 0x09, 0x1d, 0x7f, 0xb4, 0xe2, 0x7b, 0xcd,
+	0x3e, 0xbb, 0xc8, 0x07, 0xe3, 0xc5, 0x87, 0xc5, 0xe3, 0xe8, 0x07, 0xac, 0xa8, 0xec, 0x74, 0x6b,
+	0xfb, 0x5e, 0xea, 0x6e, 0xc8, 0xa6, 0x52, 0x46, 0x79, 0xcc, 0x2e, 0x36, 0x26, 0xfe, 0x6b, 0xde,
+	0xb9, 0x5a, 0xa3, 0xff, 0x43, 0x96, 0x17, 0xb7, 0x8d, 0x48, 0xfd, 0x1b, 0x6b, 0xaf, 0x23, 0x59,
+	0x9b, 0xf0, 0x6b, 0xb5, 0xf2, 0x3f, 0x62, 0x57, 0x60, 0x28, 0x5e, 0x1d, 0x9f, 0x35, 0xde, 0xb7,
+	0x59, 0x41, 0xde, 0x5b, 0x59, 0x6b, 0x2f, 0x49, 0x61, 0xe1, 0xd1, 0x64, 0xf5, 0x90, 0xdf, 0x65,
+	0x1b, 0x30, 0x64, 0xa8, 0x7f, 0xc2, 0x36, 0xc6, 0x91, 0x37, 0x15, 0xf6, 0xd9, 0x5d, 0x3d, 0x01,
+	0x2a, 0x16, 0x38, 0x65, 0xf9, 0xfb, 0x2c, 0xc7, 0x6d, 0x1d, 0x42, 0xa9, 0xa1, 0x32, 0xaf, 0x33,
+	0x2e, 0x2f, 0x67, 0x04, 0xf7, 0x63, 0xc6, 0xe4, 0x64, 0xcf, 0x21, 0x41, 0x39, 0x0c, 0x09, 0x09,
+	0x63, 0x56, 0x04, 0x09, 0xb5, 0xa3, 0xc1, 0xec, 0xd0, 0x0b, 0xf5, 0x6f, 0xb2, 0x5c, 0xe4, 0xbb,
+	0x83, 0x91, 0xb8, 0x13, 0xaa, 0x27, 0x64, 0xf0, 0x59, 0x5a, 0x1b, 0x91, 0x6f, 0x8c, 0x46, 0xfa,
+	0x7d, 0x56, 0x88, 0x7c, 0x72, 0x5e, 0x32, 0xe0, 0x2a, 0xea, 0x7c, 0xe4, 0xa3, 0x23, 0x43, 0x19,
+	0xa9, 0x49, 0x6d, 0xc5, 0x80, 0x1f, 0x2d, 0x0c, 0x78, 0x7d, 0x49, 0x04, 0x4e, 0x4e, 0x8c, 0xfa,
+	0x70, 0x79, 0xd4, 0xb5, 0x2c, 0x72, 0x68, 0xe2, 0x3a, 0xe6, 0x9e, 0x40, 0x7d, 0xf1, 0xb3, 0xb8,
+	0xd0, 0x65, 0xca, 0x36, 0xcb, 0xf7, 0x29, 0xb4, 0x57, 0xb9, 0x8f, 0x4c, 0x06, 0x6b, 0xdd, 0x47,
+	0x52, 0x58, 0x79, 0x91, 0x23, 0xca, 0xcf, 0x58, 0x01, 0x85, 0xf6, 0x8e, 0xa3, 0x25, 0xa9, 0xdf,
+	0x65, 0x2c, 0xbe, 0xaa, 0x44, 0x62, 0x77, 0xd7, 0x89, 0xf5, 0x8f, 0x23, 0x8b, 0x94, 0xe8, 0x1d,
+	0x47, 0xe5, 0xff, 0x9a, 0x62, 0x45, 0xb4, 0xaa, 0x79, 0xe2, 0xcd, 0x96, 0x65, 0xff, 0x65, 0x56,
+	0x54, 0x12, 0xd3, 0xda, 0x82, 0x56, 0xa1, 0x69, 0x5e, 0xb0, 0x58, 0x9c, 0xb3, 0xc0, 0xbd, 0x78,
+	0x96, 0x27, 0x03, 0x2e, 0x4f, 0x57, 0xbe, 0xa6, 0x6f, 0x5e, 0xb0, 0x90, 0x54, 0x37, 0xd9, 0x76,
+	0xe2, 0xfb, 0xce, 0xb5, 0x5f, 0x11, 0x26, 0xa8, 0x9a, 0x17, 0xac, 0x2d, 0x04, 0xe0, 0xd0, 0xd5,
+	0x4d, 0xb6, 0xe1, 0xc1, 0xa4, 0xca, 0x26, 0xdb, 0x82, 0xa5, 0x92, 0xb7, 0x6e, 0x3f, 0x63, 0x39,
+	0x1e, 0x16, 0xc2, 0xe7, 0xbf, 0xac, 0xac, 0x47, 0xe2, 0xca, 0x7f, 0x49, 0xb1, 0xa2, 0x9c, 0xec,
+	0xcc, 0xd7, 0x35, 0xb6, 0xd5, 0x6b, 0xf4, 0xfb, 0x6e, 0xab, 0x7b, 0x60, 0xb4, 0x5b, 0x75, 0xed,
+	0x82, 0xae, 0xb1, 0x3c, 0x87, 0x74, 0x8c, 0xe7, 0xda, 0xdb, 0x9f, 0xbd, 0x7b, 0xb7, 0xa9, 0x5f,
+	0x91, 0x34, 0x6e, 0xbf, 0x67, 0x39, 0xda, 0xff, 0x78, 0x07, 0x50, 0x9d, 0x31, 0x0e, 0x75, 0x8c,
+	0x6a, 0xdb, 0xd4, 0xfe, 0x27, 0x87, 0x5d, 0x66, 0x45, 0x0e, 0xeb, 0xf6, 0xac, 0x8e, 0xd1, 0xd6,
+	0xfe, 0x22, 0x41, 0xd8, 0x68, 0xf7, 0x7a, 0x75, 0xed, 0x7f, 0x71, 0x98, 0x18, 0xc4, 0x68, 0xb7,
+	0xb5, 0x9f, 0x72, 0xc8, 0x75, 0x76, 0x91, 0x43, 0x6a, 0xbd, 0xae, 0x63, 0xf5, 0xda, 0x6d, 0xd3,
+	0xd2, 0xfe, 0x77, 0x82, 0xbd, 0xdd, 0xab, 0x19, 0x6d, 0xed, 0x67, 0x49, 0xf6, 0xee, 0x0b, 0xed,
+	0x1d, 0x40, 0x2a, 0xff, 0x7e, 0x03, 0xdf, 0x9e, 0xf3, 0x22, 0x64, 0x87, 0xb3, 0x38, 0x6e, 0xd3,
+	0x6c, 0xb7, 0x7b, 0xda, 0x05, 0xf9, 0x6c, 0x5a, 0x56, 0xcf, 0xd2, 0x52, 0xfa, 0x55, 0x76, 0x09,
+	0x9f, 0x6b, 0xcd, 0x9e, 0x6b, 0x99, 0x4f, 0xf7, 0x4d, 0xdb, 0xd1, 0xd2, 0xfa, 0x65, 0xae, 0x82,
+	0x04, 0xf7, 0xdb, 0x2f, 0xb4, 0x4c, 0x4c, 0xfb, 0xbc, 0x6f, 0x5a, 0xad, 0x8e, 0xd9, 0x75, 0x4c,
+	0x4b, 0xcb, 0xea, 0x37, 0xd8, 0x55, 0x0e, 0x6e, 0x98, 0x86, 0xb3, 0x6f, 0x99, 0xb6, 0x14, 0xb3,
+	0xa1, 0x5f, 0x67, 0x97, 0x17, 0x51, 0x20, 0x2a, 0xa7, 0xef, 0xb2, 0xeb, 0x1c, 0xb1, 0x67, 0x3a,
+	0x30, 0xcd, 0x46, 0x6b, 0x4f, 0x72, 0x6d, 0x4a, 0x81, 0x09, 0x24, 0xf0, 0xe5, 0xa5, 0x5e, 0xb6,
+	0x44, 0x69, 0x05, 0x5d, 0x67, 0x3b, 0x1c, 0xd8, 0x37, 0x6a, 0x4f, 0x4c, 0xc7, 0x6d, 0x75, 0x35,
+	0x26, 0x75, 0x6d, 0xb4, 0x7b, 0xcf, 0x5c, 0xcb, 0xec, 0xf4, 0x0e, 0xcc, 0xba, 0x56, 0xd4, 0xaf,
+	0x30, 0x0d, 0x49, 0x7b, 0x96, 0xe3, 0xda, 0x8e, 0xe1, 0xec, 0xdb, 0xda, 0x96, 0x94, 0x4a, 0x02,
+	0x7a, 0xfb, 0x8e, 0xb6, 0xad, 0x5f, 0x62, 0xdb, 0xb1, 0x84, 0x4e, 0xaf, 0xae, 0xed, 0xc8, 0x81,
+	0xf6, 0xac, 0xde, 0x7e, 0x9f, 0xc3, 0x2e, 0x4a, 0x32, 0x2e, 0x11, 0x40, 0x9a, 0x24, 0xe3, 0xee,
+	0xc0, 0x61, 0x97, 0xf4, 0x9b, 0xec, 0x1a, 0x87, 0x75, 0xf6, 0xdb, 0x4e, 0xab, 0x6f, 0x58, 0x8e,
+	0x9c, 0xaf, 0xae, 0x97, 0xd8, 0x95, 0x25, 0x1c, 0x4c, 0xf7, 0xb2, 0xc4, 0x54, 0x0d, 0xcb, 0x6a,
+	0x99, 0x96, 0xe4, 0xb9, 0xa2, 0x5f, 0x63, 0xfa, 0x02, 0x06, 0x38, 0xae, 0xea, 0xf7, 0xd8, 0x6d,
+	0x0e, 0x7f, 0xba, 0x6f, 0xee, 0x9b, 0xab, 0xcc, 0x7b, 0x4d, 0xbf, 0xc3, 0x76, 0xd7, 0x91, 0x80,
+	0x8c, 0xeb, 0xd2, 0x76, 0x56, 0xaf, 0x6d, 0x4a, 0xbe, 0x92, 0xb4, 0x12, 0x81, 0x81, 0xf6, 0x86,
+	0x9c, 0x17, 0x88, 0x31, 0xec, 0x17, 0xdd, 0x9a, 0x64, 0xb8, 0x29, 0xb5, 0x57, 0x71, 0xc0, 0xb5,
+	0x2b, 0x2d, 0x64, 0x0b, 0x8c, 0x76, 0x4b, 0xc2, 0x3a, 0xa6, 0x63, 0x5a, 0xdc, 0x6a, 0xb7, 0x2b,
+	0x35, 0xbc, 0x7e, 0xb2, 0xf0, 0x17, 0x20, 0x88, 0xb4, 0xc9, 0xd7, 0x5a, 0xc4, 0x2a, 0x0e, 0x06,
+	0xb0, 0x03, 0xd3, 0xb2, 0x5b, 0xbd, 0x6e, 0xb5, 0xe5, 0x74, 0x8c, 0xbe, 0x96, 0xaa, 0x78, 0x58,
+	0xc6, 0xd1, 0x91, 0x00, 0x5b, 0x24, 0xe8, 0x07, 0x35, 0xb7, 0x61, 0x19, 0x7b, 0x22, 0x44, 0x2f,
+	0x90, 0x5c, 0x82, 0xd6, 0xad, 0x5e, 0x5f, 0x4b, 0xd1, 0xac, 0x09, 0x66, 0x99, 0x86, 0xdd, 0xd1,
+	0xd2, 0x49, 0xc2, 0x8e, 0x61, 0x3f, 0xd1, 0x32, 0x95, 0xc7, 0x38, 0x0c, 0xbe, 0x42, 0xa1, 0x6a,
+	0x91, 0x9c, 0xa3, 0xa6, 0xe8, 0x49, 0xce, 0x5d, 0x73, 0xeb, 0x66, 0xdf, 0x32, 0x6b, 0x86, 0x63,
+	0xd6, 0x85, 0x84, 0x5f, 0xc3, 0xaf, 0xc5, 0xf1, 0x56, 0x3d, 0xb1, 0xaa, 0x53, 0xdc, 0x61, 0x05,
+	0x04, 0x41, 0x3e, 0xfa, 0x59, 0x2a, 0x7e, 0x86, 0xd4, 0xf1, 0x2e, 0x55, 0xf9, 0x77, 0x54, 0xb0,
+	0x26, 0x1a, 0x28, 0x98, 0xd5, 0x54, 0x0d, 0xe4, 0x8c, 0xc0, 0xb1, 0x21, 0x06, 0x6c, 0x2d, 0x25,
+	0x0d, 0x82, 0x3e, 0x8b, 0xd0, 0xb4, 0x24, 0x95, 0xe1, 0x62, 0x6b, 0x59, 0x49, 0x8a, 0x51, 0x80,
+	0xd0, 0x3c, 0xe9, 0x5b, 0x73, 0x5b, 0x7d, 0xb2, 0xd2, 0x5d, 0x49, 0x88, 0x8e, 0x86, 0x84, 0x8f,
+	0xf5, 0x6b, 0xdc, 0xbb, 0x48, 0x66, 0xb5, 0xdd, 0xab, 0x3d, 0x31, 0xeb, 0xda, 0xdb, 0x74, 0xe5,
+	0x44, 0xf9, 0xeb, 0x03, 0x09, 0xf3, 0xad, 0x50, 0x5e, 0xb0, 0xd7, 0x7b, 0xcf, 0xba, 0x5a, 0x2a,
+	0xa6, 0xeb, 0x42, 0xb2, 0xaa, 0x1d, 0x68, 0x59, 0x91, 0xcc, 0x39, 0xa8, 0xf1, 0xac, 0xae, 0xdd,
+	0xa5, 0x88, 0x41, 0x48, 0x9c, 0x29, 0x1e, 0x57, 0xfe, 0xca, 0xc2, 0xcb, 0x23, 0x61, 0xfa, 0xbe,
+	0xbd, 0x3c, 0xac, 0xed, 0xb6, 0x5b, 0xdd, 0x27, 0x0b, 0xc3, 0xda, 0x72, 0x16, 0x69, 0x4a, 0xaf,
+	0x9c, 0xee, 0xc0, 0xd4, 0xb2, 0x95, 0x3f, 0x4d, 0xe3, 0x27, 0x3a, 0x5c, 0xba, 0x6c, 0x9a, 0x11,
+	0x63, 0x43, 0x19, 0x40, 0x82, 0x3e, 0xf9, 0xb8, 0x53, 0x75, 0x9b, 0xf5, 0x58, 0x3c, 0x81, 0x1a,
+	0x75, 0xe9, 0x77, 0x1c, 0x44, 0x64, 0xd9, 0x45, 0x58, 0xa3, 0xae, 0xe5, 0xc5, 0xec, 0x1b, 0xee,
+	0x27, 0x7b, 0x9c, 0x4a, 0x4b, 0x42, 0x1a, 0x60, 0x0f, 0x45, 0x3c, 0x82, 0x1e, 0xeb, 0xba, 0x00,
+	0x3d, 0x24, 0xd0, 0x5b, 0xf0, 0xff, 0x58, 0x3c, 0x01, 0xd3, 0xfa, 0x25, 0x29, 0xcd, 0x41, 0x10,
+	0x18, 0xbc, 0x88, 0xa0, 0x9e, 0xd3, 0x34, 0x2d, 0xed, 0x6d, 0x3e, 0x26, 0xaa, 0xf5, 0xfa, 0x7d,
+	0x00, 0x69, 0x31, 0x51, 0xa3, 0x55, 0x05, 0xc8, 0xdd, 0x78, 0x48, 0x63, 0xdf, 0xe9, 0x75, 0xcd,
+	0x3d, 0xed, 0xed, 0x63, 0xfd, 0x92, 0xa0, 0xea, 0x1b, 0xfb, 0xb6, 0xa9, 0xbd, 0x7d, 0x9b, 0xd2,
+	0xaf, 0x71, 0x57, 0x12, 0x20, 0xc8, 0x19, 0x1d, 0xed, 0xed, 0xdb, 0x74, 0xa5, 0xae, 0x38, 0x0d,
+	0x5d, 0xfc, 0xdd, 0xe6, 0x51, 0xd1, 0xb7, 0x5c, 0xa3, 0x8e, 0x7b, 0xf8, 0x16, 0x3e, 0xd6, 0xcd,
+	0xb6, 0xe9, 0x98, 0x5a, 0x2a, 0x86, 0x74, 0x7a, 0xf5, 0x56, 0xe3, 0x85, 0x96, 0xae, 0x34, 0xb0,
+	0x8d, 0xb5, 0xf4, 0x47, 0x25, 0xc8, 0x83, 0xeb, 0xe6, 0x01, 0xa4, 0xc8, 0xae, 0x59, 0x73, 0x4c,
+	0x10, 0x89, 0xbb, 0x1a, 0x40, 0xeb, 0x2d, 0x3b, 0x46, 0xa4, 0x2a, 0x9f, 0xa2, 0x2b, 0xc5, 0x7f,
+	0xc8, 0x81, 0x16, 0xa7, 0xc3, 0x83, 0xa7, 0x5b, 0x37, 0x2c, 0x60, 0x47, 0x05, 0x3b, 0x8e, 0xdb,
+	0x7b, 0xde, 0xd1, 0x52, 0x95, 0x2f, 0xe2, 0xbf, 0xd4, 0xc0, 0xff, 0xf4, 0x02, 0xe9, 0xf7, 0xbc,
+	0x53, 0x73, 0xbb, 0xcf, 0x3b, 0xee, 0xc7, 0x72, 0x0e, 0x02, 0xf2, 0x89, 0x96, 0xd2, 0x77, 0x79,
+	0x16, 0x01, 0x48, 0xaf, 0x6f, 0x76, 0x79, 0x24, 0x57, 0x0d, 0xbb, 0x55, 0x03, 0xa3, 0xe8, 0x37,
+	0xb8, 0x7e, 0x80, 0x4c, 0xec, 0xd4, 0xef, 0xde, 0x65, 0x2a, 0xff, 0x20, 0xcf, 0x2e, 0xaf, 0xf8,
+	0xe3, 0x07, 0x14, 0x1c, 0xcf, 0x41, 0xa9, 0x46, 0x55, 0x56, 0x37, 0x17, 0x28, 0xbd, 0xab, 0xf0,
+	0xe6, 0x0b, 0xc4, 0xa5, 0xc8, 0x0c, 0x02, 0xd7, 0x31, 0x1d, 0xa3, 0x6e, 0x38, 0x86, 0x96, 0x5e,
+	0x10, 0x66, 0x3a, 0x4d, 0xb7, 0x6e, 0x3b, 0x5a, 0x66, 0x05, 0xdc, 0xb6, 0x6a, 0x5a, 0x76, 0x41,
+	0x10, 0xc0, 0x9d, 0x17, 0x7d, 0x53, 0x96, 0x0f, 0x02, 0x71, 0xd0, 0x36, 0xba, 0xee, 0x41, 0xab,
+	0xae, 0xe5, 0x56, 0x21, 0xfa, 0xb5, 0xbe, 0xb6, 0xb9, 0x38, 0x8f, 0xbe, 0x5b, 0xb7, 0x6b, 0x7d,
+	0x2d, 0x4f, 0x5b, 0x9a, 0x02, 0x37, 0x6b, 0x5d, 0xad, 0xb0, 0x20, 0xa7, 0xd5, 0x77, 0xfb, 0x56,
+	0xcf, 0xe9, 0x69, 0x6c, 0x09, 0x71, 0xf0, 0x90, 0xeb, 0x5a, 0x5c, 0x85, 0x80, 0xc9, 0x6d, 0x2d,
+	0x8c, 0xec, 0xd4, 0xfa, 0x9c, 0x61, 0x7b, 0x05, 0x1c, 0xe8, 0x77, 0x16, 0xe0, 0xfb, 0x75, 0xa4,
+	0xbf, 0xb8, 0x02, 0x0e, 0xf4, 0xda, 0xc2, 0xc0, 0x76, 0xcd, 0x41, 0x86, 0x4b, 0xab, 0x10, 0x75,
+	0x5e, 0x56, 0x2c, 0xac, 0x5d, 0xad, 0x03, 0xca, 0x72, 0xcb, 0x5e, 0x5e, 0x8d, 0xab, 0xf5, 0xea,
+	0xa6, 0x76, 0x65, 0xc1, 0x56, 0x86, 0xd5, 0x77, 0x7b, 0x7d, 0xed, 0xea, 0x82, 0x62, 0x00, 0xb6,
+	0xfb, 0x86, 0x76, 0x6d, 0x05, 0xdc, 0xe9, 0x1b, 0xda, 0xf5, 0x55, 0xf4, 0x4d, 0x43, 0x2b, 0xad,
+	0xa2, 0x6f, 0x1a, 0xda, 0x8d, 0x65, 0xcb, 0x3e, 0xe2, 0x13, 0xbc, 0xb9, 0x0a, 0x01, 0x13, 0xdc,
+	0x5d, 0x9c, 0x04, 0x20, 0x1a, 0x6d, 0xa3, 0x6a, 0xb6, 0xb5, 0x5b, 0xab, 0x26, 0xf8, 0x08, 0x27,
+	0x7f, 0x7b, 0x35, 0x8e, 0x4f, 0xfe, 0x3d, 0xfd, 0x36, 0xbb, 0xb1, 0x28, 0xb3, 0x5b, 0x77, 0x1d,
+	0xc3, 0xda, 0x33, 0x1d, 0xed, 0xce, 0xaa, 0x21, 0xbb, 0x75, 0xd7, 0x6e, 0xb7, 0xb5, 0xbb, 0x6b,
+	0x70, 0x4e, 0xbb, 0xad, 0xdd, 0xa3, 0x5d, 0x5f, 0xc6, 0x4a, 0xbf, 0x6d, 0xbb, 0xa8, 0x69, 0x79,
+	0xc1, 0x1e, 0x1c, 0xe5, 0xd4, 0xb4, 0xaf, 0x2d, 0x86, 0x17, 0xc0, 0xab, 0x3d, 0x5b, 0x7b, 0x7f,
+	0x01, 0xd1, 0xaf, 0x56, 0xdd, 0x96, 0xdd, 0xaa, 0x6b, 0x1f, 0x50, 0x09, 0x24, 0x5d, 0x6d, 0xbf,
+	0xdb, 0x35, 0xdb, 0x6e, 0xab, 0xae, 0x7d, 0x7d, 0x95, 0x6a, 0xe6, 0x73, 0xa7, 0x59, 0xb7, 0xb4,
+	0x6f, 0x54, 0x3e, 0xc5, 0x53, 0x10, 0xff, 0x54, 0x7f, 0x3c, 0xd2, 0x2f, 0xf2, 0xe4, 0x7b, 0xd0,
+	0xaa, 0xbb, 0xdd, 0x5e, 0xd7, 0xe4, 0x5b, 0xdf, 0x0e, 0x01, 0xfa, 0x96, 0x69, 0x9b, 0x5d, 0x47,
+	0x7b, 0x7b, 0xb7, 0xf2, 0x1f, 0x52, 0xd8, 0x08, 0x1d, 0xcf, 0x4f, 0x1e, 0xd1, 0xa7, 0xe5, 0xe2,
+	0x3a, 0x2f, 0x50, 0xb7, 0xcc, 0xe6, 0xd2, 0xde, 0x06, 0x30, 0x10, 0xf9, 0x1c, 0x72, 0x07, 0xee,
+	0x93, 0x00, 0x32, 0xed, 0xbe, 0x96, 0xa6, 0x51, 0xe1, 0xd9, 0xd8, 0x77, 0x9a, 0x5a, 0x56, 0x01,
+	0xd4, 0xa1, 0x98, 0xcc, 0x2b, 0x00, 0x28, 0xba, 0x34, 0x4d, 0x91, 0x6a, 0xf5, 0xf6, 0x21, 0xbf,
+	0xdd, 0x55, 0xa4, 0x36, 0x7b, 0x7d, 0xed, 0x31, 0xed, 0x40, 0xf0, 0xbc, 0xdf, 0xb5, 0xcc, 0x3e,
+	0x6c, 0x67, 0x2a, 0xc8, 0x36, 0x9f, 0x42, 0xe1, 0xf1, 0xd3, 0x74, 0xe2, 0xdb, 0x5e, 0xfa, 0xfb,
+	0x65, 0x40, 0x66, 0xf0, 0xb3, 0x40, 0x7f, 0x1f, 0x32, 0x21, 0x2e, 0x93, 0x01, 0xc5, 0x72, 0xff,
+	0x85, 0xeb, 0x38, 0x6d, 0x7e, 0x4c, 0x28, 0x52, 0xb4, 0xa8, 0xf0, 0x56, 0x57, 0xa6, 0x03, 0x03,
+	0x4b, 0x5c, 0x5c, 0x54, 0xa7, 0x2d, 0xc3, 0xdb, 0x70, 0xdc, 0xba, 0x59, 0x8b, 0xe1, 0x1a, 0x15,
+	0x18, 0x86, 0xe3, 0xf6, 0xf7, 0xed, 0x26, 0xcf, 0x68, 0xda, 0x25, 0x32, 0x26, 0x00, 0x7b, 0x7d,
+	0x84, 0xe9, 0x0b, 0x84, 0x20, 0x41, 0xbb, 0x9c, 0x24, 0xe4, 0xb0, 0x2b, 0x31, 0x21, 0x68, 0xc0,
+	0x4b, 0x30, 0xed, 0x2a, 0x59, 0xd1, 0xa0, 0x23, 0x8c, 0x76, 0x8d, 0x76, 0x38, 0xa2, 0xea, 0x3e,
+	0xe3, 0xda, 0x5c, 0x8f, 0xa1, 0xa0, 0x25, 0x41, 0x4b, 0x49, 0x89, 0x8d, 0x96, 0xd9, 0xae, 0x6b,
+	0x37, 0x94, 0xa1, 0x41, 0x9f, 0x7e, 0xb5, 0xaa, 0xdd, 0xa4, 0xa5, 0x21, 0x75, 0x00, 0xb4, 0xab,
+	0x97, 0xc4, 0xbc, 0x97, 0xb6, 0xa4, 0x03, 0xbc, 0xf0, 0xa3, 0x34, 0x6a, 0xe9, 0x9b, 0x6d, 0x51,
+	0x65, 0x77, 0xda, 0x89, 0x23, 0x39, 0x23, 0x18, 0x14, 0xc1, 0xff, 0xed, 0x5d, 0x86, 0x4a, 0x03,
+	0x80, 0x74, 0x7b, 0x6e, 0x75, 0xbf, 0xd1, 0x20, 0xb9, 0xff, 0x59, 0xb8, 0xa8, 0xf2, 0x5d, 0x26,
+	0x5f, 0x5b, 0x72, 0x1c, 0xb5, 0xb2, 0xc6, 0xf9, 0xb6, 0x1c, 0x77, 0xaf, 0xe7, 0xf4, 0xe8, 0x18,
+	0x9f, 0xa2, 0x78, 0x6a, 0x39, 0xee, 0x33, 0xab, 0xe5, 0x98, 0xea, 0x0e, 0x87, 0x21, 0x28, 0x31,
+	0x46, 0xcd, 0x69, 0xf5, 0xba, 0xb6, 0x96, 0x89, 0x11, 0x46, 0xbf, 0xdf, 0x7e, 0x21, 0x11, 0xd9,
+	0x18, 0x51, 0x6b, 0x9b, 0x86, 0x25, 0x11, 0x1b, 0xc2, 0xaf, 0xe9, 0xdc, 0xa3, 0xe5, 0xc8, 0x52,
+	0xad, 0x15, 0x96, 0xfa, 0xeb, 0x38, 0xa1, 0xc5, 0xef, 0x31, 0xa9, 0xa0, 0x68, 0xd4, 0x12, 0x15,
+	0x4f, 0xa3, 0x26, 0xea, 0x1b, 0xb1, 0x53, 0x4b, 0x88, 0x6b, 0x3b, 0x56, 0xab, 0x06, 0xc7, 0x7c,
+	0x49, 0x4a, 0xc5, 0x51, 0x26, 0x26, 0x45, 0x88, 0x20, 0xcd, 0x56, 0xfe, 0x29, 0xbd, 0xff, 0x95,
+	0xa3, 0x63, 0xbc, 0xa3, 0x31, 0x1b, 0x6a, 0x29, 0x4b, 0x22, 0x1a, 0xae, 0x6d, 0x76, 0xeb, 0xf2,
+	0x00, 0x1e, 0xab, 0xd1, 0x70, 0x6b, 0x4d, 0xb3, 0xf6, 0xc4, 0xed, 0x1d, 0x98, 0x56, 0xdb, 0xe8,
+	0xcb, 0x82, 0xa1, 0xd1, 0x70, 0x21, 0xc1, 0x40, 0x24, 0xed, 0x77, 0x9d, 0xd8, 0x68, 0x8d, 0x06,
+	0x2f, 0xd9, 0x9f, 0x48, 0x44, 0x3e, 0x81, 0xa8, 0xbe, 0x90, 0x08, 0xad, 0x62, 0xe3, 0x11, 0x0a,
+	0xbf, 0x9c, 0xc7, 0xd9, 0xed, 0x2d, 0x35, 0x74, 0xf6, 0x94, 0x86, 0x8e, 0x80, 0xc4, 0xdd, 0x17,
+	0x09, 0x91, 0x0d, 0x95, 0xcf, 0xb1, 0x3c, 0x5c, 0xfa, 0xc2, 0x91, 0x0c, 0xbf, 0x97, 0x34, 0xfc,
+	0x9e, 0x62, 0x78, 0x09, 0x21, 0xfb, 0xa6, 0x2b, 0xb6, 0x7a, 0x45, 0x86, 0xbb, 0x23, 0x09, 0xc1,
+	0x53, 0x9c, 0x14, 0x02, 0x41, 0xd6, 0x36, 0x6b, 0x90, 0x2b, 0x31, 0x0c, 0xf6, 0xc0, 0x5f, 0xeb,
+	0x2d, 0xcb, 0xe4, 0x0b, 0xb7, 0x85, 0x4a, 0x3a, 0x6e, 0xa3, 0xa1, 0x65, 0x2a, 0x7d, 0x74, 0x8c,
+	0xc5, 0xef, 0x00, 0x69, 0x71, 0x2c, 0xb0, 0x52, 0xc7, 0x70, 0x6a, 0x4d, 0xed, 0x02, 0xb9, 0x9b,
+	0x70, 0x40, 0x79, 0xf0, 0xb3, 0x84, 0x91, 0x78, 0xa8, 0xa7, 0x2b, 0x7f, 0x2f, 0x85, 0x6f, 0xa8,
+	0x56, 0x7c, 0x61, 0x47, 0xab, 0x65, 0x59, 0x6e, 0xab, 0xde, 0x36, 0x5d, 0xa7, 0xd5, 0x31, 0x7b,
+	0x4a, 0x86, 0xb4, 0x2c, 0xb7, 0x69, 0x58, 0x75, 0x09, 0x17, 0x46, 0xb0, 0x64, 0x05, 0x9e, 0x8e,
+	0x29, 0xf1, 0x08, 0x29, 0x9d, 0x4f, 0xc2, 0xb1, 0x07, 0x40, 0xf0, 0x6c, 0x65, 0x46, 0x7f, 0x46,
+	0x8d, 0x5f, 0x2a, 0xa0, 0xf2, 0xd9, 0xfd, 0xa1, 0x69, 0xf5, 0xe4, 0x92, 0x76, 0x70, 0x49, 0xdf,
+	0xfe, 0xf4, 0xdd, 0xa6, 0x7e, 0x95, 0xcf, 0xba, 0xe3, 0xda, 0xed, 0xde, 0xb3, 0xbe, 0xe1, 0x34,
+	0xa9, 0x79, 0x86, 0x5d, 0xb5, 0x8e, 0xda, 0x55, 0x53, 0x3b, 0x68, 0x1d, 0x3c, 0x45, 0xf3, 0x05,
+	0x9f, 0x2e, 0x7d, 0xc3, 0xa5, 0x16, 0xf3, 0x55, 0x35, 0x73, 0xa0, 0x3d, 0x01, 0x46, 0xfd, 0x02,
+	0x9c, 0x03, 0x07, 0xd8, 0x35, 0x38, 0x0b, 0x77, 0x0c, 0xeb, 0x89, 0x26, 0x8a, 0x72, 0x80, 0x2f,
+	0xc5, 0xf5, 0xe7, 0xea, 0x07, 0x79, 0xcb, 0xfe, 0xd5, 0x49, 0xfa, 0x57, 0x67, 0xc9, 0xbf, 0x3a,
+	0x8a, 0x7f, 0x1d, 0xaa, 0xb7, 0x1e, 0xd4, 0x10, 0xed, 0x34, 0x12, 0x9d, 0x04, 0x86, 0xa0, 0x27,
+	0xd5, 0x3e, 0x9c, 0xfe, 0x69, 0x16, 0x0d, 0x88, 0xb2, 0xbe, 0x2d, 0xf7, 0xe3, 0x4e, 0xc3, 0xad,
+	0xee, 0x5b, 0xb6, 0x23, 0xf7, 0xe3, 0x4e, 0x43, 0x9c, 0xf7, 0x2b, 0xff, 0x8c, 0x2e, 0x5d, 0x62,
+	0xbf, 0x97, 0xdb, 0x07, 0xa7, 0x6e, 0x52, 0xb3, 0xd1, 0x6d, 0x18, 0xad, 0x36, 0x3f, 0x2f, 0xe1,
+	0x16, 0x69, 0x3a, 0x6e, 0xd5, 0xa8, 0xcb, 0xf6, 0x90, 0xf0, 0x3c, 0x02, 0x93, 0x3f, 0xa6, 0xa9,
+	0x52, 0x22, 0x68, 0xab, 0x6b, 0x3b, 0xd6, 0x3e, 0xa2, 0x32, 0xb4, 0xff, 0x10, 0x0a, 0x1d, 0x3a,
+	0x1b, 0xd3, 0x8b, 0x3e, 0x9d, 0x18, 0x77, 0x83, 0xaa, 0x1e, 0x53, 0xe9, 0xd7, 0x09, 0x5c, 0x2e,
+	0x66, 0x13, 0x7d, 0x3b, 0x81, 0xda, 0x8c, 0xd9, 0x64, 0xff, 0x4e, 0xe0, 0xf2, 0x31, 0x1b, 0xf6,
+	0x34, 0x7a, 0x7d, 0x81, 0x2a, 0xe8, 0xef, 0xb1, 0x9b, 0x88, 0xb2, 0x9f, 0xb5, 0x9c, 0x5a, 0x53,
+	0x34, 0xd5, 0x08, 0xcf, 0xa8, 0xb2, 0x34, 0x93, 0x6d, 0x35, 0x81, 0x2e, 0xc6, 0xa3, 0xca, 0xfe,
+	0x97, 0xc0, 0x6d, 0x51, 0xc7, 0x4e, 0x6a, 0x24, 0xbb, 0xa9, 0x44, 0xb0, 0x4d, 0x7b, 0x86, 0xb9,
+	0xc2, 0xb7, 0xaa, 0xea, 0x5f, 0x5d, 0x7d, 0x35, 0x18, 0x4f, 0xf8, 0xe5, 0x5b, 0xfe, 0x37, 0xc6,
+	0xc0, 0x1f, 0x9b, 0x8d, 0x9a, 0xdb, 0xea, 0xd6, 0x7a, 0x9d, 0xbe, 0xe1, 0xb4, 0x60, 0xd7, 0x13,
+	0x5e, 0x06, 0x08, 0xb3, 0x6f, 0x5a, 0x70, 0x42, 0xfd, 0xf3, 0x34, 0xe6, 0x97, 0x97, 0x83, 0x91,
+	0x78, 0xef, 0x8a, 0x32, 0x70, 0xc1, 0xab, 0x56, 0x8d, 0xaf, 0x08, 0xf5, 0xdd, 0x64, 0xb7, 0x44,
+	0xc0, 0x79, 0xd5, 0x2d, 0x76, 0x53, 0x01, 0x94, 0xbd, 0x4e, 0x2d, 0x4d, 0xcd, 0x60, 0x81, 0x49,
+	0x4c, 0x41, 0x6c, 0x48, 0x0a, 0x12, 0xe5, 0x89, 0x0e, 0x0f, 0x20, 0x50, 0xcf, 0x0d, 0x8a, 0x4f,
+	0x41, 0xda, 0x36, 0xbb, 0xf2, 0xa4, 0xc8, 0x61, 0xbc, 0x34, 0x70, 0xcd, 0x4e, 0xdf, 0x79, 0x21,
+	0x9b, 0xcc, 0x0a, 0x62, 0xbf, 0xfb, 0xa4, 0xdb, 0x7b, 0xd6, 0x95, 0xbb, 0x8b, 0x54, 0x9f, 0xdb,
+	0xbc, 0x05, 0x4b, 0x1c, 0xcf, 0xab, 0x65, 0xbb, 0x76, 0xdb, 0x38, 0x30, 0x35, 0xb6, 0x30, 0x59,
+	0x7e, 0x36, 0x16, 0x55, 0xa1, 0x04, 0xf2, 0x76, 0x93, 0xb6, 0xa5, 0xbf, 0xcf, 0xee, 0x12, 0x38,
+	0xee, 0xf5, 0xd2, 0xf0, 0xb0, 0x1b, 0x82, 0x0b, 0x6b, 0xdb, 0x95, 0xdf, 0xcf, 0x60, 0xfe, 0x01,
+	0x7b, 0x53, 0x51, 0xca, 0xcd, 0x4d, 0x23, 0x19, 0x8a, 0x59, 0x45, 0xcf, 0x52, 0x00, 0x61, 0xd2,
+	0x29, 0x61, 0x50, 0x63, 0x85, 0x41, 0x45, 0xed, 0xa2, 0x20, 0x51, 0x52, 0x66, 0x01, 0xd1, 0xdb,
+	0xc7, 0xd8, 0x90, 0xdb, 0xb0, 0x40, 0x18, 0xd6, 0xde, 0x3e, 0x08, 0xd3, 0x36, 0xc4, 0x12, 0x18,
+	0x62, 0x09, 0x72, 0x8a, 0x8a, 0x4e, 0x0f, 0x36, 0x9d, 0x2e, 0x98, 0x1a, 0x03, 0x5d, 0xf0, 0x63,
+	0x29, 0x9a, 0x17, 0xfe, 0xa0, 0x0c, 0x87, 0x35, 0x69, 0x81, 0x22, 0x05, 0x30, 0x3c, 0xc8, 0xb9,
+	0x83, 0x76, 0xed, 0x96, 0xed, 0xc0, 0xa8, 0x4c, 0xbf, 0xc5, 0x4a, 0x84, 0xde, 0xef, 0xda, 0xfb,
+	0x7d, 0x50, 0xd2, 0xac, 0xbb, 0x3d, 0xab, 0x6e, 0x5a, 0x5a, 0x71, 0xc1, 0x1e, 0x8e, 0xb1, 0xa7,
+	0x6d, 0x2d, 0x4c, 0x00, 0x4a, 0x0c, 0x3e, 0x65, 0x71, 0x38, 0x57, 0x11, 0x60, 0xc0, 0x9d, 0x05,
+	0x03, 0xf2, 0x2e, 0xb5, 0x98, 0xf5, 0xc5, 0xca, 0x5f, 0xa4, 0x58, 0x49, 0x2c, 0x8f, 0x5a, 0x5c,
+	0x2a, 0x61, 0x55, 0x6d, 0xd5, 0x84, 0x3f, 0xf1, 0x1c, 0x26, 0x93, 0x20, 0x22, 0xec, 0xfd, 0x3e,
+	0x82, 0x53, 0x0a, 0x7d, 0xc2, 0xd7, 0x44, 0x1e, 0x8c, 0xe9, 0x65, 0xf5, 0x99, 0xa1, 0x4c, 0xb3,
+	0x8c, 0xc2, 0x3e, 0x72, 0x56, 0x68, 0xdf, 0x5a, 0xb1, 0xfc, 0x1b, 0x0b, 0x03, 0xca, 0xe5, 0xcf,
+	0x09, 0xc3, 0xb5, 0x62, 0x47, 0xda, 0x14, 0x0b, 0xdc, 0x12, 0x0b, 0x9c, 0xaf, 0xfc, 0x73, 0xfa,
+	0x9c, 0x02, 0x26, 0x8f, 0x7d, 0x2e, 0xd5, 0x35, 0x3b, 0xab, 0x5c, 0xb3, 0xa3, 0xba, 0x66, 0x12,
+	0x06, 0xcb, 0x23, 0xe3, 0x9f, 0x60, 0xf5, 0x36, 0x6c, 0x77, 0x16, 0x35, 0xc5, 0x17, 0x90, 0xdd,
+	0x67, 0x0a, 0x32, 0x2b, 0x7c, 0x88, 0x90, 0xcf, 0x5a, 0xed, 0x7a, 0xcd, 0xb0, 0xea, 0x50, 0x56,
+	0x93, 0xcf, 0x11, 0x06, 0x0f, 0x2b, 0xb9, 0x05, 0xe8, 0x81, 0xd1, 0xde, 0x37, 0xb5, 0xcd, 0x05,
+	0xe5, 0xb9, 0x68, 0xd1, 0x31, 0x12, 0xc0, 0xbe, 0x65, 0x5a, 0xe6, 0x53, 0xad, 0xa0, 0x48, 0xa8,
+	0xef, 0xf7, 0x49, 0x2e, 0x13, 0x76, 0xea, 0x08, 0x3b, 0x15, 0x2b, 0x7f, 0x44, 0x4e, 0x12, 0x97,
+	0xcb, 0x4a, 0xee, 0xc5, 0x01, 0x1b, 0x9d, 0x86, 0xf4, 0x12, 0x59, 0x3e, 0x71, 0x20, 0xa5, 0xf9,
+	0xfd, 0x76, 0x5b, 0xe6, 0x4d, 0x0e, 0x5f, 0x70, 0x11, 0x45, 0x8c, 0xa8, 0xa5, 0x33, 0xa2, 0x20,
+	0xef, 0xc8, 0xfc, 0x2d, 0xcb, 0x68, 0x29, 0x81, 0x2a, 0xb3, 0x8d, 0x45, 0x44, 0xad, 0xd7, 0xe9,
+	0x18, 0x5d, 0xb0, 0x13, 0x4e, 0x5e, 0x22, 0x1a, 0x6d, 0x63, 0xcf, 0xd6, 0x36, 0x2b, 0x7f, 0x90,
+	0xc1, 0xef, 0xf1, 0xe2, 0x4a, 0x58, 0x9d, 0x15, 0x2a, 0xba, 0x07, 0x4c, 0xb8, 0xe1, 0x9a, 0xcf,
+	0x5b, 0xb6, 0x63, 0xcb, 0x77, 0x1e, 0x1c, 0x23, 0xca, 0x4c, 0x8c, 0xf5, 0x14, 0xf9, 0x32, 0x47,
+	0x3d, 0x33, 0x5b, 0x7b, 0x4d, 0x47, 0x0d, 0x6a, 0x19, 0x06, 0x1c, 0x0f, 0x29, 0xa2, 0xd7, 0x40,
+	0x4e, 0x38, 0x6b, 0xe1, 0x8e, 0xa9, 0xa2, 0xaa, 0xfb, 0x90, 0x67, 0xe1, 0xe4, 0x70, 0x97, 0xdd,
+	0x12, 0xb8, 0x5a, 0xd3, 0x68, 0x75, 0x5b, 0xdd, 0xbd, 0x84, 0xe0, 0x0d, 0x4a, 0x32, 0x38, 0x30,
+	0xcf, 0x32, 0x2a, 0x3a, 0x27, 0xca, 0x70, 0x40, 0xb7, 0x7b, 0xbd, 0xbe, 0xdc, 0x30, 0xf6, 0x94,
+	0x45, 0xa3, 0x49, 0xe4, 0x55, 0x14, 0x1f, 0xcd, 0xac, 0xcb, 0x5c, 0x86, 0xfe, 0xb2, 0x27, 0x6d,
+	0x0f, 0x91, 0x21, 0xda, 0x8b, 0x7b, 0x8b, 0x86, 0x2f, 0x92, 0x13, 0x48, 0x04, 0x4e, 0x48, 0xdb,
+	0xa2, 0x05, 0x91, 0x70, 0xae, 0xb1, 0x7c, 0x47, 0xb9, 0x17, 0x2f, 0xf6, 0x4e, 0xe5, 0x77, 0xc9,
+	0xf1, 0xc4, 0xdf, 0x3f, 0x4e, 0x2c, 0x11, 0x6a, 0xd3, 0x17, 0x62, 0xa8, 0xc9, 0x8b, 0xda, 0x48,
+	0x68, 0x13, 0x63, 0x4c, 0xd6, 0xb2, 0xfd, 0x58, 0x4d, 0xfe, 0xc2, 0x55, 0x2c, 0x8a, 0x84, 0x1b,
+	0xf5, 0x03, 0xd3, 0x72, 0x5a, 0xb6, 0x29, 0xdd, 0xaf, 0xaf, 0xb8, 0x5f, 0xe5, 0xaf, 0xa2, 0xd3,
+	0xc8, 0xbf, 0x3a, 0x9e, 0xd0, 0x88, 0xde, 0x35, 0x26, 0xbc, 0x5b, 0x06, 0x83, 0xb3, 0x30, 0xb2,
+	0x78, 0x27, 0xe2, 0xc4, 0xe2, 0xd3, 0x95, 0x1f, 0xe2, 0x7c, 0xf1, 0x4e, 0x9b, 0x3f, 0x5f, 0x31,
+	0xdf, 0xa7, 0xbd, 0xe4, 0x7c, 0x71, 0x4c, 0x09, 0xc5, 0x0d, 0x49, 0xc8, 0xe6, 0x60, 0x21, 0xfb,
+	0xaf, 0xb1, 0xdb, 0x4b, 0x7f, 0x7f, 0x7d, 0x85, 0xfa, 0x76, 0x2d, 0x11, 0x28, 0xa2, 0x00, 0x92,
+	0x60, 0x4c, 0x7d, 0x28, 0x9f, 0x03, 0x63, 0xdd, 0x6f, 0x2d, 0xde, 0x68, 0x4b, 0x88, 0xa7, 0x03,
+	0x9c, 0xd5, 0xa8, 0x41, 0xdd, 0xcd, 0x2d, 0xa3, 0x80, 0xb8, 0xc7, 0xc6, 0x47, 0x38, 0x8b, 0x46,
+	0x83, 0xfa, 0x52, 0x4b, 0x57, 0xfe, 0x30, 0x8d, 0x76, 0x8f, 0x8f, 0x15, 0xcb, 0x29, 0xa8, 0x93,
+	0x4c, 0x41, 0x18, 0xc1, 0x1c, 0x88, 0x55, 0x28, 0x45, 0x70, 0x8a, 0x56, 0xbc, 0xa3, 0x46, 0x30,
+	0xf6, 0x2b, 0xd2, 0x2a, 0x4a, 0xc4, 0x05, 0xa2, 0x44, 0x45, 0xd1, 0x59, 0x74, 0xf3, 0x2c, 0x99,
+	0xad, 0x93, 0xcc, 0x2f, 0x22, 0x69, 0x4b, 0xb0, 0x65, 0x38, 0xa6, 0x4c, 0x46, 0x9d, 0x38, 0x26,
+	0x2c, 0x7e, 0x4b, 0x60, 0x81, 0xb8, 0x0a, 0x92, 0xf3, 0xb4, 0x5d, 0x24, 0xa0, 0x6e, 0xdd, 0x74,
+	0x8c, 0x56, 0x5b, 0x2b, 0xa8, 0xaa, 0x52, 0xc6, 0xe0, 0x9a, 0xda, 0x1a, 0x53, 0xa7, 0x2e, 0x92,
+	0x89, 0xd1, 0xad, 0xdb, 0x5a, 0xb1, 0xf2, 0x2f, 0x52, 0x2b, 0xbe, 0xb0, 0x0c, 0x57, 0x39, 0x71,
+	0x63, 0xc1, 0x89, 0xe9, 0xfd, 0xb7, 0x00, 0xcb, 0x1d, 0x5c, 0xac, 0x58, 0xcc, 0x00, 0x59, 0x41,
+	0x5e, 0xba, 0x68, 0x28, 0x5e, 0x93, 0x59, 0x14, 0x22, 0xeb, 0x90, 0xac, 0x88, 0x85, 0x86, 0xf4,
+	0xa7, 0x8d, 0xca, 0x7f, 0xa2, 0xdd, 0x39, 0xf9, 0xf7, 0x17, 0xc4, 0x71, 0x0f, 0x4e, 0xda, 0x76,
+	0x2d, 0x3e, 0xfe, 0xf1, 0x7b, 0x28, 0xcf, 0xe4, 0x3b, 0xee, 0x4e, 0xdf, 0x35, 0xf6, 0xf6, 0x2c,
+	0x73, 0xcf, 0xe0, 0x87, 0x74, 0x3a, 0xf1, 0x89, 0x5b, 0x2d, 0x19, 0x61, 0xf0, 0x7e, 0xf2, 0x6d,
+	0xb0, 0x24, 0xc3, 0x30, 0xda, 0x88, 0x01, 0x98, 0x02, 0x73, 0x31, 0x9f, 0x38, 0xed, 0xdb, 0x35,
+	0x6d, 0x53, 0x18, 0x5c, 0x40, 0xc5, 0x99, 0x46, 0x76, 0x7a, 0x3b, 0x7d, 0x72, 0xa3, 0x82, 0x38,
+	0x52, 0x13, 0x40, 0x24, 0x03, 0x16, 0x8b, 0x40, 0xb8, 0x14, 0x51, 0x8c, 0x31, 0xc9, 0x03, 0x93,
+	0xbc, 0xea, 0x21, 0x26, 0xc1, 0x75, 0x11, 0xc7, 0xa7, 0x4e, 0x7f, 0xd5, 0xd1, 0x7c, 0x77, 0xe5,
+	0xdf, 0xdd, 0x70, 0xc5, 0xdf, 0x10, 0x40, 0xc6, 0x06, 0x9c, 0xe7, 0x96, 0x5e, 0x17, 0x0b, 0x78,
+	0xa7, 0x67, 0x99, 0x5a, 0xaa, 0xd2, 0xa6, 0x78, 0x4c, 0xfe, 0x2d, 0x0d, 0x92, 0x24, 0x34, 0x6e,
+	0xe0, 0x1d, 0x09, 0x45, 0x16, 0xb9, 0xbf, 0xc4, 0x90, 0xb4, 0x3f, 0xcb, 0xa0, 0x6a, 0x6b, 0xbe,
+	0x32, 0x97, 0x7e, 0xd3, 0x77, 0xd4, 0x53, 0x34, 0x24, 0x27, 0xdc, 0xf9, 0x96, 0x30, 0x6e, 0xa7,
+	0x65, 0xdb, 0xb2, 0x22, 0xe5, 0xe8, 0xae, 0xf9, 0x9c, 0xce, 0x9c, 0xb6, 0x96, 0xa6, 0xba, 0x7b,
+	0x11, 0x81, 0x6c, 0x19, 0x71, 0xaf, 0x01, 0xb0, 0xc9, 0xa6, 0x68, 0x96, 0xf6, 0xf8, 0x65, 0x14,
+	0xb2, 0x6e, 0xa8, 0xac, 0xc9, 0xb6, 0x69, 0x4e, 0x65, 0x4d, 0xa0, 0x90, 0x75, 0x53, 0xc6, 0x40,
+	0xdf, 0xa1, 0x86, 0x40, 0x5e, 0x06, 0x23, 0x8c, 0x26, 0x0b, 0x42, 0x26, 0x2e, 0xaa, 0xc4, 0x4a,
+	0xd8, 0xa6, 0x83, 0xe5, 0x9b, 0x38, 0x5f, 0xaf, 0xc0, 0xe1, 0x30, 0xdb, 0x2a, 0x33, 0xaa, 0x21,
+	0x99, 0x77, 0x54, 0xe6, 0x24, 0x0e, 0x99, 0x2f, 0xea, 0x37, 0xe3, 0x95, 0x48, 0xf8, 0xd7, 0xcf,
+	0xde, 0x65, 0xf4, 0x3b, 0xf1, 0x5a, 0xa8, 0x38, 0x64, 0x05, 0x07, 0xfc, 0x3d, 0xfa, 0xc3, 0x23,
+	0x58, 0x72, 0x25, 0x6e, 0x76, 0x50, 0x5f, 0xb0, 0x51, 0x5b, 0xba, 0x05, 0x03, 0x30, 0x6c, 0x1f,
+	0x52, 0x51, 0xa5, 0xa5, 0x44, 0xb5, 0x14, 0x63, 0xda, 0xad, 0x03, 0xb3, 0x6b, 0xda, 0xf1, 0x35,
+	0x8f, 0x3d, 0xa5, 0x58, 0xd2, 0xb2, 0x0a, 0x83, 0xac, 0xa0, 0x78, 0xdf, 0xd6, 0xd6, 0xf2, 0x95,
+	0x2f, 0xb0, 0x21, 0x10, 0x5f, 0xe4, 0xc7, 0xbb, 0xfb, 0x62, 0x0b, 0x55, 0x1b, 0x64, 0xa8, 0xe5,
+	0x53, 0xc7, 0xed, 0xb4, 0xba, 0x98, 0xd1, 0x53, 0x0a, 0xcc, 0x78, 0x8e, 0xb0, 0x34, 0xc5, 0xe0,
+	0xd3, 0x15, 0x2d, 0x8c, 0x1f, 0xe1, 0x69, 0x78, 0xe1, 0x26, 0x37, 0xf9, 0x69, 0xcd, 0xc2, 0x7e,
+	0x4a, 0xb7, 0x57, 0x6b, 0x1a, 0xdd, 0x3d, 0x53, 0x36, 0xf3, 0x05, 0xc2, 0x7c, 0xba, 0x6f, 0xb4,
+	0xe5, 0x45, 0x37, 0x01, 0xed, 0x18, 0x36, 0xee, 0x5e, 0x49, 0x62, 0x3c, 0xd3, 0x67, 0xaa, 0xfb,
+	0xec, 0x3d, 0x3f, 0x38, 0xe4, 0xd7, 0x08, 0x87, 0x7e, 0x30, 0xfa, 0x08, 0xff, 0xb3, 0x1d, 0x79,
+	0xad, 0xf0, 0x93, 0x4f, 0x7f, 0xf8, 0xe9, 0xe1, 0x38, 0x3a, 0x3a, 0x7e, 0xf9, 0xd1, 0xd0, 0x9f,
+	0xde, 0x17, 0x64, 0xf7, 0x91, 0xec, 0x57, 0xe8, 0xff, 0xe4, 0x39, 0xf9, 0xec, 0xfe, 0xa1, 0xaf,
+	0xfe, 0xcf, 0x3c, 0x2f, 0x73, 0x1c, 0xf3, 0xe9, 0xff, 0x09, 0x00, 0x00, 0xff, 0xff, 0x8e, 0xea,
+	0x36, 0xd9, 0xbd, 0x67, 0x00, 0x00,
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/voltha/adapter.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/adapter.pb.go
new file mode 100644
index 0000000..04a40f2
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/adapter.pb.go
@@ -0,0 +1,266 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/adapter.proto
+
+package voltha
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	any "github.com/golang/protobuf/ptypes/any"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type AdapterConfig struct {
+	// Custom (vendor-specific) configuration attributes
+	AdditionalConfig     *any.Any `protobuf:"bytes,64,opt,name=additional_config,json=additionalConfig,proto3" json:"additional_config,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *AdapterConfig) Reset()         { *m = AdapterConfig{} }
+func (m *AdapterConfig) String() string { return proto.CompactTextString(m) }
+func (*AdapterConfig) ProtoMessage()    {}
+func (*AdapterConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7e998ce153307274, []int{0}
+}
+
+func (m *AdapterConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_AdapterConfig.Unmarshal(m, b)
+}
+func (m *AdapterConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_AdapterConfig.Marshal(b, m, deterministic)
+}
+func (m *AdapterConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_AdapterConfig.Merge(m, src)
+}
+func (m *AdapterConfig) XXX_Size() int {
+	return xxx_messageInfo_AdapterConfig.Size(m)
+}
+func (m *AdapterConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_AdapterConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_AdapterConfig proto.InternalMessageInfo
+
+func (m *AdapterConfig) GetAdditionalConfig() *any.Any {
+	if m != nil {
+		return m.AdditionalConfig
+	}
+	return nil
+}
+
+// Adapter (software plugin)
+type Adapter struct {
+	// the adapter ID has to be unique,
+	// it will be generated as Type + CurrentReplica
+	Id      string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Vendor  string `protobuf:"bytes,2,opt,name=vendor,proto3" json:"vendor,omitempty"`
+	Version string `protobuf:"bytes,3,opt,name=version,proto3" json:"version,omitempty"`
+	// Adapter configuration
+	Config *AdapterConfig `protobuf:"bytes,16,opt,name=config,proto3" json:"config,omitempty"`
+	// Custom descriptors and custom configuration
+	AdditionalDescription *any.Any `protobuf:"bytes,64,opt,name=additional_description,json=additionalDescription,proto3" json:"additional_description,omitempty"`
+	LogicalDeviceIds      []string `protobuf:"bytes,4,rep,name=logical_device_ids,json=logicalDeviceIds,proto3" json:"logical_device_ids,omitempty"`
+	// timestamp when the adapter last sent a message to the core
+	LastCommunication int64  `protobuf:"varint,5,opt,name=last_communication,json=lastCommunication,proto3" json:"last_communication,omitempty"`
+	CurrentReplica    int32  `protobuf:"varint,6,opt,name=currentReplica,proto3" json:"currentReplica,omitempty"`
+	TotalReplicas     int32  `protobuf:"varint,7,opt,name=totalReplicas,proto3" json:"totalReplicas,omitempty"`
+	Endpoint          string `protobuf:"bytes,8,opt,name=endpoint,proto3" json:"endpoint,omitempty"`
+	// all replicas of the same adapter will have the same type
+	// it is used to associate a device to an adapter
+	Type                 string   `protobuf:"bytes,9,opt,name=type,proto3" json:"type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Adapter) Reset()         { *m = Adapter{} }
+func (m *Adapter) String() string { return proto.CompactTextString(m) }
+func (*Adapter) ProtoMessage()    {}
+func (*Adapter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7e998ce153307274, []int{1}
+}
+
+func (m *Adapter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Adapter.Unmarshal(m, b)
+}
+func (m *Adapter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Adapter.Marshal(b, m, deterministic)
+}
+func (m *Adapter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Adapter.Merge(m, src)
+}
+func (m *Adapter) XXX_Size() int {
+	return xxx_messageInfo_Adapter.Size(m)
+}
+func (m *Adapter) XXX_DiscardUnknown() {
+	xxx_messageInfo_Adapter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Adapter proto.InternalMessageInfo
+
+func (m *Adapter) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *Adapter) GetVendor() string {
+	if m != nil {
+		return m.Vendor
+	}
+	return ""
+}
+
+func (m *Adapter) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *Adapter) GetConfig() *AdapterConfig {
+	if m != nil {
+		return m.Config
+	}
+	return nil
+}
+
+func (m *Adapter) GetAdditionalDescription() *any.Any {
+	if m != nil {
+		return m.AdditionalDescription
+	}
+	return nil
+}
+
+func (m *Adapter) GetLogicalDeviceIds() []string {
+	if m != nil {
+		return m.LogicalDeviceIds
+	}
+	return nil
+}
+
+func (m *Adapter) GetLastCommunication() int64 {
+	if m != nil {
+		return m.LastCommunication
+	}
+	return 0
+}
+
+func (m *Adapter) GetCurrentReplica() int32 {
+	if m != nil {
+		return m.CurrentReplica
+	}
+	return 0
+}
+
+func (m *Adapter) GetTotalReplicas() int32 {
+	if m != nil {
+		return m.TotalReplicas
+	}
+	return 0
+}
+
+func (m *Adapter) GetEndpoint() string {
+	if m != nil {
+		return m.Endpoint
+	}
+	return ""
+}
+
+func (m *Adapter) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+type Adapters struct {
+	Items                []*Adapter `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}   `json:"-"`
+	XXX_unrecognized     []byte     `json:"-"`
+	XXX_sizecache        int32      `json:"-"`
+}
+
+func (m *Adapters) Reset()         { *m = Adapters{} }
+func (m *Adapters) String() string { return proto.CompactTextString(m) }
+func (*Adapters) ProtoMessage()    {}
+func (*Adapters) Descriptor() ([]byte, []int) {
+	return fileDescriptor_7e998ce153307274, []int{2}
+}
+
+func (m *Adapters) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Adapters.Unmarshal(m, b)
+}
+func (m *Adapters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Adapters.Marshal(b, m, deterministic)
+}
+func (m *Adapters) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Adapters.Merge(m, src)
+}
+func (m *Adapters) XXX_Size() int {
+	return xxx_messageInfo_Adapters.Size(m)
+}
+func (m *Adapters) XXX_DiscardUnknown() {
+	xxx_messageInfo_Adapters.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Adapters proto.InternalMessageInfo
+
+func (m *Adapters) GetItems() []*Adapter {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*AdapterConfig)(nil), "adapter.AdapterConfig")
+	proto.RegisterType((*Adapter)(nil), "adapter.Adapter")
+	proto.RegisterType((*Adapters)(nil), "adapter.Adapters")
+}
+
+func init() { proto.RegisterFile("voltha_protos/adapter.proto", fileDescriptor_7e998ce153307274) }
+
+var fileDescriptor_7e998ce153307274 = []byte{
+	// 413 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x52, 0x4f, 0x6b, 0xdb, 0x30,
+	0x14, 0xc7, 0x49, 0xf3, 0xef, 0x95, 0x94, 0x54, 0x6c, 0x41, 0x6b, 0x2f, 0x26, 0x8c, 0xe2, 0xc3,
+	0x2a, 0x43, 0xc6, 0xee, 0x4b, 0xdb, 0xcb, 0xd8, 0x4d, 0x87, 0x1d, 0xc6, 0x20, 0x28, 0x92, 0xea,
+	0x0a, 0x1c, 0x3d, 0x23, 0x29, 0x86, 0x7c, 0x9e, 0x7d, 0xd1, 0x51, 0x59, 0x5e, 0xda, 0x1e, 0x7a,
+	0xf3, 0xef, 0xaf, 0xdf, 0x7b, 0x08, 0xae, 0x5b, 0xac, 0xc3, 0x93, 0xd8, 0x36, 0x0e, 0x03, 0xfa,
+	0x52, 0x28, 0xd1, 0x04, 0xed, 0x58, 0x84, 0x64, 0x92, 0xe0, 0xd5, 0xa7, 0x0a, 0xb1, 0xaa, 0x75,
+	0x19, 0xe9, 0xdd, 0xe1, 0xb1, 0x14, 0xf6, 0xd8, 0x79, 0x56, 0x1c, 0xe6, 0x9b, 0xce, 0x75, 0x8f,
+	0xf6, 0xd1, 0x54, 0x64, 0x03, 0x97, 0x42, 0x29, 0x13, 0x0c, 0x5a, 0x51, 0x6f, 0x65, 0x24, 0xe9,
+	0xf7, 0x3c, 0x2b, 0xce, 0xd7, 0x1f, 0x58, 0xd7, 0xc3, 0xfa, 0x1e, 0xb6, 0xb1, 0x47, 0xbe, 0x38,
+	0xd9, 0xbb, 0x8a, 0xd5, 0xdf, 0x21, 0x4c, 0x52, 0x29, 0xb9, 0x80, 0x81, 0x51, 0x34, 0xcb, 0xb3,
+	0x62, 0xc6, 0x07, 0x46, 0x91, 0x25, 0x8c, 0x5b, 0x6d, 0x15, 0x3a, 0x3a, 0x88, 0x5c, 0x42, 0x84,
+	0xc2, 0xa4, 0xd5, 0xce, 0x1b, 0xb4, 0x74, 0x18, 0x85, 0x1e, 0x12, 0x06, 0xe3, 0x34, 0xc5, 0x22,
+	0x4e, 0xb1, 0x64, 0xfd, 0x96, 0xaf, 0x06, 0xe7, 0xc9, 0x45, 0x7e, 0xc2, 0xf2, 0xc5, 0x02, 0x4a,
+	0x7b, 0xe9, 0x4c, 0xf3, 0x8c, 0xde, 0xdd, 0xe2, 0xe3, 0x29, 0xf3, 0x70, 0x8a, 0x90, 0x2f, 0x40,
+	0x6a, 0xac, 0x8c, 0x8c, 0x4d, 0xad, 0x91, 0x7a, 0x6b, 0x94, 0xa7, 0x67, 0xf9, 0xb0, 0x98, 0xf1,
+	0x45, 0x52, 0x1e, 0xa2, 0xf0, 0x43, 0x79, 0x72, 0x0b, 0xa4, 0x16, 0x3e, 0x6c, 0x25, 0xee, 0xf7,
+	0x07, 0x6b, 0xa4, 0x88, 0xbf, 0x1d, 0xe5, 0x59, 0x31, 0xe4, 0x97, 0xcf, 0xca, 0xfd, 0x4b, 0x81,
+	0xdc, 0xc0, 0x85, 0x3c, 0x38, 0xa7, 0x6d, 0xe0, 0xba, 0xa9, 0x8d, 0x14, 0x74, 0x9c, 0x67, 0xc5,
+	0x88, 0xbf, 0x61, 0xc9, 0x67, 0x98, 0x07, 0x0c, 0xa2, 0x4e, 0xd8, 0xd3, 0x49, 0xb4, 0xbd, 0x26,
+	0xc9, 0x15, 0x4c, 0xb5, 0x55, 0x0d, 0x1a, 0x1b, 0xe8, 0x34, 0x9e, 0xf0, 0x3f, 0x26, 0x04, 0xce,
+	0xc2, 0xb1, 0xd1, 0x74, 0x16, 0xf9, 0xf8, 0xbd, 0x5a, 0xc3, 0x34, 0x1d, 0xd0, 0x93, 0x1b, 0x18,
+	0x99, 0xa0, 0xf7, 0x9e, 0x66, 0xf9, 0xb0, 0x38, 0x5f, 0x2f, 0xde, 0x9e, 0x98, 0x77, 0xf2, 0xdd,
+	0x1f, 0xb8, 0x46, 0x57, 0x31, 0x6c, 0xb4, 0x95, 0xe8, 0x14, 0xeb, 0x5e, 0x5f, 0xef, 0xbe, 0x9b,
+	0xff, 0x8a, 0x38, 0x85, 0x7e, 0xb3, 0xca, 0x84, 0xa7, 0xc3, 0x8e, 0x49, 0xdc, 0x97, 0x7d, 0xa4,
+	0xec, 0x22, 0xb7, 0xe9, 0xc1, 0xb6, 0xdf, 0xca, 0x0a, 0x13, 0xb7, 0x1b, 0x47, 0xf2, 0xeb, 0xbf,
+	0x00, 0x00, 0x00, 0xff, 0xff, 0x41, 0x02, 0xed, 0xf9, 0xd5, 0x02, 0x00, 0x00,
+}
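For context, a minimal sketch of how a consumer of this vendored package might build and round-trip the generated `Adapter` message defined above. The field names and the `proto.Marshal`/`proto.Unmarshal` calls come straight from the generated code and its imports; the concrete values are purely illustrative and not taken from VOLTHA configuration.

```go
package main

import (
	"fmt"

	"github.com/golang/protobuf/proto"
	voltha "github.com/opencord/voltha-protos/v5/go/voltha"
)

func main() {
	// Illustrative values only: per the generated comments, the Id is expected
	// to be unique and derived from Type + CurrentReplica.
	a := &voltha.Adapter{
		Id:             "example_adapter_1",
		Vendor:         "Example Vendor",
		Version:        "1.0.0",
		Type:           "example_adapter",
		CurrentReplica: 1,
		TotalReplicas:  1,
		Endpoint:       "example_adapter_endpoint",
	}

	// Serialize and deserialize using the standard golang/protobuf helpers,
	// which dispatch to the generated XXX_Marshal/XXX_Unmarshal methods.
	buf, err := proto.Marshal(a)
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}

	var back voltha.Adapter
	if err := proto.Unmarshal(buf, &back); err != nil {
		fmt.Println("unmarshal failed:", err)
		return
	}
	fmt.Println(back.GetId(), back.GetVendor(), back.GetCurrentReplica())
}
```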
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/voltha/device.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/device.pb.go
new file mode 100644
index 0000000..87573f0
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/device.pb.go
@@ -0,0 +1,2588 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/device.proto
+
+package voltha
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	any "github.com/golang/protobuf/ptypes/any"
+	common "github.com/opencord/voltha-protos/v5/go/common"
+	openflow_13 "github.com/opencord/voltha-protos/v5/go/openflow_13"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type PmConfig_PmType int32
+
+const (
+	PmConfig_COUNTER PmConfig_PmType = 0
+	PmConfig_GAUGE   PmConfig_PmType = 1
+	PmConfig_STATE   PmConfig_PmType = 2
+	PmConfig_CONTEXT PmConfig_PmType = 3
+)
+
+var PmConfig_PmType_name = map[int32]string{
+	0: "COUNTER",
+	1: "GAUGE",
+	2: "STATE",
+	3: "CONTEXT",
+}
+
+var PmConfig_PmType_value = map[string]int32{
+	"COUNTER": 0,
+	"GAUGE":   1,
+	"STATE":   2,
+	"CONTEXT": 3,
+}
+
+func (x PmConfig_PmType) String() string {
+	return proto.EnumName(PmConfig_PmType_name, int32(x))
+}
+
+func (PmConfig_PmType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{2, 0}
+}
+
+type ImageDownload_ImageDownloadState int32
+
+const (
+	ImageDownload_DOWNLOAD_UNKNOWN     ImageDownload_ImageDownloadState = 0
+	ImageDownload_DOWNLOAD_SUCCEEDED   ImageDownload_ImageDownloadState = 1
+	ImageDownload_DOWNLOAD_REQUESTED   ImageDownload_ImageDownloadState = 2
+	ImageDownload_DOWNLOAD_STARTED     ImageDownload_ImageDownloadState = 3
+	ImageDownload_DOWNLOAD_FAILED      ImageDownload_ImageDownloadState = 4
+	ImageDownload_DOWNLOAD_UNSUPPORTED ImageDownload_ImageDownloadState = 5
+	ImageDownload_DOWNLOAD_CANCELLED   ImageDownload_ImageDownloadState = 6
+)
+
+var ImageDownload_ImageDownloadState_name = map[int32]string{
+	0: "DOWNLOAD_UNKNOWN",
+	1: "DOWNLOAD_SUCCEEDED",
+	2: "DOWNLOAD_REQUESTED",
+	3: "DOWNLOAD_STARTED",
+	4: "DOWNLOAD_FAILED",
+	5: "DOWNLOAD_UNSUPPORTED",
+	6: "DOWNLOAD_CANCELLED",
+}
+
+var ImageDownload_ImageDownloadState_value = map[string]int32{
+	"DOWNLOAD_UNKNOWN":     0,
+	"DOWNLOAD_SUCCEEDED":   1,
+	"DOWNLOAD_REQUESTED":   2,
+	"DOWNLOAD_STARTED":     3,
+	"DOWNLOAD_FAILED":      4,
+	"DOWNLOAD_UNSUPPORTED": 5,
+	"DOWNLOAD_CANCELLED":   6,
+}
+
+func (x ImageDownload_ImageDownloadState) String() string {
+	return proto.EnumName(ImageDownload_ImageDownloadState_name, int32(x))
+}
+
+func (ImageDownload_ImageDownloadState) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{6, 0}
+}
+
+type ImageDownload_ImageDownloadFailureReason int32
+
+const (
+	ImageDownload_NO_ERROR           ImageDownload_ImageDownloadFailureReason = 0
+	ImageDownload_INVALID_URL        ImageDownload_ImageDownloadFailureReason = 1
+	ImageDownload_DEVICE_BUSY        ImageDownload_ImageDownloadFailureReason = 2
+	ImageDownload_INSUFFICIENT_SPACE ImageDownload_ImageDownloadFailureReason = 3
+	ImageDownload_UNKNOWN_ERROR      ImageDownload_ImageDownloadFailureReason = 4
+	ImageDownload_CANCELLED          ImageDownload_ImageDownloadFailureReason = 5
+)
+
+var ImageDownload_ImageDownloadFailureReason_name = map[int32]string{
+	0: "NO_ERROR",
+	1: "INVALID_URL",
+	2: "DEVICE_BUSY",
+	3: "INSUFFICIENT_SPACE",
+	4: "UNKNOWN_ERROR",
+	5: "CANCELLED",
+}
+
+var ImageDownload_ImageDownloadFailureReason_value = map[string]int32{
+	"NO_ERROR":           0,
+	"INVALID_URL":        1,
+	"DEVICE_BUSY":        2,
+	"INSUFFICIENT_SPACE": 3,
+	"UNKNOWN_ERROR":      4,
+	"CANCELLED":          5,
+}
+
+func (x ImageDownload_ImageDownloadFailureReason) String() string {
+	return proto.EnumName(ImageDownload_ImageDownloadFailureReason_name, int32(x))
+}
+
+func (ImageDownload_ImageDownloadFailureReason) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{6, 1}
+}
+
+type ImageDownload_ImageActivateState int32
+
+const (
+	ImageDownload_IMAGE_UNKNOWN    ImageDownload_ImageActivateState = 0
+	ImageDownload_IMAGE_INACTIVE   ImageDownload_ImageActivateState = 1
+	ImageDownload_IMAGE_ACTIVATING ImageDownload_ImageActivateState = 2
+	ImageDownload_IMAGE_ACTIVE     ImageDownload_ImageActivateState = 3
+	ImageDownload_IMAGE_REVERTING  ImageDownload_ImageActivateState = 4
+	ImageDownload_IMAGE_REVERTED   ImageDownload_ImageActivateState = 5
+)
+
+var ImageDownload_ImageActivateState_name = map[int32]string{
+	0: "IMAGE_UNKNOWN",
+	1: "IMAGE_INACTIVE",
+	2: "IMAGE_ACTIVATING",
+	3: "IMAGE_ACTIVE",
+	4: "IMAGE_REVERTING",
+	5: "IMAGE_REVERTED",
+}
+
+var ImageDownload_ImageActivateState_value = map[string]int32{
+	"IMAGE_UNKNOWN":    0,
+	"IMAGE_INACTIVE":   1,
+	"IMAGE_ACTIVATING": 2,
+	"IMAGE_ACTIVE":     3,
+	"IMAGE_REVERTING":  4,
+	"IMAGE_REVERTED":   5,
+}
+
+func (x ImageDownload_ImageActivateState) String() string {
+	return proto.EnumName(ImageDownload_ImageActivateState_name, int32(x))
+}
+
+func (ImageDownload_ImageActivateState) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{6, 2}
+}
+
+type ImageState_ImageDownloadState int32
+
+const (
+	ImageState_DOWNLOAD_UNKNOWN     ImageState_ImageDownloadState = 0
+	ImageState_DOWNLOAD_SUCCEEDED   ImageState_ImageDownloadState = 1
+	ImageState_DOWNLOAD_REQUESTED   ImageState_ImageDownloadState = 2
+	ImageState_DOWNLOAD_STARTED     ImageState_ImageDownloadState = 3
+	ImageState_DOWNLOAD_FAILED      ImageState_ImageDownloadState = 4
+	ImageState_DOWNLOAD_UNSUPPORTED ImageState_ImageDownloadState = 5
+	ImageState_DOWNLOAD_CANCELLING  ImageState_ImageDownloadState = 6
+	ImageState_DOWNLOAD_CANCELLED   ImageState_ImageDownloadState = 7
+)
+
+var ImageState_ImageDownloadState_name = map[int32]string{
+	0: "DOWNLOAD_UNKNOWN",
+	1: "DOWNLOAD_SUCCEEDED",
+	2: "DOWNLOAD_REQUESTED",
+	3: "DOWNLOAD_STARTED",
+	4: "DOWNLOAD_FAILED",
+	5: "DOWNLOAD_UNSUPPORTED",
+	6: "DOWNLOAD_CANCELLING",
+	7: "DOWNLOAD_CANCELLED",
+}
+
+var ImageState_ImageDownloadState_value = map[string]int32{
+	"DOWNLOAD_UNKNOWN":     0,
+	"DOWNLOAD_SUCCEEDED":   1,
+	"DOWNLOAD_REQUESTED":   2,
+	"DOWNLOAD_STARTED":     3,
+	"DOWNLOAD_FAILED":      4,
+	"DOWNLOAD_UNSUPPORTED": 5,
+	"DOWNLOAD_CANCELLING":  6,
+	"DOWNLOAD_CANCELLED":   7,
+}
+
+func (x ImageState_ImageDownloadState) String() string {
+	return proto.EnumName(ImageState_ImageDownloadState_name, int32(x))
+}
+
+func (ImageState_ImageDownloadState) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{12, 0}
+}
+
+type ImageState_ImageFailureReason int32
+
+const (
+	ImageState_NO_ERROR               ImageState_ImageFailureReason = 0
+	ImageState_INVALID_URL            ImageState_ImageFailureReason = 1
+	ImageState_DEVICE_BUSY            ImageState_ImageFailureReason = 2
+	ImageState_INSUFFICIENT_SPACE     ImageState_ImageFailureReason = 3
+	ImageState_UNKNOWN_ERROR          ImageState_ImageFailureReason = 4
+	ImageState_CANCELLED_ON_REQUEST   ImageState_ImageFailureReason = 5
+	ImageState_CANCELLED_ON_ONU_STATE ImageState_ImageFailureReason = 6
+	ImageState_VENDOR_DEVICE_MISMATCH ImageState_ImageFailureReason = 7
+	ImageState_OMCI_TRANSFER_ERROR    ImageState_ImageFailureReason = 8
+	ImageState_IMAGE_REFUSED_BY_ONU   ImageState_ImageFailureReason = 9
+)
+
+var ImageState_ImageFailureReason_name = map[int32]string{
+	0: "NO_ERROR",
+	1: "INVALID_URL",
+	2: "DEVICE_BUSY",
+	3: "INSUFFICIENT_SPACE",
+	4: "UNKNOWN_ERROR",
+	5: "CANCELLED_ON_REQUEST",
+	6: "CANCELLED_ON_ONU_STATE",
+	7: "VENDOR_DEVICE_MISMATCH",
+	8: "OMCI_TRANSFER_ERROR",
+	9: "IMAGE_REFUSED_BY_ONU",
+}
+
+var ImageState_ImageFailureReason_value = map[string]int32{
+	"NO_ERROR":               0,
+	"INVALID_URL":            1,
+	"DEVICE_BUSY":            2,
+	"INSUFFICIENT_SPACE":     3,
+	"UNKNOWN_ERROR":          4,
+	"CANCELLED_ON_REQUEST":   5,
+	"CANCELLED_ON_ONU_STATE": 6,
+	"VENDOR_DEVICE_MISMATCH": 7,
+	"OMCI_TRANSFER_ERROR":    8,
+	"IMAGE_REFUSED_BY_ONU":   9,
+}
+
+func (x ImageState_ImageFailureReason) String() string {
+	return proto.EnumName(ImageState_ImageFailureReason_name, int32(x))
+}
+
+func (ImageState_ImageFailureReason) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{12, 1}
+}
+
+type ImageState_ImageActivationState int32
+
+const (
+	ImageState_IMAGE_UNKNOWN             ImageState_ImageActivationState = 0
+	ImageState_IMAGE_INACTIVE            ImageState_ImageActivationState = 1
+	ImageState_IMAGE_ACTIVATING          ImageState_ImageActivationState = 2
+	ImageState_IMAGE_ACTIVE              ImageState_ImageActivationState = 3
+	ImageState_IMAGE_COMMITTING          ImageState_ImageActivationState = 4
+	ImageState_IMAGE_COMMITTED           ImageState_ImageActivationState = 5
+	ImageState_IMAGE_ACTIVATION_ABORTING ImageState_ImageActivationState = 6
+	ImageState_IMAGE_ACTIVATION_ABORTED  ImageState_ImageActivationState = 7
+	ImageState_IMAGE_COMMIT_ABORTING     ImageState_ImageActivationState = 8
+	ImageState_IMAGE_COMMIT_ABORTED      ImageState_ImageActivationState = 9
+	ImageState_IMAGE_DOWNLOADING         ImageState_ImageActivationState = 10
+)
+
+var ImageState_ImageActivationState_name = map[int32]string{
+	0:  "IMAGE_UNKNOWN",
+	1:  "IMAGE_INACTIVE",
+	2:  "IMAGE_ACTIVATING",
+	3:  "IMAGE_ACTIVE",
+	4:  "IMAGE_COMMITTING",
+	5:  "IMAGE_COMMITTED",
+	6:  "IMAGE_ACTIVATION_ABORTING",
+	7:  "IMAGE_ACTIVATION_ABORTED",
+	8:  "IMAGE_COMMIT_ABORTING",
+	9:  "IMAGE_COMMIT_ABORTED",
+	10: "IMAGE_DOWNLOADING",
+}
+
+var ImageState_ImageActivationState_value = map[string]int32{
+	"IMAGE_UNKNOWN":             0,
+	"IMAGE_INACTIVE":            1,
+	"IMAGE_ACTIVATING":          2,
+	"IMAGE_ACTIVE":              3,
+	"IMAGE_COMMITTING":          4,
+	"IMAGE_COMMITTED":           5,
+	"IMAGE_ACTIVATION_ABORTING": 6,
+	"IMAGE_ACTIVATION_ABORTED":  7,
+	"IMAGE_COMMIT_ABORTING":     8,
+	"IMAGE_COMMIT_ABORTED":      9,
+	"IMAGE_DOWNLOADING":         10,
+}
+
+func (x ImageState_ImageActivationState) String() string {
+	return proto.EnumName(ImageState_ImageActivationState_name, int32(x))
+}
+
+func (ImageState_ImageActivationState) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{12, 2}
+}
+
+type Port_PortType int32
+
+const (
+	Port_UNKNOWN      Port_PortType = 0
+	Port_ETHERNET_NNI Port_PortType = 1
+	Port_ETHERNET_UNI Port_PortType = 2
+	Port_PON_OLT      Port_PortType = 3
+	Port_PON_ONU      Port_PortType = 4
+	Port_VENET_OLT    Port_PortType = 5
+	Port_VENET_ONU    Port_PortType = 6
+)
+
+var Port_PortType_name = map[int32]string{
+	0: "UNKNOWN",
+	1: "ETHERNET_NNI",
+	2: "ETHERNET_UNI",
+	3: "PON_OLT",
+	4: "PON_ONU",
+	5: "VENET_OLT",
+	6: "VENET_ONU",
+}
+
+var Port_PortType_value = map[string]int32{
+	"UNKNOWN":      0,
+	"ETHERNET_NNI": 1,
+	"ETHERNET_UNI": 2,
+	"PON_OLT":      3,
+	"PON_ONU":      4,
+	"VENET_OLT":    5,
+	"VENET_ONU":    6,
+}
+
+func (x Port_PortType) String() string {
+	return proto.EnumName(Port_PortType_name, int32(x))
+}
+
+func (Port_PortType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{13, 0}
+}
+
+type SelfTestResponse_SelfTestResult int32
+
+const (
+	SelfTestResponse_SUCCESS       SelfTestResponse_SelfTestResult = 0
+	SelfTestResponse_FAILURE       SelfTestResponse_SelfTestResult = 1
+	SelfTestResponse_NOT_SUPPORTED SelfTestResponse_SelfTestResult = 2
+	SelfTestResponse_UNKNOWN_ERROR SelfTestResponse_SelfTestResult = 3
+)
+
+var SelfTestResponse_SelfTestResult_name = map[int32]string{
+	0: "SUCCESS",
+	1: "FAILURE",
+	2: "NOT_SUPPORTED",
+	3: "UNKNOWN_ERROR",
+}
+
+var SelfTestResponse_SelfTestResult_value = map[string]int32{
+	"SUCCESS":       0,
+	"FAILURE":       1,
+	"NOT_SUPPORTED": 2,
+	"UNKNOWN_ERROR": 3,
+}
+
+func (x SelfTestResponse_SelfTestResult) String() string {
+	return proto.EnumName(SelfTestResponse_SelfTestResult_name, int32(x))
+}
+
+func (SelfTestResponse_SelfTestResult) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{19, 0}
+}
+
+type SimulateAlarmRequest_OperationType int32
+
+const (
+	SimulateAlarmRequest_RAISE SimulateAlarmRequest_OperationType = 0
+	SimulateAlarmRequest_CLEAR SimulateAlarmRequest_OperationType = 1
+)
+
+var SimulateAlarmRequest_OperationType_name = map[int32]string{
+	0: "RAISE",
+	1: "CLEAR",
+}
+
+var SimulateAlarmRequest_OperationType_value = map[string]int32{
+	"RAISE": 0,
+	"CLEAR": 1,
+}
+
+func (x SimulateAlarmRequest_OperationType) String() string {
+	return proto.EnumName(SimulateAlarmRequest_OperationType_name, int32(x))
+}
+
+func (SimulateAlarmRequest_OperationType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{21, 0}
+}
+
+// A Device Type
+type DeviceType struct {
+	// Unique name for the device type
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Unique vendor id for the device type applicable to ONU
+	// 4 bytes of vendor id from ONU serial number
+	VendorId  string   `protobuf:"bytes,5,opt,name=vendor_id,json=vendorId,proto3" json:"vendor_id,omitempty"`
+	VendorIds []string `protobuf:"bytes,6,rep,name=vendor_ids,json=vendorIds,proto3" json:"vendor_ids,omitempty"`
+	// Name of the adapter that handles device type
+	// Deprecated and replaced by adapterType
+	Adapter string `protobuf:"bytes,2,opt,name=adapter,proto3" json:"adapter,omitempty"`
+	// Capabilities
+	AcceptsBulkFlowUpdate           bool `protobuf:"varint,3,opt,name=accepts_bulk_flow_update,json=acceptsBulkFlowUpdate,proto3" json:"accepts_bulk_flow_update,omitempty"`
+	AcceptsAddRemoveFlowUpdates     bool `protobuf:"varint,4,opt,name=accepts_add_remove_flow_updates,json=acceptsAddRemoveFlowUpdates,proto3" json:"accepts_add_remove_flow_updates,omitempty"`
+	AcceptsDirectLogicalFlowsUpdate bool `protobuf:"varint,7,opt,name=accepts_direct_logical_flows_update,json=acceptsDirectLogicalFlowsUpdate,proto3" json:"accepts_direct_logical_flows_update,omitempty"`
+	// Type of adapter that can handle this device type
+	AdapterType          string   `protobuf:"bytes,8,opt,name=adapter_type,json=adapterType,proto3" json:"adapter_type,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeviceType) Reset()         { *m = DeviceType{} }
+func (m *DeviceType) String() string { return proto.CompactTextString(m) }
+func (*DeviceType) ProtoMessage()    {}
+func (*DeviceType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{0}
+}
+
+func (m *DeviceType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceType.Unmarshal(m, b)
+}
+func (m *DeviceType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceType.Marshal(b, m, deterministic)
+}
+func (m *DeviceType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceType.Merge(m, src)
+}
+func (m *DeviceType) XXX_Size() int {
+	return xxx_messageInfo_DeviceType.Size(m)
+}
+func (m *DeviceType) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceType proto.InternalMessageInfo
+
+func (m *DeviceType) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *DeviceType) GetVendorId() string {
+	if m != nil {
+		return m.VendorId
+	}
+	return ""
+}
+
+func (m *DeviceType) GetVendorIds() []string {
+	if m != nil {
+		return m.VendorIds
+	}
+	return nil
+}
+
+func (m *DeviceType) GetAdapter() string {
+	if m != nil {
+		return m.Adapter
+	}
+	return ""
+}
+
+func (m *DeviceType) GetAcceptsBulkFlowUpdate() bool {
+	if m != nil {
+		return m.AcceptsBulkFlowUpdate
+	}
+	return false
+}
+
+func (m *DeviceType) GetAcceptsAddRemoveFlowUpdates() bool {
+	if m != nil {
+		return m.AcceptsAddRemoveFlowUpdates
+	}
+	return false
+}
+
+func (m *DeviceType) GetAcceptsDirectLogicalFlowsUpdate() bool {
+	if m != nil {
+		return m.AcceptsDirectLogicalFlowsUpdate
+	}
+	return false
+}
+
+func (m *DeviceType) GetAdapterType() string {
+	if m != nil {
+		return m.AdapterType
+	}
+	return ""
+}
+
+// A plurality of device types
+type DeviceTypes struct {
+	Items                []*DeviceType `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}      `json:"-"`
+	XXX_unrecognized     []byte        `json:"-"`
+	XXX_sizecache        int32         `json:"-"`
+}
+
+func (m *DeviceTypes) Reset()         { *m = DeviceTypes{} }
+func (m *DeviceTypes) String() string { return proto.CompactTextString(m) }
+func (*DeviceTypes) ProtoMessage()    {}
+func (*DeviceTypes) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{1}
+}
+
+func (m *DeviceTypes) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceTypes.Unmarshal(m, b)
+}
+func (m *DeviceTypes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceTypes.Marshal(b, m, deterministic)
+}
+func (m *DeviceTypes) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceTypes.Merge(m, src)
+}
+func (m *DeviceTypes) XXX_Size() int {
+	return xxx_messageInfo_DeviceTypes.Size(m)
+}
+func (m *DeviceTypes) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceTypes.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceTypes proto.InternalMessageInfo
+
+func (m *DeviceTypes) GetItems() []*DeviceType {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type PmConfig struct {
+	Name                 string          `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	Type                 PmConfig_PmType `protobuf:"varint,2,opt,name=type,proto3,enum=device.PmConfig_PmType" json:"type,omitempty"`
+	Enabled              bool            `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"`
+	SampleFreq           uint32          `protobuf:"varint,4,opt,name=sample_freq,json=sampleFreq,proto3" json:"sample_freq,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *PmConfig) Reset()         { *m = PmConfig{} }
+func (m *PmConfig) String() string { return proto.CompactTextString(m) }
+func (*PmConfig) ProtoMessage()    {}
+func (*PmConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{2}
+}
+
+func (m *PmConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PmConfig.Unmarshal(m, b)
+}
+func (m *PmConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PmConfig.Marshal(b, m, deterministic)
+}
+func (m *PmConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PmConfig.Merge(m, src)
+}
+func (m *PmConfig) XXX_Size() int {
+	return xxx_messageInfo_PmConfig.Size(m)
+}
+func (m *PmConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_PmConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PmConfig proto.InternalMessageInfo
+
+func (m *PmConfig) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *PmConfig) GetType() PmConfig_PmType {
+	if m != nil {
+		return m.Type
+	}
+	return PmConfig_COUNTER
+}
+
+func (m *PmConfig) GetEnabled() bool {
+	if m != nil {
+		return m.Enabled
+	}
+	return false
+}
+
+func (m *PmConfig) GetSampleFreq() uint32 {
+	if m != nil {
+		return m.SampleFreq
+	}
+	return 0
+}
+
+type PmGroupConfig struct {
+	GroupName            string      `protobuf:"bytes,1,opt,name=group_name,json=groupName,proto3" json:"group_name,omitempty"`
+	GroupFreq            uint32      `protobuf:"varint,2,opt,name=group_freq,json=groupFreq,proto3" json:"group_freq,omitempty"`
+	Enabled              bool        `protobuf:"varint,3,opt,name=enabled,proto3" json:"enabled,omitempty"`
+	Metrics              []*PmConfig `protobuf:"bytes,4,rep,name=metrics,proto3" json:"metrics,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *PmGroupConfig) Reset()         { *m = PmGroupConfig{} }
+func (m *PmGroupConfig) String() string { return proto.CompactTextString(m) }
+func (*PmGroupConfig) ProtoMessage()    {}
+func (*PmGroupConfig) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{3}
+}
+
+func (m *PmGroupConfig) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PmGroupConfig.Unmarshal(m, b)
+}
+func (m *PmGroupConfig) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PmGroupConfig.Marshal(b, m, deterministic)
+}
+func (m *PmGroupConfig) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PmGroupConfig.Merge(m, src)
+}
+func (m *PmGroupConfig) XXX_Size() int {
+	return xxx_messageInfo_PmGroupConfig.Size(m)
+}
+func (m *PmGroupConfig) XXX_DiscardUnknown() {
+	xxx_messageInfo_PmGroupConfig.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PmGroupConfig proto.InternalMessageInfo
+
+func (m *PmGroupConfig) GetGroupName() string {
+	if m != nil {
+		return m.GroupName
+	}
+	return ""
+}
+
+func (m *PmGroupConfig) GetGroupFreq() uint32 {
+	if m != nil {
+		return m.GroupFreq
+	}
+	return 0
+}
+
+func (m *PmGroupConfig) GetEnabled() bool {
+	if m != nil {
+		return m.Enabled
+	}
+	return false
+}
+
+func (m *PmGroupConfig) GetMetrics() []*PmConfig {
+	if m != nil {
+		return m.Metrics
+	}
+	return nil
+}
+
+type PmConfigs struct {
+	Id          string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	DefaultFreq uint32 `protobuf:"varint,2,opt,name=default_freq,json=defaultFreq,proto3" json:"default_freq,omitempty"`
+	// Forces group names and group semantics
+	Grouped bool `protobuf:"varint,3,opt,name=grouped,proto3" json:"grouped,omitempty"`
+	// Allows Pm to set an individual sample frequency
+	FreqOverride         bool             `protobuf:"varint,4,opt,name=freq_override,json=freqOverride,proto3" json:"freq_override,omitempty"`
+	Groups               []*PmGroupConfig `protobuf:"bytes,5,rep,name=groups,proto3" json:"groups,omitempty"`
+	Metrics              []*PmConfig      `protobuf:"bytes,6,rep,name=metrics,proto3" json:"metrics,omitempty"`
+	MaxSkew              uint32           `protobuf:"varint,7,opt,name=max_skew,json=maxSkew,proto3" json:"max_skew,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *PmConfigs) Reset()         { *m = PmConfigs{} }
+func (m *PmConfigs) String() string { return proto.CompactTextString(m) }
+func (*PmConfigs) ProtoMessage()    {}
+func (*PmConfigs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{4}
+}
+
+func (m *PmConfigs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_PmConfigs.Unmarshal(m, b)
+}
+func (m *PmConfigs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_PmConfigs.Marshal(b, m, deterministic)
+}
+func (m *PmConfigs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_PmConfigs.Merge(m, src)
+}
+func (m *PmConfigs) XXX_Size() int {
+	return xxx_messageInfo_PmConfigs.Size(m)
+}
+func (m *PmConfigs) XXX_DiscardUnknown() {
+	xxx_messageInfo_PmConfigs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_PmConfigs proto.InternalMessageInfo
+
+func (m *PmConfigs) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *PmConfigs) GetDefaultFreq() uint32 {
+	if m != nil {
+		return m.DefaultFreq
+	}
+	return 0
+}
+
+func (m *PmConfigs) GetGrouped() bool {
+	if m != nil {
+		return m.Grouped
+	}
+	return false
+}
+
+func (m *PmConfigs) GetFreqOverride() bool {
+	if m != nil {
+		return m.FreqOverride
+	}
+	return false
+}
+
+func (m *PmConfigs) GetGroups() []*PmGroupConfig {
+	if m != nil {
+		return m.Groups
+	}
+	return nil
+}
+
+func (m *PmConfigs) GetMetrics() []*PmConfig {
+	if m != nil {
+		return m.Metrics
+	}
+	return nil
+}
+
+func (m *PmConfigs) GetMaxSkew() uint32 {
+	if m != nil {
+		return m.MaxSkew
+	}
+	return 0
+}
+
+//Object representing an image
+type Image struct {
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Version is the sole identifier of the image. It is the vendor-specified OMCI version and
+	// must be known at the time of initiating a download, activate, or commit of an image on an ONU.
+	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+	// hash of the image to be verified against
+	// Deprecated in voltha 2.8, will be removed
+	Hash uint32 `protobuf:"varint,3,opt,name=hash,proto3" json:"hash,omitempty"`
+	// Deprecated in voltha 2.8, will be removed
+	InstallDatetime string `protobuf:"bytes,4,opt,name=install_datetime,json=installDatetime,proto3" json:"install_datetime,omitempty"`
+	// The active software image is one that is currently loaded and executing
+	// in the ONU or circuit pack. Under normal operation, one software image
+	// is always active while the other is inactive. Under no circumstances are
+	// both software images allowed to be active at the same time
+	// Deprecated in voltha 2.8, will be removed
+	IsActive bool `protobuf:"varint,5,opt,name=is_active,json=isActive,proto3" json:"is_active,omitempty"`
+	// The committed software image is loaded and executed upon reboot of the
+	// ONU and/or circuit pack. During normal operation, one software image is
+	// always committed, while the other is uncommitted.
+	// Deprecated in voltha 2.8, will be removed
+	IsCommitted bool `protobuf:"varint,6,opt,name=is_committed,json=isCommitted,proto3" json:"is_committed,omitempty"`
+	// A software image is valid if it has been verified to be an executable
+	// code image. The verification mechanism is not subject to standardization;
+	// however, it should include at least a data integrity (e.g., CRC) check of
+	// the entire code image.
+	// Deprecated in voltha 2.8, will be removed
+	IsValid bool `protobuf:"varint,7,opt,name=is_valid,json=isValid,proto3" json:"is_valid,omitempty"`
+	// URL where the image is available
+	// URL MUST be fully qualified,
+	// including filename, username and password
+	Url string `protobuf:"bytes,8,opt,name=url,proto3" json:"url,omitempty"`
+	// Represents the vendor/device mfr
+	// Needs to match the vendor of the device the image will be installed on
+	// optional, if not matched no check will be performed
+	Vendor string `protobuf:"bytes,9,opt,name=vendor,proto3" json:"vendor,omitempty"`
+	// Represents the ONU Image CRC value.
+	// Defaults to 0 if not specified.
+	// If different from 0, it is used to verify the image retrieved from outside before sending it to the ONU.
+	// Calculation of this needs to be done according to ITU-T I.363.5 as per OMCI spec (section A.2.27)
+	Crc32                uint32   `protobuf:"varint,10,opt,name=crc32,proto3" json:"crc32,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Image) Reset()         { *m = Image{} }
+func (m *Image) String() string { return proto.CompactTextString(m) }
+func (*Image) ProtoMessage()    {}
+func (*Image) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{5}
+}
+
+func (m *Image) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Image.Unmarshal(m, b)
+}
+func (m *Image) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Image.Marshal(b, m, deterministic)
+}
+func (m *Image) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Image.Merge(m, src)
+}
+func (m *Image) XXX_Size() int {
+	return xxx_messageInfo_Image.Size(m)
+}
+func (m *Image) XXX_DiscardUnknown() {
+	xxx_messageInfo_Image.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Image proto.InternalMessageInfo
+
+func (m *Image) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *Image) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *Image) GetHash() uint32 {
+	if m != nil {
+		return m.Hash
+	}
+	return 0
+}
+
+func (m *Image) GetInstallDatetime() string {
+	if m != nil {
+		return m.InstallDatetime
+	}
+	return ""
+}
+
+func (m *Image) GetIsActive() bool {
+	if m != nil {
+		return m.IsActive
+	}
+	return false
+}
+
+func (m *Image) GetIsCommitted() bool {
+	if m != nil {
+		return m.IsCommitted
+	}
+	return false
+}
+
+func (m *Image) GetIsValid() bool {
+	if m != nil {
+		return m.IsValid
+	}
+	return false
+}
+
+func (m *Image) GetUrl() string {
+	if m != nil {
+		return m.Url
+	}
+	return ""
+}
+
+func (m *Image) GetVendor() string {
+	if m != nil {
+		return m.Vendor
+	}
+	return ""
+}
+
+func (m *Image) GetCrc32() uint32 {
+	if m != nil {
+		return m.Crc32
+	}
+	return 0
+}
+
+// Older version of the API; please see DeviceImageDownloadRequest.
+// Deprecated in voltha 2.8, will be removed
+//
+// Deprecated: Do not use.
+type ImageDownload struct {
+	// Device Identifier
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Image unique identifier
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// URL where the image is available
+	// should include username and password
+	Url string `protobuf:"bytes,3,opt,name=url,proto3" json:"url,omitempty"`
+	// CRC of the image to be verified against
+	Crc uint32 `protobuf:"varint,4,opt,name=crc,proto3" json:"crc,omitempty"`
+	// Download state
+	DownloadState ImageDownload_ImageDownloadState `protobuf:"varint,5,opt,name=download_state,json=downloadState,proto3,enum=device.ImageDownload_ImageDownloadState" json:"download_state,omitempty"`
+	// Downloaded version
+	ImageVersion string `protobuf:"bytes,6,opt,name=image_version,json=imageVersion,proto3" json:"image_version,omitempty"`
+	// Bytes downloaded
+	DownloadedBytes uint32 `protobuf:"varint,7,opt,name=downloaded_bytes,json=downloadedBytes,proto3" json:"downloaded_bytes,omitempty"`
+	// Download failure reason
+	Reason ImageDownload_ImageDownloadFailureReason `protobuf:"varint,8,opt,name=reason,proto3,enum=device.ImageDownload_ImageDownloadFailureReason" json:"reason,omitempty"`
+	// Additional info
+	AdditionalInfo string `protobuf:"bytes,9,opt,name=additional_info,json=additionalInfo,proto3" json:"additional_info,omitempty"`
+	// Save current configuration
+	SaveConfig bool `protobuf:"varint,10,opt,name=save_config,json=saveConfig,proto3" json:"save_config,omitempty"`
+	// Image local location
+	LocalDir string `protobuf:"bytes,11,opt,name=local_dir,json=localDir,proto3" json:"local_dir,omitempty"`
+	// Image activation state
+	ImageState ImageDownload_ImageActivateState `protobuf:"varint,12,opt,name=image_state,json=imageState,proto3,enum=device.ImageDownload_ImageActivateState" json:"image_state,omitempty"`
+	// Image file size
+	FileSize             uint32   `protobuf:"varint,13,opt,name=file_size,json=fileSize,proto3" json:"file_size,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ImageDownload) Reset()         { *m = ImageDownload{} }
+func (m *ImageDownload) String() string { return proto.CompactTextString(m) }
+func (*ImageDownload) ProtoMessage()    {}
+func (*ImageDownload) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{6}
+}
+
+func (m *ImageDownload) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ImageDownload.Unmarshal(m, b)
+}
+func (m *ImageDownload) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ImageDownload.Marshal(b, m, deterministic)
+}
+func (m *ImageDownload) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageDownload.Merge(m, src)
+}
+func (m *ImageDownload) XXX_Size() int {
+	return xxx_messageInfo_ImageDownload.Size(m)
+}
+func (m *ImageDownload) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageDownload.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageDownload proto.InternalMessageInfo
+
+func (m *ImageDownload) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *ImageDownload) GetName() string {
+	if m != nil {
+		return m.Name
+	}
+	return ""
+}
+
+func (m *ImageDownload) GetUrl() string {
+	if m != nil {
+		return m.Url
+	}
+	return ""
+}
+
+func (m *ImageDownload) GetCrc() uint32 {
+	if m != nil {
+		return m.Crc
+	}
+	return 0
+}
+
+func (m *ImageDownload) GetDownloadState() ImageDownload_ImageDownloadState {
+	if m != nil {
+		return m.DownloadState
+	}
+	return ImageDownload_DOWNLOAD_UNKNOWN
+}
+
+func (m *ImageDownload) GetImageVersion() string {
+	if m != nil {
+		return m.ImageVersion
+	}
+	return ""
+}
+
+func (m *ImageDownload) GetDownloadedBytes() uint32 {
+	if m != nil {
+		return m.DownloadedBytes
+	}
+	return 0
+}
+
+func (m *ImageDownload) GetReason() ImageDownload_ImageDownloadFailureReason {
+	if m != nil {
+		return m.Reason
+	}
+	return ImageDownload_NO_ERROR
+}
+
+func (m *ImageDownload) GetAdditionalInfo() string {
+	if m != nil {
+		return m.AdditionalInfo
+	}
+	return ""
+}
+
+func (m *ImageDownload) GetSaveConfig() bool {
+	if m != nil {
+		return m.SaveConfig
+	}
+	return false
+}
+
+func (m *ImageDownload) GetLocalDir() string {
+	if m != nil {
+		return m.LocalDir
+	}
+	return ""
+}
+
+func (m *ImageDownload) GetImageState() ImageDownload_ImageActivateState {
+	if m != nil {
+		return m.ImageState
+	}
+	return ImageDownload_IMAGE_UNKNOWN
+}
+
+func (m *ImageDownload) GetFileSize() uint32 {
+	if m != nil {
+		return m.FileSize
+	}
+	return 0
+}
+
+// Deprecated in voltha 2.8, will be removed
+//
+// Deprecated: Do not use.
+type ImageDownloads struct {
+	Items                []*ImageDownload `protobuf:"bytes,2,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *ImageDownloads) Reset()         { *m = ImageDownloads{} }
+func (m *ImageDownloads) String() string { return proto.CompactTextString(m) }
+func (*ImageDownloads) ProtoMessage()    {}
+func (*ImageDownloads) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{7}
+}
+
+func (m *ImageDownloads) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ImageDownloads.Unmarshal(m, b)
+}
+func (m *ImageDownloads) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ImageDownloads.Marshal(b, m, deterministic)
+}
+func (m *ImageDownloads) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageDownloads.Merge(m, src)
+}
+func (m *ImageDownloads) XXX_Size() int {
+	return xxx_messageInfo_ImageDownloads.Size(m)
+}
+func (m *ImageDownloads) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageDownloads.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageDownloads proto.InternalMessageInfo
+
+func (m *ImageDownloads) GetItems() []*ImageDownload {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type Images struct {
+	Image                []*Image `protobuf:"bytes,1,rep,name=image,proto3" json:"image,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Images) Reset()         { *m = Images{} }
+func (m *Images) String() string { return proto.CompactTextString(m) }
+func (*Images) ProtoMessage()    {}
+func (*Images) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{8}
+}
+
+func (m *Images) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Images.Unmarshal(m, b)
+}
+func (m *Images) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Images.Marshal(b, m, deterministic)
+}
+func (m *Images) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Images.Merge(m, src)
+}
+func (m *Images) XXX_Size() int {
+	return xxx_messageInfo_Images.Size(m)
+}
+func (m *Images) XXX_DiscardUnknown() {
+	xxx_messageInfo_Images.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Images proto.InternalMessageInfo
+
+func (m *Images) GetImage() []*Image {
+	if m != nil {
+		return m.Image
+	}
+	return nil
+}
+
+// OnuImage represents the OMCI information as per the OMCI spec;
+// the information is populated exactly as extracted from the device.
+type OnuImage struct {
+	//image version
+	Version     string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+	IsCommited  bool   `protobuf:"varint,2,opt,name=isCommited,proto3" json:"isCommited,omitempty"`
+	IsActive    bool   `protobuf:"varint,3,opt,name=isActive,proto3" json:"isActive,omitempty"`
+	IsValid     bool   `protobuf:"varint,4,opt,name=isValid,proto3" json:"isValid,omitempty"`
+	ProductCode string `protobuf:"bytes,5,opt,name=productCode,proto3" json:"productCode,omitempty"`
+	// Hash is computed by the ONU and is optional as per OMCI spec (paragraph 9.1.4)
+	// No assumption should be made on the existence of this attribute at any time.
+	Hash                 string   `protobuf:"bytes,6,opt,name=hash,proto3" json:"hash,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *OnuImage) Reset()         { *m = OnuImage{} }
+func (m *OnuImage) String() string { return proto.CompactTextString(m) }
+func (*OnuImage) ProtoMessage()    {}
+func (*OnuImage) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{9}
+}
+
+func (m *OnuImage) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuImage.Unmarshal(m, b)
+}
+func (m *OnuImage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuImage.Marshal(b, m, deterministic)
+}
+func (m *OnuImage) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuImage.Merge(m, src)
+}
+func (m *OnuImage) XXX_Size() int {
+	return xxx_messageInfo_OnuImage.Size(m)
+}
+func (m *OnuImage) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuImage.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuImage proto.InternalMessageInfo
+
+func (m *OnuImage) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *OnuImage) GetIsCommited() bool {
+	if m != nil {
+		return m.IsCommited
+	}
+	return false
+}
+
+func (m *OnuImage) GetIsActive() bool {
+	if m != nil {
+		return m.IsActive
+	}
+	return false
+}
+
+func (m *OnuImage) GetIsValid() bool {
+	if m != nil {
+		return m.IsValid
+	}
+	return false
+}
+
+func (m *OnuImage) GetProductCode() string {
+	if m != nil {
+		return m.ProductCode
+	}
+	return ""
+}
+
+func (m *OnuImage) GetHash() string {
+	if m != nil {
+		return m.Hash
+	}
+	return ""
+}
+
+type OnuImages struct {
+	Items                []*OnuImage `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *OnuImages) Reset()         { *m = OnuImages{} }
+func (m *OnuImages) String() string { return proto.CompactTextString(m) }
+func (*OnuImages) ProtoMessage()    {}
+func (*OnuImages) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{10}
+}
+
+func (m *OnuImages) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_OnuImages.Unmarshal(m, b)
+}
+func (m *OnuImages) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_OnuImages.Marshal(b, m, deterministic)
+}
+func (m *OnuImages) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_OnuImages.Merge(m, src)
+}
+func (m *OnuImages) XXX_Size() int {
+	return xxx_messageInfo_OnuImages.Size(m)
+}
+func (m *OnuImages) XXX_DiscardUnknown() {
+	xxx_messageInfo_OnuImages.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_OnuImages proto.InternalMessageInfo
+
+func (m *OnuImages) GetItems() []*OnuImage {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type DeviceImageState struct {
+	DeviceId             string      `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	ImageState           *ImageState `protobuf:"bytes,2,opt,name=imageState,proto3" json:"imageState,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}    `json:"-"`
+	XXX_unrecognized     []byte      `json:"-"`
+	XXX_sizecache        int32       `json:"-"`
+}
+
+func (m *DeviceImageState) Reset()         { *m = DeviceImageState{} }
+func (m *DeviceImageState) String() string { return proto.CompactTextString(m) }
+func (*DeviceImageState) ProtoMessage()    {}
+func (*DeviceImageState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{11}
+}
+
+func (m *DeviceImageState) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceImageState.Unmarshal(m, b)
+}
+func (m *DeviceImageState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceImageState.Marshal(b, m, deterministic)
+}
+func (m *DeviceImageState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceImageState.Merge(m, src)
+}
+func (m *DeviceImageState) XXX_Size() int {
+	return xxx_messageInfo_DeviceImageState.Size(m)
+}
+func (m *DeviceImageState) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceImageState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceImageState proto.InternalMessageInfo
+
+func (m *DeviceImageState) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *DeviceImageState) GetImageState() *ImageState {
+	if m != nil {
+		return m.ImageState
+	}
+	return nil
+}
+
+type ImageState struct {
+	// image version
+	Version string `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+	// Download state
+	DownloadState ImageState_ImageDownloadState `protobuf:"varint,2,opt,name=download_state,json=downloadState,proto3,enum=device.ImageState_ImageDownloadState" json:"download_state,omitempty"`
+	// Image Operation Failure reason (use for both Download and Activate)
+	Reason ImageState_ImageFailureReason `protobuf:"varint,3,opt,name=reason,proto3,enum=device.ImageState_ImageFailureReason" json:"reason,omitempty"`
+	// Image activation state
+	ImageState           ImageState_ImageActivationState `protobuf:"varint,4,opt,name=image_state,json=imageState,proto3,enum=device.ImageState_ImageActivationState" json:"image_state,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
+	XXX_unrecognized     []byte                          `json:"-"`
+	XXX_sizecache        int32                           `json:"-"`
+}
+
+func (m *ImageState) Reset()         { *m = ImageState{} }
+func (m *ImageState) String() string { return proto.CompactTextString(m) }
+func (*ImageState) ProtoMessage()    {}
+func (*ImageState) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{12}
+}
+
+func (m *ImageState) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ImageState.Unmarshal(m, b)
+}
+func (m *ImageState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ImageState.Marshal(b, m, deterministic)
+}
+func (m *ImageState) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ImageState.Merge(m, src)
+}
+func (m *ImageState) XXX_Size() int {
+	return xxx_messageInfo_ImageState.Size(m)
+}
+func (m *ImageState) XXX_DiscardUnknown() {
+	xxx_messageInfo_ImageState.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ImageState proto.InternalMessageInfo
+
+func (m *ImageState) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *ImageState) GetDownloadState() ImageState_ImageDownloadState {
+	if m != nil {
+		return m.DownloadState
+	}
+	return ImageState_DOWNLOAD_UNKNOWN
+}
+
+func (m *ImageState) GetReason() ImageState_ImageFailureReason {
+	if m != nil {
+		return m.Reason
+	}
+	return ImageState_NO_ERROR
+}
+
+func (m *ImageState) GetImageState() ImageState_ImageActivationState {
+	if m != nil {
+		return m.ImageState
+	}
+	return ImageState_IMAGE_UNKNOWN
+}
+
+type Port struct {
+	PortNo     uint32                  `protobuf:"varint,1,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	Label      string                  `protobuf:"bytes,2,opt,name=label,proto3" json:"label,omitempty"`
+	Type       Port_PortType           `protobuf:"varint,3,opt,name=type,proto3,enum=device.Port_PortType" json:"type,omitempty"`
+	AdminState common.AdminState_Types `protobuf:"varint,5,opt,name=admin_state,json=adminState,proto3,enum=common.AdminState_Types" json:"admin_state,omitempty"`
+	OperStatus common.OperStatus_Types `protobuf:"varint,6,opt,name=oper_status,json=operStatus,proto3,enum=common.OperStatus_Types" json:"oper_status,omitempty"`
+	DeviceId   string                  `protobuf:"bytes,7,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	Peers      []*Port_PeerPort        `protobuf:"bytes,8,rep,name=peers,proto3" json:"peers,omitempty"`
+	RxPackets  uint64                  `protobuf:"fixed64,9,opt,name=rx_packets,json=rxPackets,proto3" json:"rx_packets,omitempty"`
+	RxBytes    uint64                  `protobuf:"fixed64,10,opt,name=rx_bytes,json=rxBytes,proto3" json:"rx_bytes,omitempty"`
+	RxErrors   uint64                  `protobuf:"fixed64,11,opt,name=rx_errors,json=rxErrors,proto3" json:"rx_errors,omitempty"`
+	TxPackets  uint64                  `protobuf:"fixed64,12,opt,name=tx_packets,json=txPackets,proto3" json:"tx_packets,omitempty"`
+	TxBytes    uint64                  `protobuf:"fixed64,13,opt,name=tx_bytes,json=txBytes,proto3" json:"tx_bytes,omitempty"`
+	TxErrors   uint64                  `protobuf:"fixed64,14,opt,name=tx_errors,json=txErrors,proto3" json:"tx_errors,omitempty"`
+	// ofp_port represents the characteristics of a port, e.g. hardware
+	// address and supported features.  This field is relevant only for
+	// UNI and NNI ports.   For PON ports, it can be left empty.
+	OfpPort              *openflow_13.OfpPort `protobuf:"bytes,15,opt,name=ofp_port,json=ofpPort,proto3" json:"ofp_port,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *Port) Reset()         { *m = Port{} }
+func (m *Port) String() string { return proto.CompactTextString(m) }
+func (*Port) ProtoMessage()    {}
+func (*Port) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{13}
+}
+
+func (m *Port) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Port.Unmarshal(m, b)
+}
+func (m *Port) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Port.Marshal(b, m, deterministic)
+}
+func (m *Port) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Port.Merge(m, src)
+}
+func (m *Port) XXX_Size() int {
+	return xxx_messageInfo_Port.Size(m)
+}
+func (m *Port) XXX_DiscardUnknown() {
+	xxx_messageInfo_Port.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Port proto.InternalMessageInfo
+
+func (m *Port) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+func (m *Port) GetLabel() string {
+	if m != nil {
+		return m.Label
+	}
+	return ""
+}
+
+func (m *Port) GetType() Port_PortType {
+	if m != nil {
+		return m.Type
+	}
+	return Port_UNKNOWN
+}
+
+func (m *Port) GetAdminState() common.AdminState_Types {
+	if m != nil {
+		return m.AdminState
+	}
+	return common.AdminState_UNKNOWN
+}
+
+func (m *Port) GetOperStatus() common.OperStatus_Types {
+	if m != nil {
+		return m.OperStatus
+	}
+	return common.OperStatus_UNKNOWN
+}
+
+func (m *Port) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *Port) GetPeers() []*Port_PeerPort {
+	if m != nil {
+		return m.Peers
+	}
+	return nil
+}
+
+func (m *Port) GetRxPackets() uint64 {
+	if m != nil {
+		return m.RxPackets
+	}
+	return 0
+}
+
+func (m *Port) GetRxBytes() uint64 {
+	if m != nil {
+		return m.RxBytes
+	}
+	return 0
+}
+
+func (m *Port) GetRxErrors() uint64 {
+	if m != nil {
+		return m.RxErrors
+	}
+	return 0
+}
+
+func (m *Port) GetTxPackets() uint64 {
+	if m != nil {
+		return m.TxPackets
+	}
+	return 0
+}
+
+func (m *Port) GetTxBytes() uint64 {
+	if m != nil {
+		return m.TxBytes
+	}
+	return 0
+}
+
+func (m *Port) GetTxErrors() uint64 {
+	if m != nil {
+		return m.TxErrors
+	}
+	return 0
+}
+
+func (m *Port) GetOfpPort() *openflow_13.OfpPort {
+	if m != nil {
+		return m.OfpPort
+	}
+	return nil
+}
+
+type Port_PeerPort struct {
+	DeviceId             string   `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	PortNo               uint32   `protobuf:"varint,2,opt,name=port_no,json=portNo,proto3" json:"port_no,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Port_PeerPort) Reset()         { *m = Port_PeerPort{} }
+func (m *Port_PeerPort) String() string { return proto.CompactTextString(m) }
+func (*Port_PeerPort) ProtoMessage()    {}
+func (*Port_PeerPort) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{13, 0}
+}
+
+func (m *Port_PeerPort) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Port_PeerPort.Unmarshal(m, b)
+}
+func (m *Port_PeerPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Port_PeerPort.Marshal(b, m, deterministic)
+}
+func (m *Port_PeerPort) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Port_PeerPort.Merge(m, src)
+}
+func (m *Port_PeerPort) XXX_Size() int {
+	return xxx_messageInfo_Port_PeerPort.Size(m)
+}
+func (m *Port_PeerPort) XXX_DiscardUnknown() {
+	xxx_messageInfo_Port_PeerPort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Port_PeerPort proto.InternalMessageInfo
+
+func (m *Port_PeerPort) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *Port_PeerPort) GetPortNo() uint32 {
+	if m != nil {
+		return m.PortNo
+	}
+	return 0
+}
+
+type Ports struct {
+	Items                []*Port  `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Ports) Reset()         { *m = Ports{} }
+func (m *Ports) String() string { return proto.CompactTextString(m) }
+func (*Ports) ProtoMessage()    {}
+func (*Ports) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{14}
+}
+
+func (m *Ports) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Ports.Unmarshal(m, b)
+}
+func (m *Ports) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Ports.Marshal(b, m, deterministic)
+}
+func (m *Ports) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Ports.Merge(m, src)
+}
+func (m *Ports) XXX_Size() int {
+	return xxx_messageInfo_Ports.Size(m)
+}
+func (m *Ports) XXX_DiscardUnknown() {
+	xxx_messageInfo_Ports.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Ports proto.InternalMessageInfo
+
+func (m *Ports) GetItems() []*Port {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+// A Physical Device instance
+type Device struct {
+	// Voltha's device identifier
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Device type, refers to one of the registered device types
+	Type string `protobuf:"bytes,2,opt,name=type,proto3" json:"type,omitempty"`
+	// Is this device a root device. Each logical switch has one root
+	// device that is associated with the logical flow switch.
+	Root bool `protobuf:"varint,3,opt,name=root,proto3" json:"root,omitempty"`
+	// Parent device id, in the device tree (for a root device, the parent_id
+	// is the logical_device.id)
+	ParentId     string `protobuf:"bytes,4,opt,name=parent_id,json=parentId,proto3" json:"parent_id,omitempty"`
+	ParentPortNo uint32 `protobuf:"varint,20,opt,name=parent_port_no,json=parentPortNo,proto3" json:"parent_port_no,omitempty"`
+	// Vendor, version, serial number, etc.
+	Vendor          string `protobuf:"bytes,5,opt,name=vendor,proto3" json:"vendor,omitempty"`
+	Model           string `protobuf:"bytes,6,opt,name=model,proto3" json:"model,omitempty"`
+	HardwareVersion string `protobuf:"bytes,7,opt,name=hardware_version,json=hardwareVersion,proto3" json:"hardware_version,omitempty"`
+	FirmwareVersion string `protobuf:"bytes,8,opt,name=firmware_version,json=firmwareVersion,proto3" json:"firmware_version,omitempty"`
+	// List of software on the device
+	Images       *Images `protobuf:"bytes,9,opt,name=images,proto3" json:"images,omitempty"`
+	SerialNumber string  `protobuf:"bytes,10,opt,name=serial_number,json=serialNumber,proto3" json:"serial_number,omitempty"`
+	VendorId     string  `protobuf:"bytes,24,opt,name=vendor_id,json=vendorId,proto3" json:"vendor_id,omitempty"`
+	// Indicates how to reach the adapter instance that manages this device
+	AdapterEndpoint string `protobuf:"bytes,25,opt,name=adapter_endpoint,json=adapterEndpoint,proto3" json:"adapter_endpoint,omitempty"`
+	// Device contact on vlan (if 0, no vlan)
+	Vlan uint32 `protobuf:"varint,12,opt,name=vlan,proto3" json:"vlan,omitempty"`
+	// Device contact MAC address (format: "xx:xx:xx:xx:xx:xx")
+	MacAddress string `protobuf:"bytes,13,opt,name=mac_address,json=macAddress,proto3" json:"mac_address,omitempty"`
+	// Types that are valid to be assigned to Address:
+	//	*Device_Ipv4Address
+	//	*Device_Ipv6Address
+	//	*Device_HostAndPort
+	Address       isDevice_Address           `protobuf_oneof:"address"`
+	ExtraArgs     string                     `protobuf:"bytes,23,opt,name=extra_args,json=extraArgs,proto3" json:"extra_args,omitempty"`
+	ProxyAddress  *Device_ProxyAddress       `protobuf:"bytes,19,opt,name=proxy_address,json=proxyAddress,proto3" json:"proxy_address,omitempty"`
+	AdminState    common.AdminState_Types    `protobuf:"varint,16,opt,name=admin_state,json=adminState,proto3,enum=common.AdminState_Types" json:"admin_state,omitempty"`
+	OperStatus    common.OperStatus_Types    `protobuf:"varint,17,opt,name=oper_status,json=operStatus,proto3,enum=common.OperStatus_Types" json:"oper_status,omitempty"`
+	Reason        string                     `protobuf:"bytes,22,opt,name=reason,proto3" json:"reason,omitempty"`
+	ConnectStatus common.ConnectStatus_Types `protobuf:"varint,18,opt,name=connect_status,json=connectStatus,proto3,enum=common.ConnectStatus_Types" json:"connect_status,omitempty"`
+	// Device type specific attributes
+	Custom *any.Any `protobuf:"bytes,64,opt,name=custom,proto3" json:"custom,omitempty"`
+	// PmConfigs type
+	PmConfigs            *PmConfigs       `protobuf:"bytes,131,opt,name=pm_configs,json=pmConfigs,proto3" json:"pm_configs,omitempty"`
+	ImageDownloads       []*ImageDownload `protobuf:"bytes,133,rep,name=image_downloads,json=imageDownloads,proto3" json:"image_downloads,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *Device) Reset()         { *m = Device{} }
+func (m *Device) String() string { return proto.CompactTextString(m) }
+func (*Device) ProtoMessage()    {}
+func (*Device) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{15}
+}
+
+func (m *Device) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Device.Unmarshal(m, b)
+}
+func (m *Device) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Device.Marshal(b, m, deterministic)
+}
+func (m *Device) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Device.Merge(m, src)
+}
+func (m *Device) XXX_Size() int {
+	return xxx_messageInfo_Device.Size(m)
+}
+func (m *Device) XXX_DiscardUnknown() {
+	xxx_messageInfo_Device.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Device proto.InternalMessageInfo
+
+func (m *Device) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *Device) GetType() string {
+	if m != nil {
+		return m.Type
+	}
+	return ""
+}
+
+func (m *Device) GetRoot() bool {
+	if m != nil {
+		return m.Root
+	}
+	return false
+}
+
+func (m *Device) GetParentId() string {
+	if m != nil {
+		return m.ParentId
+	}
+	return ""
+}
+
+func (m *Device) GetParentPortNo() uint32 {
+	if m != nil {
+		return m.ParentPortNo
+	}
+	return 0
+}
+
+func (m *Device) GetVendor() string {
+	if m != nil {
+		return m.Vendor
+	}
+	return ""
+}
+
+func (m *Device) GetModel() string {
+	if m != nil {
+		return m.Model
+	}
+	return ""
+}
+
+func (m *Device) GetHardwareVersion() string {
+	if m != nil {
+		return m.HardwareVersion
+	}
+	return ""
+}
+
+func (m *Device) GetFirmwareVersion() string {
+	if m != nil {
+		return m.FirmwareVersion
+	}
+	return ""
+}
+
+func (m *Device) GetImages() *Images {
+	if m != nil {
+		return m.Images
+	}
+	return nil
+}
+
+func (m *Device) GetSerialNumber() string {
+	if m != nil {
+		return m.SerialNumber
+	}
+	return ""
+}
+
+func (m *Device) GetVendorId() string {
+	if m != nil {
+		return m.VendorId
+	}
+	return ""
+}
+
+func (m *Device) GetAdapterEndpoint() string {
+	if m != nil {
+		return m.AdapterEndpoint
+	}
+	return ""
+}
+
+func (m *Device) GetVlan() uint32 {
+	if m != nil {
+		return m.Vlan
+	}
+	return 0
+}
+
+func (m *Device) GetMacAddress() string {
+	if m != nil {
+		return m.MacAddress
+	}
+	return ""
+}
+
+type isDevice_Address interface {
+	isDevice_Address()
+}
+
+type Device_Ipv4Address struct {
+	Ipv4Address string `protobuf:"bytes,14,opt,name=ipv4_address,json=ipv4Address,proto3,oneof"`
+}
+
+type Device_Ipv6Address struct {
+	Ipv6Address string `protobuf:"bytes,15,opt,name=ipv6_address,json=ipv6Address,proto3,oneof"`
+}
+
+type Device_HostAndPort struct {
+	HostAndPort string `protobuf:"bytes,21,opt,name=host_and_port,json=hostAndPort,proto3,oneof"`
+}
+
+func (*Device_Ipv4Address) isDevice_Address() {}
+
+func (*Device_Ipv6Address) isDevice_Address() {}
+
+func (*Device_HostAndPort) isDevice_Address() {}
+
+func (m *Device) GetAddress() isDevice_Address {
+	if m != nil {
+		return m.Address
+	}
+	return nil
+}
+
+func (m *Device) GetIpv4Address() string {
+	if x, ok := m.GetAddress().(*Device_Ipv4Address); ok {
+		return x.Ipv4Address
+	}
+	return ""
+}
+
+func (m *Device) GetIpv6Address() string {
+	if x, ok := m.GetAddress().(*Device_Ipv6Address); ok {
+		return x.Ipv6Address
+	}
+	return ""
+}
+
+func (m *Device) GetHostAndPort() string {
+	if x, ok := m.GetAddress().(*Device_HostAndPort); ok {
+		return x.HostAndPort
+	}
+	return ""
+}
+
+func (m *Device) GetExtraArgs() string {
+	if m != nil {
+		return m.ExtraArgs
+	}
+	return ""
+}
+
+func (m *Device) GetProxyAddress() *Device_ProxyAddress {
+	if m != nil {
+		return m.ProxyAddress
+	}
+	return nil
+}
+
+func (m *Device) GetAdminState() common.AdminState_Types {
+	if m != nil {
+		return m.AdminState
+	}
+	return common.AdminState_UNKNOWN
+}
+
+func (m *Device) GetOperStatus() common.OperStatus_Types {
+	if m != nil {
+		return m.OperStatus
+	}
+	return common.OperStatus_UNKNOWN
+}
+
+func (m *Device) GetReason() string {
+	if m != nil {
+		return m.Reason
+	}
+	return ""
+}
+
+func (m *Device) GetConnectStatus() common.ConnectStatus_Types {
+	if m != nil {
+		return m.ConnectStatus
+	}
+	return common.ConnectStatus_UNKNOWN
+}
+
+func (m *Device) GetCustom() *any.Any {
+	if m != nil {
+		return m.Custom
+	}
+	return nil
+}
+
+func (m *Device) GetPmConfigs() *PmConfigs {
+	if m != nil {
+		return m.PmConfigs
+	}
+	return nil
+}
+
+func (m *Device) GetImageDownloads() []*ImageDownload {
+	if m != nil {
+		return m.ImageDownloads
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Device) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*Device_Ipv4Address)(nil),
+		(*Device_Ipv6Address)(nil),
+		(*Device_HostAndPort)(nil),
+	}
+}
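+
+// A minimal usage sketch for the Address oneof above (illustrative only; the
+// variable names and values below are hypothetical and not part of the
+// generated API): callers read the oneof through GetAddress() and type-switch
+// on the wrapper types, or use the per-variant getters directly.
+//
+//	dev := &Device{Address: &Device_Ipv4Address{Ipv4Address: "10.0.0.1"}}
+//	switch a := dev.GetAddress().(type) {
+//	case *Device_Ipv4Address:
+//		fmt.Println("ipv4:", a.Ipv4Address) // equivalent to dev.GetIpv4Address()
+//	case *Device_Ipv6Address:
+//		fmt.Println("ipv6:", a.Ipv6Address)
+//	case *Device_HostAndPort:
+//		fmt.Println("host:", a.HostAndPort)
+//	}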
+
+type Device_ProxyAddress struct {
+	DeviceId             string   `protobuf:"bytes,1,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	DeviceType           string   `protobuf:"bytes,2,opt,name=device_type,json=deviceType,proto3" json:"device_type,omitempty"`
+	ChannelId            uint32   `protobuf:"varint,3,opt,name=channel_id,json=channelId,proto3" json:"channel_id,omitempty"`
+	ChannelGroupId       uint32   `protobuf:"varint,4,opt,name=channel_group_id,json=channelGroupId,proto3" json:"channel_group_id,omitempty"`
+	ChannelTermination   string   `protobuf:"bytes,5,opt,name=channel_termination,json=channelTermination,proto3" json:"channel_termination,omitempty"`
+	OnuId                uint32   `protobuf:"varint,6,opt,name=onu_id,json=onuId,proto3" json:"onu_id,omitempty"`
+	OnuSessionId         uint32   `protobuf:"varint,7,opt,name=onu_session_id,json=onuSessionId,proto3" json:"onu_session_id,omitempty"`
+	AdapterEndpoint      string   `protobuf:"bytes,8,opt,name=adapter_endpoint,json=adapterEndpoint,proto3" json:"adapter_endpoint,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *Device_ProxyAddress) Reset()         { *m = Device_ProxyAddress{} }
+func (m *Device_ProxyAddress) String() string { return proto.CompactTextString(m) }
+func (*Device_ProxyAddress) ProtoMessage()    {}
+func (*Device_ProxyAddress) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{15, 0}
+}
+
+func (m *Device_ProxyAddress) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Device_ProxyAddress.Unmarshal(m, b)
+}
+func (m *Device_ProxyAddress) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Device_ProxyAddress.Marshal(b, m, deterministic)
+}
+func (m *Device_ProxyAddress) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Device_ProxyAddress.Merge(m, src)
+}
+func (m *Device_ProxyAddress) XXX_Size() int {
+	return xxx_messageInfo_Device_ProxyAddress.Size(m)
+}
+func (m *Device_ProxyAddress) XXX_DiscardUnknown() {
+	xxx_messageInfo_Device_ProxyAddress.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Device_ProxyAddress proto.InternalMessageInfo
+
+func (m *Device_ProxyAddress) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *Device_ProxyAddress) GetDeviceType() string {
+	if m != nil {
+		return m.DeviceType
+	}
+	return ""
+}
+
+func (m *Device_ProxyAddress) GetChannelId() uint32 {
+	if m != nil {
+		return m.ChannelId
+	}
+	return 0
+}
+
+func (m *Device_ProxyAddress) GetChannelGroupId() uint32 {
+	if m != nil {
+		return m.ChannelGroupId
+	}
+	return 0
+}
+
+func (m *Device_ProxyAddress) GetChannelTermination() string {
+	if m != nil {
+		return m.ChannelTermination
+	}
+	return ""
+}
+
+func (m *Device_ProxyAddress) GetOnuId() uint32 {
+	if m != nil {
+		return m.OnuId
+	}
+	return 0
+}
+
+func (m *Device_ProxyAddress) GetOnuSessionId() uint32 {
+	if m != nil {
+		return m.OnuSessionId
+	}
+	return 0
+}
+
+func (m *Device_ProxyAddress) GetAdapterEndpoint() string {
+	if m != nil {
+		return m.AdapterEndpoint
+	}
+	return ""
+}
+
+type DeviceImageDownloadRequest struct {
+	// Device Id
+	// allows for operations on multiple devices.
+	DeviceId []*common.ID `protobuf:"bytes,1,rep,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	//The image for the device containing all the information
+	Image *Image `protobuf:"bytes,2,opt,name=image,proto3" json:"image,omitempty"`
+	//Activate the image if the download to the device is successful
+	ActivateOnSuccess bool `protobuf:"varint,3,opt,name=activateOnSuccess,proto3" json:"activateOnSuccess,omitempty"`
+	//Automatically commit the image if the activation on the device is successful
+	CommitOnSuccess      bool     `protobuf:"varint,4,opt,name=commitOnSuccess,proto3" json:"commitOnSuccess,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeviceImageDownloadRequest) Reset()         { *m = DeviceImageDownloadRequest{} }
+func (m *DeviceImageDownloadRequest) String() string { return proto.CompactTextString(m) }
+func (*DeviceImageDownloadRequest) ProtoMessage()    {}
+func (*DeviceImageDownloadRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{16}
+}
+
+func (m *DeviceImageDownloadRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceImageDownloadRequest.Unmarshal(m, b)
+}
+func (m *DeviceImageDownloadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceImageDownloadRequest.Marshal(b, m, deterministic)
+}
+func (m *DeviceImageDownloadRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceImageDownloadRequest.Merge(m, src)
+}
+func (m *DeviceImageDownloadRequest) XXX_Size() int {
+	return xxx_messageInfo_DeviceImageDownloadRequest.Size(m)
+}
+func (m *DeviceImageDownloadRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceImageDownloadRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceImageDownloadRequest proto.InternalMessageInfo
+
+func (m *DeviceImageDownloadRequest) GetDeviceId() []*common.ID {
+	if m != nil {
+		return m.DeviceId
+	}
+	return nil
+}
+
+func (m *DeviceImageDownloadRequest) GetImage() *Image {
+	if m != nil {
+		return m.Image
+	}
+	return nil
+}
+
+func (m *DeviceImageDownloadRequest) GetActivateOnSuccess() bool {
+	if m != nil {
+		return m.ActivateOnSuccess
+	}
+	return false
+}
+
+func (m *DeviceImageDownloadRequest) GetCommitOnSuccess() bool {
+	if m != nil {
+		return m.CommitOnSuccess
+	}
+	return false
+}
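+
+// Illustrative sketch only (not generated code): a download request can target
+// several devices at once and ask for activation and commit to follow a
+// successful download. The IDs and the empty Image literal are hypothetical.
+//
+//	req := &DeviceImageDownloadRequest{
+//		DeviceId:          []*common.ID{{Id: "olt-1"}, {Id: "olt-2"}},
+//		Image:             &Image{}, // image details omitted in this sketch
+//		ActivateOnSuccess: true,
+//		CommitOnSuccess:   true,
+//	}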
+
+type DeviceImageRequest struct {
+	// Device Id
+	// allows for operations on multiple devices.
+	DeviceId []*common.ID `protobuf:"bytes,1,rep,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	// Image Version is the sole identifier of the image. It is the vendor-specified OMCI version and
+	// must be known at the time of initiating a download, activate, or commit of an image on an ONU.
+	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+	//Automatically commit the image if the activation on the device is successful
+	CommitOnSuccess      bool     `protobuf:"varint,3,opt,name=commitOnSuccess,proto3" json:"commitOnSuccess,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *DeviceImageRequest) Reset()         { *m = DeviceImageRequest{} }
+func (m *DeviceImageRequest) String() string { return proto.CompactTextString(m) }
+func (*DeviceImageRequest) ProtoMessage()    {}
+func (*DeviceImageRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{17}
+}
+
+func (m *DeviceImageRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceImageRequest.Unmarshal(m, b)
+}
+func (m *DeviceImageRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceImageRequest.Marshal(b, m, deterministic)
+}
+func (m *DeviceImageRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceImageRequest.Merge(m, src)
+}
+func (m *DeviceImageRequest) XXX_Size() int {
+	return xxx_messageInfo_DeviceImageRequest.Size(m)
+}
+func (m *DeviceImageRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceImageRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceImageRequest proto.InternalMessageInfo
+
+func (m *DeviceImageRequest) GetDeviceId() []*common.ID {
+	if m != nil {
+		return m.DeviceId
+	}
+	return nil
+}
+
+func (m *DeviceImageRequest) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *DeviceImageRequest) GetCommitOnSuccess() bool {
+	if m != nil {
+		return m.CommitOnSuccess
+	}
+	return false
+}
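+
+// Illustrative sketch only (not generated code): activate/commit style requests
+// identify the image solely by its version string. The values below are
+// hypothetical placeholders.
+//
+//	req := &DeviceImageRequest{
+//		DeviceId:        []*common.ID{{Id: "onu-1"}},
+//		Version:         "vendor-omci-version-string", // hypothetical
+//		CommitOnSuccess: true,
+//	}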
+
+type DeviceImageResponse struct {
+	//Image state for the different devices
+	DeviceImageStates    []*DeviceImageState `protobuf:"bytes,1,rep,name=device_image_states,json=deviceImageStates,proto3" json:"device_image_states,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}            `json:"-"`
+	XXX_unrecognized     []byte              `json:"-"`
+	XXX_sizecache        int32               `json:"-"`
+}
+
+func (m *DeviceImageResponse) Reset()         { *m = DeviceImageResponse{} }
+func (m *DeviceImageResponse) String() string { return proto.CompactTextString(m) }
+func (*DeviceImageResponse) ProtoMessage()    {}
+func (*DeviceImageResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{18}
+}
+
+func (m *DeviceImageResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceImageResponse.Unmarshal(m, b)
+}
+func (m *DeviceImageResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceImageResponse.Marshal(b, m, deterministic)
+}
+func (m *DeviceImageResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceImageResponse.Merge(m, src)
+}
+func (m *DeviceImageResponse) XXX_Size() int {
+	return xxx_messageInfo_DeviceImageResponse.Size(m)
+}
+func (m *DeviceImageResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceImageResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceImageResponse proto.InternalMessageInfo
+
+func (m *DeviceImageResponse) GetDeviceImageStates() []*DeviceImageState {
+	if m != nil {
+		return m.DeviceImageStates
+	}
+	return nil
+}
+
+// Device Self Test Response
+type SelfTestResponse struct {
+	Result               SelfTestResponse_SelfTestResult `protobuf:"varint,1,opt,name=result,proto3,enum=device.SelfTestResponse_SelfTestResult" json:"result,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                        `json:"-"`
+	XXX_unrecognized     []byte                          `json:"-"`
+	XXX_sizecache        int32                           `json:"-"`
+}
+
+func (m *SelfTestResponse) Reset()         { *m = SelfTestResponse{} }
+func (m *SelfTestResponse) String() string { return proto.CompactTextString(m) }
+func (*SelfTestResponse) ProtoMessage()    {}
+func (*SelfTestResponse) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{19}
+}
+
+func (m *SelfTestResponse) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SelfTestResponse.Unmarshal(m, b)
+}
+func (m *SelfTestResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SelfTestResponse.Marshal(b, m, deterministic)
+}
+func (m *SelfTestResponse) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SelfTestResponse.Merge(m, src)
+}
+func (m *SelfTestResponse) XXX_Size() int {
+	return xxx_messageInfo_SelfTestResponse.Size(m)
+}
+func (m *SelfTestResponse) XXX_DiscardUnknown() {
+	xxx_messageInfo_SelfTestResponse.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SelfTestResponse proto.InternalMessageInfo
+
+func (m *SelfTestResponse) GetResult() SelfTestResponse_SelfTestResult {
+	if m != nil {
+		return m.Result
+	}
+	return SelfTestResponse_SUCCESS
+}
+
+type Devices struct {
+	Items                []*Device `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}  `json:"-"`
+	XXX_unrecognized     []byte    `json:"-"`
+	XXX_sizecache        int32     `json:"-"`
+}
+
+func (m *Devices) Reset()         { *m = Devices{} }
+func (m *Devices) String() string { return proto.CompactTextString(m) }
+func (*Devices) ProtoMessage()    {}
+func (*Devices) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{20}
+}
+
+func (m *Devices) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Devices.Unmarshal(m, b)
+}
+func (m *Devices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Devices.Marshal(b, m, deterministic)
+}
+func (m *Devices) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Devices.Merge(m, src)
+}
+func (m *Devices) XXX_Size() int {
+	return xxx_messageInfo_Devices.Size(m)
+}
+func (m *Devices) XXX_DiscardUnknown() {
+	xxx_messageInfo_Devices.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Devices proto.InternalMessageInfo
+
+func (m *Devices) GetItems() []*Device {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type SimulateAlarmRequest struct {
+	// Device Identifier
+	Id                   string                             `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Indicator            string                             `protobuf:"bytes,2,opt,name=indicator,proto3" json:"indicator,omitempty"`
+	IntfId               string                             `protobuf:"bytes,3,opt,name=intf_id,json=intfId,proto3" json:"intf_id,omitempty"`
+	PortTypeName         string                             `protobuf:"bytes,4,opt,name=port_type_name,json=portTypeName,proto3" json:"port_type_name,omitempty"`
+	OnuDeviceId          string                             `protobuf:"bytes,5,opt,name=onu_device_id,json=onuDeviceId,proto3" json:"onu_device_id,omitempty"`
+	InverseBitErrorRate  int32                              `protobuf:"varint,6,opt,name=inverse_bit_error_rate,json=inverseBitErrorRate,proto3" json:"inverse_bit_error_rate,omitempty"`
+	Drift                int32                              `protobuf:"varint,7,opt,name=drift,proto3" json:"drift,omitempty"`
+	NewEqd               int32                              `protobuf:"varint,8,opt,name=new_eqd,json=newEqd,proto3" json:"new_eqd,omitempty"`
+	OnuSerialNumber      string                             `protobuf:"bytes,9,opt,name=onu_serial_number,json=onuSerialNumber,proto3" json:"onu_serial_number,omitempty"`
+	Operation            SimulateAlarmRequest_OperationType `protobuf:"varint,10,opt,name=operation,proto3,enum=device.SimulateAlarmRequest_OperationType" json:"operation,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                           `json:"-"`
+	XXX_unrecognized     []byte                             `json:"-"`
+	XXX_sizecache        int32                              `json:"-"`
+}
+
+func (m *SimulateAlarmRequest) Reset()         { *m = SimulateAlarmRequest{} }
+func (m *SimulateAlarmRequest) String() string { return proto.CompactTextString(m) }
+func (*SimulateAlarmRequest) ProtoMessage()    {}
+func (*SimulateAlarmRequest) Descriptor() ([]byte, []int) {
+	return fileDescriptor_200940f73d155856, []int{21}
+}
+
+func (m *SimulateAlarmRequest) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_SimulateAlarmRequest.Unmarshal(m, b)
+}
+func (m *SimulateAlarmRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_SimulateAlarmRequest.Marshal(b, m, deterministic)
+}
+func (m *SimulateAlarmRequest) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_SimulateAlarmRequest.Merge(m, src)
+}
+func (m *SimulateAlarmRequest) XXX_Size() int {
+	return xxx_messageInfo_SimulateAlarmRequest.Size(m)
+}
+func (m *SimulateAlarmRequest) XXX_DiscardUnknown() {
+	xxx_messageInfo_SimulateAlarmRequest.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_SimulateAlarmRequest proto.InternalMessageInfo
+
+func (m *SimulateAlarmRequest) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *SimulateAlarmRequest) GetIndicator() string {
+	if m != nil {
+		return m.Indicator
+	}
+	return ""
+}
+
+func (m *SimulateAlarmRequest) GetIntfId() string {
+	if m != nil {
+		return m.IntfId
+	}
+	return ""
+}
+
+func (m *SimulateAlarmRequest) GetPortTypeName() string {
+	if m != nil {
+		return m.PortTypeName
+	}
+	return ""
+}
+
+func (m *SimulateAlarmRequest) GetOnuDeviceId() string {
+	if m != nil {
+		return m.OnuDeviceId
+	}
+	return ""
+}
+
+func (m *SimulateAlarmRequest) GetInverseBitErrorRate() int32 {
+	if m != nil {
+		return m.InverseBitErrorRate
+	}
+	return 0
+}
+
+func (m *SimulateAlarmRequest) GetDrift() int32 {
+	if m != nil {
+		return m.Drift
+	}
+	return 0
+}
+
+func (m *SimulateAlarmRequest) GetNewEqd() int32 {
+	if m != nil {
+		return m.NewEqd
+	}
+	return 0
+}
+
+func (m *SimulateAlarmRequest) GetOnuSerialNumber() string {
+	if m != nil {
+		return m.OnuSerialNumber
+	}
+	return ""
+}
+
+func (m *SimulateAlarmRequest) GetOperation() SimulateAlarmRequest_OperationType {
+	if m != nil {
+		return m.Operation
+	}
+	return SimulateAlarmRequest_RAISE
+}
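+
+// Illustrative sketch only (not generated code): a simulated alarm is raised or
+// cleared via the OperationType enum registered below. All field values here are
+// hypothetical.
+//
+//	req := &SimulateAlarmRequest{
+//		Id:        "olt-1",
+//		Indicator: "los",
+//		IntfId:    "0",
+//		Operation: SimulateAlarmRequest_RAISE,
+//	}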
+
+func init() {
+	proto.RegisterEnum("device.PmConfig_PmType", PmConfig_PmType_name, PmConfig_PmType_value)
+	proto.RegisterEnum("device.ImageDownload_ImageDownloadState", ImageDownload_ImageDownloadState_name, ImageDownload_ImageDownloadState_value)
+	proto.RegisterEnum("device.ImageDownload_ImageDownloadFailureReason", ImageDownload_ImageDownloadFailureReason_name, ImageDownload_ImageDownloadFailureReason_value)
+	proto.RegisterEnum("device.ImageDownload_ImageActivateState", ImageDownload_ImageActivateState_name, ImageDownload_ImageActivateState_value)
+	proto.RegisterEnum("device.ImageState_ImageDownloadState", ImageState_ImageDownloadState_name, ImageState_ImageDownloadState_value)
+	proto.RegisterEnum("device.ImageState_ImageFailureReason", ImageState_ImageFailureReason_name, ImageState_ImageFailureReason_value)
+	proto.RegisterEnum("device.ImageState_ImageActivationState", ImageState_ImageActivationState_name, ImageState_ImageActivationState_value)
+	proto.RegisterEnum("device.Port_PortType", Port_PortType_name, Port_PortType_value)
+	proto.RegisterEnum("device.SelfTestResponse_SelfTestResult", SelfTestResponse_SelfTestResult_name, SelfTestResponse_SelfTestResult_value)
+	proto.RegisterEnum("device.SimulateAlarmRequest_OperationType", SimulateAlarmRequest_OperationType_name, SimulateAlarmRequest_OperationType_value)
+	proto.RegisterType((*DeviceType)(nil), "device.DeviceType")
+	proto.RegisterType((*DeviceTypes)(nil), "device.DeviceTypes")
+	proto.RegisterType((*PmConfig)(nil), "device.PmConfig")
+	proto.RegisterType((*PmGroupConfig)(nil), "device.PmGroupConfig")
+	proto.RegisterType((*PmConfigs)(nil), "device.PmConfigs")
+	proto.RegisterType((*Image)(nil), "device.Image")
+	proto.RegisterType((*ImageDownload)(nil), "device.ImageDownload")
+	proto.RegisterType((*ImageDownloads)(nil), "device.ImageDownloads")
+	proto.RegisterType((*Images)(nil), "device.Images")
+	proto.RegisterType((*OnuImage)(nil), "device.OnuImage")
+	proto.RegisterType((*OnuImages)(nil), "device.OnuImages")
+	proto.RegisterType((*DeviceImageState)(nil), "device.DeviceImageState")
+	proto.RegisterType((*ImageState)(nil), "device.ImageState")
+	proto.RegisterType((*Port)(nil), "device.Port")
+	proto.RegisterType((*Port_PeerPort)(nil), "device.Port.PeerPort")
+	proto.RegisterType((*Ports)(nil), "device.Ports")
+	proto.RegisterType((*Device)(nil), "device.Device")
+	proto.RegisterType((*Device_ProxyAddress)(nil), "device.Device.ProxyAddress")
+	proto.RegisterType((*DeviceImageDownloadRequest)(nil), "device.DeviceImageDownloadRequest")
+	proto.RegisterType((*DeviceImageRequest)(nil), "device.DeviceImageRequest")
+	proto.RegisterType((*DeviceImageResponse)(nil), "device.DeviceImageResponse")
+	proto.RegisterType((*SelfTestResponse)(nil), "device.SelfTestResponse")
+	proto.RegisterType((*Devices)(nil), "device.Devices")
+	proto.RegisterType((*SimulateAlarmRequest)(nil), "device.SimulateAlarmRequest")
+}
+
+func init() { proto.RegisterFile("voltha_protos/device.proto", fileDescriptor_200940f73d155856) }
+
+var fileDescriptor_200940f73d155856 = []byte{
+	// 2853 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xcc, 0x59, 0x4d, 0x73, 0xdb, 0xc6,
+	0xf9, 0x37, 0x49, 0xf1, 0x05, 0x0f, 0x5f, 0x04, 0xad, 0x24, 0x1b, 0x96, 0xe2, 0xbf, 0x1d, 0x38,
+	0x2f, 0x72, 0x9c, 0x48, 0x89, 0xfd, 0x6f, 0xd2, 0x76, 0xa6, 0x13, 0x53, 0x24, 0x64, 0xb1, 0x95,
+	0x41, 0x75, 0x49, 0x2a, 0x4d, 0x2e, 0x18, 0x88, 0x58, 0x4a, 0x18, 0x83, 0x00, 0xbd, 0x00, 0x25,
+	0x39, 0xb7, 0x4e, 0xa7, 0x3d, 0x75, 0xa6, 0x87, 0x9e, 0xfa, 0x01, 0x3a, 0xd3, 0x53, 0xa7, 0xb7,
+	0x5c, 0x3b, 0xfd, 0x04, 0x3d, 0xf6, 0xdc, 0x1e, 0xfa, 0x01, 0xda, 0x0f, 0xd0, 0xd9, 0x37, 0x02,
+	0x90, 0x64, 0x27, 0xe9, 0x74, 0x3a, 0xbd, 0x70, 0xb0, 0xbf, 0xe7, 0xd9, 0x67, 0x77, 0x9f, 0x7d,
+	0x5e, 0x97, 0xb0, 0x71, 0x16, 0x05, 0xc9, 0xa9, 0xeb, 0xcc, 0x68, 0x94, 0x44, 0xf1, 0x8e, 0x47,
+	0xce, 0xfc, 0x31, 0xd9, 0xe6, 0x23, 0x54, 0x11, 0xa3, 0x8d, 0xdb, 0x27, 0x51, 0x74, 0x12, 0x90,
+	0x1d, 0x8e, 0x1e, 0xcf, 0x27, 0x3b, 0x6e, 0xf8, 0x52, 0xb0, 0x6c, 0x5c, 0x9a, 0x3e, 0x8e, 0xa6,
+	0xd3, 0x28, 0x94, 0xb4, 0xbb, 0x79, 0x5a, 0x34, 0x23, 0xe1, 0x24, 0x88, 0xce, 0x9d, 0x8f, 0x1e,
+	0x0b, 0x06, 0xf3, 0xaf, 0x45, 0x80, 0x2e, 0x5f, 0x62, 0xf8, 0x72, 0x46, 0x50, 0x0b, 0x8a, 0xbe,
+	0x67, 0x14, 0xee, 0x15, 0xb6, 0x34, 0x5c, 0xf4, 0x3d, 0xb4, 0x09, 0xda, 0x19, 0x09, 0xbd, 0x88,
+	0x3a, 0xbe, 0x67, 0x94, 0x39, 0x5c, 0x13, 0x40, 0xcf, 0x43, 0x77, 0x00, 0x16, 0xc4, 0xd8, 0xa8,
+	0xdc, 0x2b, 0x6d, 0x69, 0x58, 0x53, 0xd4, 0x18, 0x19, 0x50, 0x75, 0x3d, 0x77, 0x96, 0x10, 0x6a,
+	0x14, 0xf9, 0x4c, 0x35, 0x44, 0x9f, 0x80, 0xe1, 0x8e, 0xc7, 0x64, 0x96, 0xc4, 0xce, 0xf1, 0x3c,
+	0x78, 0xee, 0xf0, 0x2d, 0xcd, 0x67, 0x9e, 0x9b, 0x10, 0xa3, 0x74, 0xaf, 0xb0, 0x55, 0xc3, 0xeb,
+	0x92, 0xbe, 0x3b, 0x0f, 0x9e, 0xef, 0x05, 0xd1, 0xf9, 0x88, 0x13, 0x51, 0x17, 0xee, 0xaa, 0x89,
+	0xae, 0xe7, 0x39, 0x94, 0x4c, 0xa3, 0x33, 0x92, 0x9d, 0x1e, 0x1b, 0x4b, 0x7c, 0xfe, 0xa6, 0x64,
+	0x6b, 0x7b, 0x1e, 0xe6, 0x4c, 0xa9, 0x90, 0x18, 0x1d, 0xc0, 0x7d, 0x25, 0xc5, 0xf3, 0x29, 0x19,
+	0x27, 0x4e, 0x10, 0x9d, 0xf8, 0x63, 0x37, 0xe0, 0x92, 0x62, 0xb5, 0x93, 0x2a, 0x97, 0xa4, 0x16,
+	0xec, 0x72, 0xce, 0x03, 0xc1, 0xc8, 0xa4, 0xc5, 0x72, 0x4f, 0x6f, 0x42, 0x43, 0x9e, 0xcb, 0x49,
+	0x5e, 0xce, 0x88, 0x51, 0xe3, 0x67, 0xad, 0x4b, 0x8c, 0x69, 0xd5, 0xfc, 0x04, 0xea, 0xa9, 0x8e,
+	0x63, 0xb4, 0x05, 0x65, 0x3f, 0x21, 0xd3, 0xd8, 0x28, 0xdc, 0x2b, 0x6d, 0xd5, 0x1f, 0xa1, 0x6d,
+	0x79, 0xe3, 0x29, 0x0f, 0x16, 0x0c, 0xe6, 0x1f, 0x0b, 0x50, 0x3b, 0x9c, 0x76, 0xa2, 0x70, 0xe2,
+	0x9f, 0x20, 0x04, 0x4b, 0xa1, 0x3b, 0x25, 0xf2, 0x76, 0xf8, 0x37, 0x7a, 0x08, 0x4b, 0x7c, 0x51,
+	0xa6, 0xe0, 0xd6, 0xa3, 0x5b, 0x4a, 0x92, 0x9a, 0xb3, 0x7d, 0x38, 0xe5, 0xe2, 0x38, 0x13, 0xbb,
+	0x10, 0x12, 0xba, 0xc7, 0x01, 0xf1, 0xa4, 0x96, 0xd5, 0x10, 0xdd, 0x85, 0x7a, 0xec, 0x4e, 0x67,
+	0x01, 0x71, 0x26, 0x94, 0xbc, 0xe0, 0x3a, 0x6c, 0x62, 0x10, 0xd0, 0x1e, 0x25, 0x2f, 0xcc, 0xef,
+	0x42, 0x45, 0x88, 0x42, 0x75, 0xa8, 0x76, 0xfa, 0x23, 0x7b, 0x68, 0x61, 0xfd, 0x06, 0xd2, 0xa0,
+	0xfc, 0xb4, 0x3d, 0x7a, 0x6a, 0xe9, 0x05, 0xf6, 0x39, 0x18, 0xb6, 0x87, 0x96, 0x5e, 0x14, 0x2c,
+	0xf6, 0xd0, 0xfa, 0xc9, 0x50, 0x2f, 0x99, 0xbf, 0x2e, 0x40, 0xf3, 0x70, 0xfa, 0x94, 0x46, 0xf3,
+	0x99, 0x3c, 0xc7, 0x1d, 0x80, 0x13, 0x36, 0x74, 0x32, 0xa7, 0xd1, 0x38, 0x62, 0xb3, 0x23, 0x2d,
+	0xc8, 0x7c, 0x2b, 0x45, 0xbe, 0x15, 0x41, 0x66, 0x3b, 0x79, 0xcd, 0x21, 0xde, 0x83, 0xea, 0x94,
+	0x24, 0xd4, 0x1f, 0x33, 0x23, 0x60, 0x8a, 0xd5, 0x2f, 0xab, 0x03, 0x2b, 0x06, 0xf3, 0x1f, 0x05,
+	0xd0, 0x14, 0x1a, 0x5f, 0xb1, 0xfa, 0x37, 0xa1, 0xe1, 0x91, 0x89, 0x3b, 0x0f, 0x92, 0xec, 0x26,
+	0xea, 0x12, 0x53, 0xdb, 0xe0, 0x7b, 0x4a, 0xb7, 0x21, 0x87, 0xe8, 0x3e, 0x34, 0xd9, 0x24, 0x27,
+	0x3a, 0x23, 0x94, 0xfa, 0x1e, 0x91, 0x16, 0xd9, 0x60, 0x60, 0x5f, 0x62, 0xe8, 0x03, 0xa8, 0x70,
+	0xfe, 0xd8, 0x28, 0xf3, 0xad, 0xae, 0xa7, 0x5b, 0xcd, 0xa8, 0x0a, 0x4b, 0xa6, 0xec, 0xd1, 0x2a,
+	0x5f, 0x73, 0x34, 0x74, 0x1b, 0x6a, 0x53, 0xf7, 0xc2, 0x89, 0x9f, 0x93, 0x73, 0x6e, 0xc2, 0x4d,
+	0x5c, 0x9d, 0xba, 0x17, 0x83, 0xe7, 0xe4, 0xdc, 0xfc, 0x55, 0x11, 0xca, 0xbd, 0xa9, 0x7b, 0x42,
+	0xae, 0xb5, 0x25, 0x03, 0xaa, 0x67, 0x84, 0xc6, 0x7e, 0x14, 0x2a, 0x7f, 0x95, 0x43, 0xc6, 0x7d,
+	0xea, 0xc6, 0xa7, 0xfc, 0xa4, 0x4d, 0xcc, 0xbf, 0xd1, 0x03, 0xd0, 0xfd, 0x30, 0x4e, 0xdc, 0x20,
+	0x70, 0x98, 0x1b, 0x24, 0xfe, 0x54, 0x9c, 0x54, 0xc3, 0xcb, 0x12, 0xef, 0x4a, 0x98, 0x05, 0x11,
+	0x3f, 0x76, 0xdc, 0x71, 0xe2, 0x9f, 0x11, 0x1e, 0x44, 0x6a, 0xb8, 0xe6, 0xc7, 0x6d, 0x3e, 0x66,
+	0xba, 0xf6, 0x63, 0x87, 0x05, 0x2d, 0x3f, 0x49, 0x88, 0x67, 0x54, 0x38, 0xbd, 0xee, 0xc7, 0x1d,
+	0x05, 0xb1, 0x13, 0xf9, 0xb1, 0x73, 0xe6, 0x06, 0xbe, 0x27, 0x9d, 0xb2, 0xea, 0xc7, 0x47, 0x6c,
+	0x88, 0x74, 0x28, 0xcd, 0x69, 0x20, 0x7d, 0x8e, 0x7d, 0xa2, 0x9b, 0x50, 0x11, 0x21, 0xc8, 0xd0,
+	0x38, 0x28, 0x47, 0x68, 0x0d, 0xca, 0x63, 0x3a, 0x7e, 0xfc, 0xc8, 0x00, 0x7e, 0x08, 0x31, 0x30,
+	0xff, 0x5e, 0x85, 0x26, 0xd7, 0x48, 0x37, 0x3a, 0x0f, 0x83, 0xc8, 0xf5, 0xae, 0xd8, 0x82, 0xd2,
+	0x54, 0x31, 0xa3, 0x29, 0xb9, 0x6a, 0x29, 0x5d, 0x55, 0x87, 0xd2, 0x98, 0x8e, 0xa5, 0xe3, 0xb0,
+	0x4f, 0xd4, 0x87, 0x96, 0x27, 0x65, 0x3a, 0x71, 0xc2, 0xe2, 0x49, 0x99, 0xfb, 0xe8, 0x96, 0xba,
+	0xb9, 0xdc, 0xb2, 0xf9, 0xd1, 0x80, 0xf1, 0xe3, 0xa6, 0x97, 0x1d, 0x32, 0xbb, 0xf2, 0x19, 0x93,
+	0xa3, 0x2e, 0xa9, 0xc2, 0x97, 0x6f, 0x70, 0xf0, 0x48, 0xde, 0xd4, 0x03, 0xd0, 0xd5, 0x2c, 0xe2,
+	0x39, 0xc7, 0x2f, 0x59, 0x44, 0x14, 0x46, 0xb0, 0x9c, 0xe2, 0xbb, 0x0c, 0x46, 0xfb, 0x50, 0xa1,
+	0xc4, 0x8d, 0xa3, 0x90, 0x6b, 0xaf, 0xf5, 0xe8, 0xc3, 0x6f, 0xb0, 0xb1, 0x3d, 0xd7, 0x0f, 0xe6,
+	0x94, 0x60, 0x3e, 0x0f, 0xcb, 0xf9, 0xe8, 0x5d, 0x58, 0x76, 0x3d, 0xcf, 0x4f, 0xfc, 0x28, 0x74,
+	0x03, 0xc7, 0x0f, 0x27, 0x91, 0xd4, 0x7d, 0x2b, 0x85, 0x7b, 0xe1, 0x24, 0x12, 0x61, 0xe6, 0x8c,
+	0x38, 0x63, 0x6e, 0xb2, 0xfc, 0x26, 0x6a, 0x2c, 0xcc, 0x9c, 0x11, 0x19, 0x1a, 0x36, 0x41, 0x0b,
+	0x22, 0x16, 0x88, 0x3d, 0x9f, 0x1a, 0x75, 0x91, 0x6e, 0x38, 0xd0, 0xf5, 0x29, 0xea, 0x41, 0x5d,
+	0x28, 0x40, 0xa8, 0xb3, 0xf1, 0xb5, 0xea, 0xe4, 0x16, 0xe6, 0x26, 0x44, 0xa8, 0x13, 0xf8, 0x64,
+	0xa1, 0xcb, 0x4d, 0xd0, 0x26, 0x7e, 0x40, 0x9c, 0xd8, 0xff, 0x92, 0x18, 0x4d, 0xae, 0x9f, 0x1a,
+	0x03, 0x06, 0xfe, 0x97, 0xc4, 0xfc, 0xaa, 0x00, 0xe8, 0xea, 0x75, 0xa0, 0x35, 0xd0, 0xbb, 0xfd,
+	0xcf, 0xec, 0x83, 0x7e, 0xbb, 0xeb, 0x8c, 0xec, 0x1f, 0xd9, 0xfd, 0xcf, 0x6c, 0xfd, 0x06, 0xba,
+	0x09, 0x68, 0x81, 0x0e, 0x46, 0x9d, 0x8e, 0x65, 0x75, 0xad, 0xae, 0x5e, 0xc8, 0xe1, 0xd8, 0xfa,
+	0xf1, 0xc8, 0x1a, 0x0c, 0xad, 0xae, 0x5e, 0xcc, 0x49, 0x19, 0x0c, 0xdb, 0x98, 0xa1, 0x25, 0xb4,
+	0x0a, 0xcb, 0x0b, 0x74, 0xaf, 0xdd, 0x3b, 0xb0, 0xba, 0xfa, 0x12, 0x32, 0x60, 0x2d, 0xb3, 0xe0,
+	0x60, 0x74, 0x78, 0xd8, 0xe7, 0xec, 0xe5, 0x9c, 0xf0, 0x4e, 0xdb, 0xee, 0x58, 0x07, 0x6c, 0x46,
+	0xc5, 0xfc, 0x45, 0x01, 0x36, 0x5e, 0x7d, 0x5f, 0xa8, 0x01, 0x35, 0xbb, 0xef, 0x58, 0x18, 0xf7,
+	0x59, 0xec, 0x5e, 0x86, 0x7a, 0xcf, 0x3e, 0x6a, 0x1f, 0xf4, 0xba, 0xce, 0x08, 0x1f, 0xe8, 0x05,
+	0x06, 0x74, 0xad, 0xa3, 0x5e, 0xc7, 0x72, 0x76, 0x47, 0x83, 0xcf, 0xf5, 0x22, 0x5b, 0xa6, 0x67,
+	0x0f, 0x46, 0x7b, 0x7b, 0xbd, 0x4e, 0xcf, 0xb2, 0x87, 0xce, 0xe0, 0xb0, 0xdd, 0xb1, 0xf4, 0x12,
+	0x5a, 0x81, 0xa6, 0x54, 0x80, 0x14, 0xb6, 0x84, 0x9a, 0xa0, 0xa5, 0x1b, 0x29, 0x9b, 0xbf, 0x54,
+	0x2a, 0xcc, 0x5d, 0x01, 0x9b, 0xd8, 0x7b, 0xd6, 0x7e, 0x6a, 0x65, 0xf4, 0x87, 0xa0, 0x25, 0xa0,
+	0x9e, 0xdd, 0xee, 0x0c, 0x7b, 0x47, 0x2c, 0x95, 0xac, 0x81, 0x2e, 0x30, 0x8e, 0xb4, 0x87, 0x3d,
+	0xfb, 0xa9, 0x5e, 0x44, 0x3a, 0x34, 0x32, 0xa8, 0x25, 0xb4, 0x26, 0x10, 0x6c, 0x1d, 0x59, 0x98,
+	0xb3, 0x2d, 0xa5, 0x02, 0x05, 0xc8, 0xb6, 0xf3, 0xfd, 0xa2, 0x51, 0x30, 0xdb, 0xd0, 0xca, 0xa9,
+	0x26, 0x46, 0x0f, 0x55, 0x1a, 0x2e, 0xe6, 0x43, 0x70, 0x8e, 0x4d, 0x66, 0x62, 0x2e, 0xe2, 0x03,
+	0xa8, 0x70, 0x5a, 0x8c, 0xee, 0x43, 0x99, 0x5b, 0x93, 0xcc, 0xe0, 0xcd, 0xdc, 0x54, 0x2c, 0x68,
+	0xe6, 0x1f, 0x0a, 0x50, 0xeb, 0x87, 0x73, 0x11, 0x70, 0x33, 0xc1, 0xb5, 0x90, 0x0f, 0xae, 0xff,
+	0x07, 0xa0, 0x82, 0x1d, 0xf1, 0x78, 0x98, 0xa9, 0xe1, 0x0c, 0x82, 0x36, 0x60, 0x11, 0x2c, 0x65,
+	0xaa, 0x49, 0x83, 0xa7, 0x01, 0x2a, 0x12, 0xca, 0x2c, 0xb3, 0x08, 0x8c, 0xf7, 0xa0, 0x3e, 0xa3,
+	0x91, 0x37, 0x1f, 0x27, 0x9d, 0xc8, 0x23, 0xb2, 0x74, 0xcb, 0x42, 0x8b, 0xa0, 0x2e, 0xc2, 0x08,
+	0xff, 0x36, 0x1f, 0x83, 0xa6, 0x76, 0x1c, 0xa3, 0x77, 0xf2, 0x65, 0xca, 0x22, 0xe5, 0x28, 0x0e,
+	0x55, 0xa4, 0x8c, 0x41, 0x17, 0x95, 0x4b, 0x2f, 0xe7, 0x60, 0x82, 0xdb, 0x59, 0x04, 0xd3, 0x9a,
+	0x00, 0x7a, 0x1e, 0x7a, 0x04, 0x19, 0x5f, 0xe4, 0x27, 0xce, 0x14, 0x41, 0xa9, 0x90, 0xac, 0xc7,
+	0x9a, 0x7f, 0xab, 0x02, 0x64, 0xe4, 0xbf, 0x5a, 0x9d, 0x07, 0x57, 0xe2, 0xae, 0xa8, 0x8d, 0xde,
+	0xbe, 0xba, 0xc0, 0x37, 0x08, 0xba, 0x3f, 0x58, 0x04, 0xc9, 0xd2, 0xeb, 0xa5, 0x5c, 0x1f, 0x19,
+	0xf7, 0xf3, 0x21, 0x6b, 0x89, 0xcb, 0x78, 0xf7, 0x55, 0x32, 0xa4, 0xb3, 0xf8, 0x51, 0x78, 0xf5,
+	0xfc, 0x7f, 0xfe, 0x9f, 0x0f, 0x4a, 0xb7, 0x60, 0xf5, 0x72, 0x50, 0x62, 0x1e, 0x59, 0x79, 0x45,
+	0xb4, 0xaa, 0x9a, 0xff, 0x54, 0x47, 0xfa, 0xaf, 0x45, 0x29, 0x03, 0xd6, 0x16, 0x1b, 0x70, 0xfa,
+	0xb6, 0xd2, 0x81, 0x5e, 0x46, 0x1b, 0x70, 0x33, 0x47, 0xe9, 0xdb, 0x23, 0x47, 0x94, 0xb3, 0x15,
+	0x46, 0x3b, 0xb2, 0xec, 0x6e, 0x1f, 0x3b, 0x72, 0xe1, 0x67, 0xbd, 0xc1, 0xb3, 0xf6, 0xb0, 0xb3,
+	0xaf, 0x57, 0xd9, 0xa1, 0xfb, 0xcf, 0x3a, 0x3d, 0x67, 0x88, 0xdb, 0xf6, 0x60, 0xcf, 0xc2, 0x72,
+	0xa9, 0x1a, 0x5b, 0x4a, 0x85, 0xa1, 0xbd, 0xd1, 0xc0, 0xea, 0x3a, 0xbb, 0x9f, 0x33, 0xa1, 0xba,
+	0x66, 0xfe, 0xa6, 0x08, 0x6b, 0xd7, 0x5d, 0xf7, 0x7f, 0x3a, 0x3a, 0x2e, 0xf8, 0x3a, 0xfd, 0x67,
+	0xcf, 0x7a, 0x43, 0x19, 0x1e, 0x17, 0x31, 0x53, 0xa2, 0xfc, 0xea, 0xee, 0xc0, 0xed, 0xbc, 0xc8,
+	0xbe, 0xed, 0xb4, 0x77, 0xfb, 0x22, 0xa4, 0x56, 0xd0, 0x1b, 0x60, 0x5c, 0x4f, 0x66, 0xd7, 0x88,
+	0x6e, 0xc3, 0x7a, 0x56, 0x62, 0x3a, 0x31, 0xa3, 0x84, 0x2c, 0xc9, 0xea, 0xea, 0x1a, 0x5a, 0x87,
+	0x15, 0x41, 0x51, 0x96, 0xc1, 0x26, 0x80, 0xf9, 0x55, 0x19, 0x96, 0x0e, 0x23, 0x9a, 0xa0, 0x5b,
+	0x50, 0x9d, 0x45, 0x34, 0x71, 0xc2, 0x88, 0xfb, 0x77, 0x13, 0x57, 0xd8, 0xd0, 0x8e, 0x58, 0x19,
+	0x17, 0xb8, 0xc7, 0x24, 0x90, 0xf5, 0x98, 0x18, 0xa0, 0x07, 0xb2, 0x0d, 0x12, 0x4e, 0x9a, 0x16,
+	0xd3, 0x11, 0x4d, 0xf8, 0x4f, 0xa6, 0x09, 0xfa, 0x1e, 0xd4, 0x5d, 0x6f, 0xea, 0x87, 0xb9, 0xa2,
+	0xcc, 0xd8, 0x96, 0x5d, 0x73, 0x9b, 0x91, 0x84, 0x4b, 0xf2, 0x5e, 0x0d, 0x83, 0xbb, 0x40, 0xd8,
+	0xd4, 0x68, 0x46, 0x28, 0x9f, 0x39, 0x8f, 0x79, 0xe0, 0xcc, 0x4c, 0xed, 0xcf, 0x08, 0x1d, 0x70,
+	0x8a, 0x9a, 0x1a, 0x2d, 0x90, 0x7c, 0x3c, 0xac, 0x5e, 0x8a, 0x87, 0x0f, 0xa1, 0x3c, 0x23, 0x84,
+	0xc6, 0x46, 0xed, 0x52, 0x2f, 0xc0, 0xb7, 0x4f, 0x08, 0x65, 0x1f, 0x58, 0xf0, 0xb0, 0xf6, 0x88,
+	0x5e, 0x38, 0x33, 0x77, 0xfc, 0x9c, 0x24, 0x31, 0xaf, 0xb3, 0x2a, 0x58, 0xa3, 0x17, 0x87, 0x02,
+	0x60, 0xb5, 0x32, 0xbd, 0x90, 0x85, 0x1f, 0x70, 0x62, 0x95, 0x5e, 0x88, 0x82, 0x6f, 0x13, 0x34,
+	0x7a, 0xe1, 0x10, 0x4a, 0x23, 0x1a, 0xf3, 0xe2, 0xaa, 0x82, 0x6b, 0xf4, 0xc2, 0xe2, 0x63, 0x26,
+	0x36, 0x49, 0xc5, 0x36, 0x84, 0xd8, 0x24, 0x2b, 0x36, 0x51, 0x62, 0x9b, 0x42, 0x6c, 0x92, 0x8a,
+	0x4d, 0x16, 0x62, 0x5b, 0x42, 0x6c, 0xa2, 0xc4, 0x7e, 0x08, 0xb5, 0x68, 0x32, 0x73, 0xd8, 0xe5,
+	0x19, 0xcb, 0x3c, 0xd0, 0xaf, 0x6f, 0x67, 0x1f, 0x21, 0x14, 0x11, 0x57, 0xa3, 0xc9, 0x8c, 0x1d,
+	0x73, 0xe3, 0x09, 0xd4, 0xd4, 0x91, 0x5f, 0x9f, 0x45, 0x32, 0x26, 0x52, 0xcc, 0x9a, 0x88, 0x19,
+	0x43, 0x4d, 0xdd, 0x39, 0x6b, 0x45, 0x53, 0x6f, 0xd2, 0xa1, 0x61, 0x0d, 0xf7, 0x2d, 0x6c, 0x5b,
+	0x43, 0xc7, 0xb6, 0x7b, 0x7a, 0x21, 0x87, 0x8c, 0xec, 0x9e, 0xe8, 0x5d, 0x0f, 0x99, 0xff, 0x1f,
+	0x0c, 0xf5, 0xd2, 0x62, 0x60, 0x8f, 0x44, 0x89, 0x73, 0x64, 0x31, 0x46, 0x46, 0x2b, 0x67, 0x86,
+	0xf6, 0x48, 0xaf, 0x98, 0x0f, 0xa1, 0xcc, 0x16, 0x8d, 0x91, 0x99, 0xcf, 0x9a, 0x8d, 0xec, 0x65,
+	0xaa, 0x8c, 0xf9, 0x17, 0x80, 0x8a, 0x48, 0x99, 0xd7, 0xb5, 0x1b, 0x8b, 0x86, 0x5e, 0x93, 0x26,
+	0x8b, 0x60, 0x89, 0x46, 0x51, 0x22, 0xb3, 0x3f, 0xff, 0x66, 0xaa, 0x99, 0xb9, 0x94, 0x84, 0x89,
+	0x23, 0x73, 0xbf, 0x86, 0x6b, 0x02, 0xe8, 0x79, 0xe8, 0x2d, 0x68, 0x49, 0xa2, 0xd2, 0xd0, 0x1a,
+	0xd7, 0x50, 0x43, 0xa0, 0x87, 0xc2, 0x95, 0xd2, 0x4e, 0xa9, 0x7c, 0xb9, 0x53, 0x9a, 0x46, 0x1e,
+	0x09, 0x64, 0x65, 0x20, 0x06, 0xac, 0xb3, 0x38, 0x75, 0xa9, 0x77, 0xee, 0xd2, 0xb4, 0x03, 0x11,
+	0x86, 0xbc, 0xac, 0xf0, 0x4c, 0x13, 0x32, 0xf1, 0xe9, 0x34, 0xc7, 0x2a, 0x3a, 0xb4, 0x65, 0x85,
+	0x2b, 0xd6, 0x77, 0xa0, 0xc2, 0x93, 0x9c, 0xb0, 0xe4, 0xfa, 0xa3, 0x56, 0x2e, 0x37, 0xc6, 0x58,
+	0x52, 0x59, 0xf3, 0x13, 0x13, 0xea, 0xbb, 0x81, 0x13, 0xce, 0xa7, 0xc7, 0x84, 0x72, 0xdb, 0xd6,
+	0x70, 0x43, 0x80, 0x36, 0xc7, 0xf2, 0x8f, 0x55, 0xc6, 0xa5, 0xc7, 0xaa, 0x07, 0xa0, 0xab, 0x67,
+	0x1a, 0x12, 0x7a, 0xb3, 0xc8, 0x0f, 0x13, 0xe3, 0xb6, 0xd8, 0x94, 0xc4, 0x2d, 0x09, 0x33, 0x7d,
+	0x9f, 0x05, 0x6e, 0xc8, 0xbd, 0xa0, 0x89, 0xf9, 0x37, 0x6b, 0x5d, 0xa6, 0xee, 0xd8, 0x71, 0x3d,
+	0x8f, 0x92, 0x58, 0xf8, 0x80, 0x86, 0x61, 0xea, 0x8e, 0xdb, 0x02, 0x41, 0xf7, 0xa1, 0xe1, 0xcf,
+	0xce, 0xfe, 0x7f, 0xc1, 0xc1, 0x3c, 0x41, 0xdb, 0xbf, 0x81, 0xeb, 0x0c, 0xcd, 0x33, 0x7d, 0xbc,
+	0x60, 0x5a, 0xce, 0x30, 0x7d, 0xac, 0x98, 0xde, 0x82, 0xe6, 0x69, 0x14, 0x27, 0x8e, 0x1b, 0x7a,
+	0xc2, 0x71, 0xd6, 0x15, 0x17, 0x83, 0xdb, 0xa1, 0xc7, 0x7d, 0xe3, 0x0e, 0x00, 0xb9, 0x48, 0xa8,
+	0xeb, 0xb8, 0xf4, 0x24, 0x36, 0x6e, 0x89, 0x57, 0x14, 0x8e, 0xb4, 0xe9, 0x49, 0x8c, 0x9e, 0x40,
+	0x73, 0x46, 0xa3, 0x8b, 0x97, 0x8b, 0xa5, 0x56, 0xb9, 0x7e, 0x37, 0xf3, 0x6f, 0x4d, 0xdb, 0x87,
+	0x8c, 0x47, 0x2e, 0x8c, 0x1b, 0xb3, 0xcc, 0xe8, 0x72, 0xa0, 0xd4, 0xff, 0xfd, 0x40, 0xb9, 0xf2,
+	0x2d, 0x02, 0xe5, 0xcd, 0x45, 0xc1, 0x75, 0x53, 0x18, 0xa5, 0xac, 0xa4, 0x76, 0xa1, 0x35, 0x8e,
+	0xc2, 0x90, 0x8c, 0x13, 0x25, 0x15, 0x71, 0xa9, 0x9b, 0x4a, 0x6a, 0x47, 0x50, 0x73, 0x82, 0x9b,
+	0xe3, 0x2c, 0x88, 0xde, 0x87, 0xca, 0x78, 0x1e, 0x27, 0xd1, 0xd4, 0x78, 0xc2, 0x95, 0xb1, 0xb6,
+	0x2d, 0x1e, 0x55, 0xb7, 0xd5, 0xa3, 0xea, 0x76, 0x3b, 0x7c, 0x89, 0x25, 0x0f, 0xfa, 0x08, 0x60,
+	0x36, 0x95, 0xad, 0x6a, 0x6c, 0xfc, 0xac, 0xc0, 0xa7, 0xac, 0x5c, 0x7e, 0x77, 0x89, 0xb1, 0x36,
+	0x5b, 0xbc, 0x23, 0x7d, 0x0a, 0xcb, 0xa2, 0xdc, 0x53, 0x45, 0x64, 0x6c, 0xfc, 0xbc, 0xf0, 0xba,
+	0xe6, 0xa2, 0xe5, 0xe7, 0x5a, 0x92, 0x8d, 0xdf, 0x17, 0xa1, 0x91, 0xbd, 0x92, 0xd7, 0x47, 0xc0,
+	0xbb, 0x50, 0x97, 0xc4, 0x4c, 0xc8, 0x00, 0x2f, 0x7d, 0xcd, 0xbd, 0x03, 0x30, 0x3e, 0x75, 0xc3,
+	0x90, 0x04, 0x6c, 0xba, 0x78, 0xbd, 0xd1, 0x24, 0xd2, 0xf3, 0xd0, 0x16, 0xe8, 0x8a, 0x2c, 0x5e,
+	0xdc, 0x64, 0x28, 0x69, 0xe2, 0x96, 0xc4, 0xf9, 0x5b, 0x54, 0xcf, 0x43, 0x3b, 0xb0, 0xaa, 0x38,
+	0x13, 0x42, 0xa7, 0x7e, 0xc8, 0xcb, 0x16, 0x19, 0x37, 0x90, 0x24, 0x0d, 0x53, 0x0a, 0x5a, 0x87,
+	0x4a, 0x14, 0xce, 0x99, 0xc0, 0x8a, 0x78, 0x6e, 0x89, 0xc2, 0xb9, 0x08, 0x4c, 0x0c, 0x8e, 0x49,
+	0xcc, 0xbc, 0x5f, 0xe5, 0xc2, 0x26, 0x6e, 0x44, 0xe1, 0x7c, 0x20, 0xc0, 0x57, 0xb8, 0x6a, 0xed,
+	0x5a, 0x57, 0xdd, 0xd5, 0xa0, 0x2a, 0x0d, 0xfc, 0x87, 0x4b, 0xb5, 0xba, 0xde, 0x30, 0xff, 0x54,
+	0x80, 0x8d, 0x4c, 0x37, 0xb2, 0xd0, 0x34, 0x79, 0x31, 0x27, 0x71, 0x82, 0xde, 0xcd, 0xeb, 0x93,
+	0x5d, 0x0d, 0x28, 0x0b, 0xea, 0x75, 0x33, 0xba, 0x5d, 0x74, 0x78, 0xa2, 0x3d, 0xb9, 0xb6, 0xc3,
+	0x43, 0xef, 0xc3, 0x8a, 0x2b, 0x1b, 0xdc, 0x7e, 0x38, 0x98, 0x8f, 0xc7, 0xcc, 0xd1, 0x44, 0x94,
+	0xbe, 0x4a, 0x40, 0x5b, 0xb0, 0x2c, 0x9e, 0xb9, 0x52, 0x5e, 0xd1, 0xb4, 0x5d, 0x86, 0xcd, 0x9f,
+	0x16, 0x00, 0x65, 0x0e, 0xf1, 0xad, 0x37, 0xff, 0xea, 0x97, 0xbc, 0x6b, 0xf6, 0x50, 0xba, 0x7e,
+	0x0f, 0x0e, 0xac, 0xe6, 0xb6, 0x10, 0xcf, 0xa2, 0x30, 0x26, 0x68, 0x1f, 0x56, 0xd5, 0x1e, 0xd2,
+	0xc6, 0x46, 0x25, 0x3b, 0x23, 0x1f, 0x5d, 0x32, 0xad, 0xdc, 0x8a, 0x77, 0x09, 0x89, 0xcd, 0xdf,
+	0x16, 0x40, 0x1f, 0x90, 0x60, 0x32, 0x24, 0x71, 0xb2, 0x10, 0xff, 0x29, 0x73, 0xff, 0x78, 0x1e,
+	0x24, 0xdc, 0xd8, 0x33, 0xbd, 0xd2, 0x65, 0xce, 0x2c, 0x30, 0x0f, 0x12, 0x2c, 0xa7, 0x99, 0x87,
+	0xd0, 0xca, 0x53, 0x58, 0x12, 0xe7, 0x3d, 0xd0, 0x60, 0xa0, 0xdf, 0x60, 0x03, 0xd6, 0xca, 0x8c,
+	0x30, 0xab, 0xa4, 0x57, 0xa0, 0x69, 0xf7, 0x87, 0x4e, 0xda, 0xc4, 0x14, 0xaf, 0x36, 0x0d, 0x25,
+	0x73, 0x07, 0xaa, 0xe2, 0x38, 0x2c, 0x32, 0xe7, 0x72, 0x7b, 0x2b, 0x7f, 0x5c, 0x95, 0xdd, 0x7f,
+	0x57, 0x82, 0xb5, 0x81, 0x3f, 0x9d, 0x07, 0x6e, 0x42, 0xda, 0x81, 0x4b, 0xa7, 0xea, 0xfe, 0x2e,
+	0xe7, 0xfa, 0x37, 0x40, 0xf3, 0x43, 0xcf, 0x1f, 0xbb, 0x49, 0xa4, 0xfe, 0x22, 0x49, 0x01, 0x56,
+	0xdf, 0xf8, 0x61, 0x32, 0x51, 0x9e, 0xab, 0xe1, 0x0a, 0x1b, 0xca, 0xec, 0xce, 0xd2, 0x3a, 0x73,
+	0x7a, 0xf1, 0x86, 0x2e, 0xf2, 0x7f, 0x63, 0x26, 0xab, 0x1e, 0xfe, 0x8c, 0x6e, 0x42, 0x93, 0xb9,
+	0x5a, 0x6a, 0x30, 0xf2, 0x09, 0x20, 0x0a, 0xe7, 0x5d, 0x65, 0x27, 0x8f, 0xe1, 0xa6, 0x1f, 0x32,
+	0xd3, 0x20, 0xce, 0xb1, 0x9f, 0x88, 0x1a, 0xce, 0xa1, 0x2c, 0xda, 0x33, 0xaf, 0x2d, 0xe3, 0x55,
+	0x49, 0xdd, 0xf5, 0x13, 0x5e, 0xcf, 0x61, 0xd1, 0x72, 0x96, 0x3d, 0xea, 0x4f, 0x12, 0xee, 0xba,
+	0x65, 0x2c, 0x06, 0x6c, 0xb7, 0x21, 0x39, 0x77, 0xc8, 0x0b, 0x8f, 0xbb, 0x6a, 0x19, 0x57, 0x42,
+	0x72, 0x6e, 0xbd, 0xf0, 0xd0, 0x7b, 0xb0, 0x22, 0x5c, 0x3e, 0x9b, 0xbd, 0xc5, 0xf3, 0xe0, 0x32,
+	0xf7, 0xfa, 0x4c, 0x02, 0xdf, 0x07, 0x8d, 0xa5, 0x02, 0x11, 0x5c, 0x80, 0x1b, 0xc0, 0x7b, 0x0b,
+	0x03, 0xb8, 0x46, 0xa3, 0x3c, 0x95, 0x70, 0x6e, 0x5e, 0xe0, 0xa7, 0x93, 0xcd, 0xb7, 0xa1, 0x99,
+	0xa3, 0x21, 0x0d, 0xca, 0xb8, 0xdd, 0x1b, 0x58, 0xe2, 0x4f, 0x8b, 0xce, 0x81, 0xd5, 0xc6, 0x7a,
+	0x61, 0xf7, 0x0b, 0xd8, 0x88, 0xe8, 0x09, 0xaf, 0x48, 0xc7, 0x11, 0xf5, 0xb6, 0xc5, 0xbf, 0x65,
+	0x72, 0xc9, 0xdd, 0xc6, 0x11, 0x1f, 0x0a, 0x75, 0x7d, 0xb1, 0x7d, 0xe2, 0x27, 0xa7, 0xf3, 0x63,
+	0xe6, 0x70, 0x3b, 0x6a, 0xc2, 0x8e, 0x98, 0xf0, 0x81, 0xfc, 0x7b, 0xed, 0xec, 0x3b, 0x3b, 0x27,
+	0x91, 0xc4, 0x8e, 0x2b, 0x1c, 0x7c, 0xfc, 0xaf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x5d, 0xe4, 0x78,
+	0x8c, 0xde, 0x1b, 0x00, 0x00,
+}
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/voltha/events.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/events.pb.go
new file mode 100644
index 0000000..695af06
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/events.pb.go
@@ -0,0 +1,1476 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/events.proto
+
+package voltha
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	timestamp "github.com/golang/protobuf/ptypes/timestamp"
+	common "github.com/opencord/voltha-protos/v5/go/common"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type EventFilterRuleKey_EventFilterRuleType int32
+
+const (
+	EventFilterRuleKey_filter_all        EventFilterRuleKey_EventFilterRuleType = 0
+	EventFilterRuleKey_category          EventFilterRuleKey_EventFilterRuleType = 1
+	EventFilterRuleKey_sub_category      EventFilterRuleKey_EventFilterRuleType = 2
+	EventFilterRuleKey_kpi_event_type    EventFilterRuleKey_EventFilterRuleType = 3
+	EventFilterRuleKey_config_event_type EventFilterRuleKey_EventFilterRuleType = 4
+	EventFilterRuleKey_device_event_type EventFilterRuleKey_EventFilterRuleType = 5
+)
+
+var EventFilterRuleKey_EventFilterRuleType_name = map[int32]string{
+	0: "filter_all",
+	1: "category",
+	2: "sub_category",
+	3: "kpi_event_type",
+	4: "config_event_type",
+	5: "device_event_type",
+}
+
+var EventFilterRuleKey_EventFilterRuleType_value = map[string]int32{
+	"filter_all":        0,
+	"category":          1,
+	"sub_category":      2,
+	"kpi_event_type":    3,
+	"config_event_type": 4,
+	"device_event_type": 5,
+}
+
+func (x EventFilterRuleKey_EventFilterRuleType) String() string {
+	return proto.EnumName(EventFilterRuleKey_EventFilterRuleType_name, int32(x))
+}
+
+func (EventFilterRuleKey_EventFilterRuleType) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{0, 0}
+}
+
+type ConfigEventType_Types int32
+
+const (
+	ConfigEventType_add    ConfigEventType_Types = 0
+	ConfigEventType_remove ConfigEventType_Types = 1
+	ConfigEventType_update ConfigEventType_Types = 2
+)
+
+var ConfigEventType_Types_name = map[int32]string{
+	0: "add",
+	1: "remove",
+	2: "update",
+}
+
+var ConfigEventType_Types_value = map[string]int32{
+	"add":    0,
+	"remove": 1,
+	"update": 2,
+}
+
+func (x ConfigEventType_Types) String() string {
+	return proto.EnumName(ConfigEventType_Types_name, int32(x))
+}
+
+func (ConfigEventType_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{4, 0}
+}
+
+type KpiEventType_Types int32
+
+const (
+	KpiEventType_slice KpiEventType_Types = 0
+	KpiEventType_ts    KpiEventType_Types = 1
+)
+
+var KpiEventType_Types_name = map[int32]string{
+	0: "slice",
+	1: "ts",
+}
+
+var KpiEventType_Types_value = map[string]int32{
+	"slice": 0,
+	"ts":    1,
+}
+
+func (x KpiEventType_Types) String() string {
+	return proto.EnumName(KpiEventType_Types_name, int32(x))
+}
+
+func (KpiEventType_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{6, 0}
+}
+
+type EventCategory_Types int32
+
+const (
+	EventCategory_COMMUNICATION EventCategory_Types = 0
+	EventCategory_ENVIRONMENT   EventCategory_Types = 1
+	EventCategory_EQUIPMENT     EventCategory_Types = 2
+	EventCategory_SERVICE       EventCategory_Types = 3
+	EventCategory_PROCESSING    EventCategory_Types = 4
+	EventCategory_SECURITY      EventCategory_Types = 5
+)
+
+var EventCategory_Types_name = map[int32]string{
+	0: "COMMUNICATION",
+	1: "ENVIRONMENT",
+	2: "EQUIPMENT",
+	3: "SERVICE",
+	4: "PROCESSING",
+	5: "SECURITY",
+}
+
+var EventCategory_Types_value = map[string]int32{
+	"COMMUNICATION": 0,
+	"ENVIRONMENT":   1,
+	"EQUIPMENT":     2,
+	"SERVICE":       3,
+	"PROCESSING":    4,
+	"SECURITY":      5,
+}
+
+func (x EventCategory_Types) String() string {
+	return proto.EnumName(EventCategory_Types_name, int32(x))
+}
+
+func (EventCategory_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{14, 0}
+}
+
+type EventSubCategory_Types int32
+
+const (
+	EventSubCategory_PON  EventSubCategory_Types = 0
+	EventSubCategory_OLT  EventSubCategory_Types = 1
+	EventSubCategory_ONT  EventSubCategory_Types = 2
+	EventSubCategory_ONU  EventSubCategory_Types = 3
+	EventSubCategory_NNI  EventSubCategory_Types = 4
+	EventSubCategory_NONE EventSubCategory_Types = 5
+)
+
+var EventSubCategory_Types_name = map[int32]string{
+	0: "PON",
+	1: "OLT",
+	2: "ONT",
+	3: "ONU",
+	4: "NNI",
+	5: "NONE",
+}
+
+var EventSubCategory_Types_value = map[string]int32{
+	"PON":  0,
+	"OLT":  1,
+	"ONT":  2,
+	"ONU":  3,
+	"NNI":  4,
+	"NONE": 5,
+}
+
+func (x EventSubCategory_Types) String() string {
+	return proto.EnumName(EventSubCategory_Types_name, int32(x))
+}
+
+func (EventSubCategory_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{15, 0}
+}
+
+type EventType_Types int32
+
+const (
+	EventType_CONFIG_EVENT EventType_Types = 0
+	EventType_KPI_EVENT    EventType_Types = 1
+	EventType_KPI_EVENT2   EventType_Types = 2
+	EventType_DEVICE_EVENT EventType_Types = 3
+	EventType_RPC_EVENT    EventType_Types = 4
+)
+
+var EventType_Types_name = map[int32]string{
+	0: "CONFIG_EVENT",
+	1: "KPI_EVENT",
+	2: "KPI_EVENT2",
+	3: "DEVICE_EVENT",
+	4: "RPC_EVENT",
+}
+
+var EventType_Types_value = map[string]int32{
+	"CONFIG_EVENT": 0,
+	"KPI_EVENT":    1,
+	"KPI_EVENT2":   2,
+	"DEVICE_EVENT": 3,
+	"RPC_EVENT":    4,
+}
+
+func (x EventType_Types) String() string {
+	return proto.EnumName(EventType_Types_name, int32(x))
+}
+
+func (EventType_Types) EnumDescriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{16, 0}
+}
+
+type EventFilterRuleKey struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EventFilterRuleKey) Reset()         { *m = EventFilterRuleKey{} }
+func (m *EventFilterRuleKey) String() string { return proto.CompactTextString(m) }
+func (*EventFilterRuleKey) ProtoMessage()    {}
+func (*EventFilterRuleKey) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{0}
+}
+
+func (m *EventFilterRuleKey) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventFilterRuleKey.Unmarshal(m, b)
+}
+func (m *EventFilterRuleKey) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventFilterRuleKey.Marshal(b, m, deterministic)
+}
+func (m *EventFilterRuleKey) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventFilterRuleKey.Merge(m, src)
+}
+func (m *EventFilterRuleKey) XXX_Size() int {
+	return xxx_messageInfo_EventFilterRuleKey.Size(m)
+}
+func (m *EventFilterRuleKey) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventFilterRuleKey.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventFilterRuleKey proto.InternalMessageInfo
+
+type EventFilterRule struct {
+	Key                  EventFilterRuleKey_EventFilterRuleType `protobuf:"varint,1,opt,name=key,proto3,enum=event.EventFilterRuleKey_EventFilterRuleType" json:"key,omitempty"`
+	Value                string                                 `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                               `json:"-"`
+	XXX_unrecognized     []byte                                 `json:"-"`
+	XXX_sizecache        int32                                  `json:"-"`
+}
+
+func (m *EventFilterRule) Reset()         { *m = EventFilterRule{} }
+func (m *EventFilterRule) String() string { return proto.CompactTextString(m) }
+func (*EventFilterRule) ProtoMessage()    {}
+func (*EventFilterRule) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{1}
+}
+
+func (m *EventFilterRule) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventFilterRule.Unmarshal(m, b)
+}
+func (m *EventFilterRule) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventFilterRule.Marshal(b, m, deterministic)
+}
+func (m *EventFilterRule) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventFilterRule.Merge(m, src)
+}
+func (m *EventFilterRule) XXX_Size() int {
+	return xxx_messageInfo_EventFilterRule.Size(m)
+}
+func (m *EventFilterRule) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventFilterRule.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventFilterRule proto.InternalMessageInfo
+
+func (m *EventFilterRule) GetKey() EventFilterRuleKey_EventFilterRuleType {
+	if m != nil {
+		return m.Key
+	}
+	return EventFilterRuleKey_filter_all
+}
+
+func (m *EventFilterRule) GetValue() string {
+	if m != nil {
+		return m.Value
+	}
+	return ""
+}
+
+type EventFilter struct {
+	Id                   string             `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	Enable               bool               `protobuf:"varint,2,opt,name=enable,proto3" json:"enable,omitempty"`
+	DeviceId             string             `protobuf:"bytes,3,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	EventType            string             `protobuf:"bytes,4,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"`
+	Rules                []*EventFilterRule `protobuf:"bytes,5,rep,name=rules,proto3" json:"rules,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *EventFilter) Reset()         { *m = EventFilter{} }
+func (m *EventFilter) String() string { return proto.CompactTextString(m) }
+func (*EventFilter) ProtoMessage()    {}
+func (*EventFilter) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{2}
+}
+
+func (m *EventFilter) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventFilter.Unmarshal(m, b)
+}
+func (m *EventFilter) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventFilter.Marshal(b, m, deterministic)
+}
+func (m *EventFilter) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventFilter.Merge(m, src)
+}
+func (m *EventFilter) XXX_Size() int {
+	return xxx_messageInfo_EventFilter.Size(m)
+}
+func (m *EventFilter) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventFilter.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventFilter proto.InternalMessageInfo
+
+func (m *EventFilter) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *EventFilter) GetEnable() bool {
+	if m != nil {
+		return m.Enable
+	}
+	return false
+}
+
+func (m *EventFilter) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *EventFilter) GetEventType() string {
+	if m != nil {
+		return m.EventType
+	}
+	return ""
+}
+
+func (m *EventFilter) GetRules() []*EventFilterRule {
+	if m != nil {
+		return m.Rules
+	}
+	return nil
+}
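+
+// Illustrative sketch only (not generated code): a filter combines an enable
+// flag with a set of typed rules keyed by EventFilterRuleKey_EventFilterRuleType.
+// The identifiers and values below are hypothetical.
+//
+//	f := &EventFilter{
+//		Id:       "filter-1",
+//		Enable:   true,
+//		DeviceId: "olt-1",
+//		Rules:    []*EventFilterRule{{Key: EventFilterRuleKey_category, Value: "EQUIPMENT"}},
+//	}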
+
+type EventFilters struct {
+	Filters              []*EventFilter `protobuf:"bytes,1,rep,name=filters,proto3" json:"filters,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *EventFilters) Reset()         { *m = EventFilters{} }
+func (m *EventFilters) String() string { return proto.CompactTextString(m) }
+func (*EventFilters) ProtoMessage()    {}
+func (*EventFilters) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{3}
+}
+
+func (m *EventFilters) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventFilters.Unmarshal(m, b)
+}
+func (m *EventFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventFilters.Marshal(b, m, deterministic)
+}
+func (m *EventFilters) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventFilters.Merge(m, src)
+}
+func (m *EventFilters) XXX_Size() int {
+	return xxx_messageInfo_EventFilters.Size(m)
+}
+func (m *EventFilters) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventFilters.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventFilters proto.InternalMessageInfo
+
+func (m *EventFilters) GetFilters() []*EventFilter {
+	if m != nil {
+		return m.Filters
+	}
+	return nil
+}
+
+type ConfigEventType struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *ConfigEventType) Reset()         { *m = ConfigEventType{} }
+func (m *ConfigEventType) String() string { return proto.CompactTextString(m) }
+func (*ConfigEventType) ProtoMessage()    {}
+func (*ConfigEventType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{4}
+}
+
+func (m *ConfigEventType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ConfigEventType.Unmarshal(m, b)
+}
+func (m *ConfigEventType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ConfigEventType.Marshal(b, m, deterministic)
+}
+func (m *ConfigEventType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfigEventType.Merge(m, src)
+}
+func (m *ConfigEventType) XXX_Size() int {
+	return xxx_messageInfo_ConfigEventType.Size(m)
+}
+func (m *ConfigEventType) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfigEventType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigEventType proto.InternalMessageInfo
+
+type ConfigEvent struct {
+	Type                 ConfigEventType_Types `protobuf:"varint,1,opt,name=type,proto3,enum=event.ConfigEventType_Types" json:"type,omitempty"`
+	Hash                 string                `protobuf:"bytes,2,opt,name=hash,proto3" json:"hash,omitempty"`
+	Data                 string                `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *ConfigEvent) Reset()         { *m = ConfigEvent{} }
+func (m *ConfigEvent) String() string { return proto.CompactTextString(m) }
+func (*ConfigEvent) ProtoMessage()    {}
+func (*ConfigEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{5}
+}
+
+func (m *ConfigEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_ConfigEvent.Unmarshal(m, b)
+}
+func (m *ConfigEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_ConfigEvent.Marshal(b, m, deterministic)
+}
+func (m *ConfigEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_ConfigEvent.Merge(m, src)
+}
+func (m *ConfigEvent) XXX_Size() int {
+	return xxx_messageInfo_ConfigEvent.Size(m)
+}
+func (m *ConfigEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_ConfigEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_ConfigEvent proto.InternalMessageInfo
+
+func (m *ConfigEvent) GetType() ConfigEventType_Types {
+	if m != nil {
+		return m.Type
+	}
+	return ConfigEventType_add
+}
+
+func (m *ConfigEvent) GetHash() string {
+	if m != nil {
+		return m.Hash
+	}
+	return ""
+}
+
+func (m *ConfigEvent) GetData() string {
+	if m != nil {
+		return m.Data
+	}
+	return ""
+}
+
+type KpiEventType struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *KpiEventType) Reset()         { *m = KpiEventType{} }
+func (m *KpiEventType) String() string { return proto.CompactTextString(m) }
+func (*KpiEventType) ProtoMessage()    {}
+func (*KpiEventType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{6}
+}
+
+func (m *KpiEventType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_KpiEventType.Unmarshal(m, b)
+}
+func (m *KpiEventType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_KpiEventType.Marshal(b, m, deterministic)
+}
+func (m *KpiEventType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KpiEventType.Merge(m, src)
+}
+func (m *KpiEventType) XXX_Size() int {
+	return xxx_messageInfo_KpiEventType.Size(m)
+}
+func (m *KpiEventType) XXX_DiscardUnknown() {
+	xxx_messageInfo_KpiEventType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KpiEventType proto.InternalMessageInfo
+
+//
+// Struct to convey a dictionary of metric metadata.
+type MetricMetaData struct {
+	Title           string  `protobuf:"bytes,1,opt,name=title,proto3" json:"title,omitempty"`
+	Ts              float64 `protobuf:"fixed64,2,opt,name=ts,proto3" json:"ts,omitempty"`
+	LogicalDeviceId string  `protobuf:"bytes,3,opt,name=logical_device_id,json=logicalDeviceId,proto3" json:"logical_device_id,omitempty"`
+	// (equivalent to the DPID that ONOS has
+	// for the VOLTHA device, without the
+	// 'of:' prefix)
+	SerialNo             string            `protobuf:"bytes,4,opt,name=serial_no,json=serialNo,proto3" json:"serial_no,omitempty"`
+	DeviceId             string            `protobuf:"bytes,5,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	Context              map[string]string `protobuf:"bytes,6,rep,name=context,proto3" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	Uuid                 string            `protobuf:"bytes,7,opt,name=uuid,proto3" json:"uuid,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *MetricMetaData) Reset()         { *m = MetricMetaData{} }
+func (m *MetricMetaData) String() string { return proto.CompactTextString(m) }
+func (*MetricMetaData) ProtoMessage()    {}
+func (*MetricMetaData) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{7}
+}
+
+func (m *MetricMetaData) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MetricMetaData.Unmarshal(m, b)
+}
+func (m *MetricMetaData) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MetricMetaData.Marshal(b, m, deterministic)
+}
+func (m *MetricMetaData) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricMetaData.Merge(m, src)
+}
+func (m *MetricMetaData) XXX_Size() int {
+	return xxx_messageInfo_MetricMetaData.Size(m)
+}
+func (m *MetricMetaData) XXX_DiscardUnknown() {
+	xxx_messageInfo_MetricMetaData.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricMetaData proto.InternalMessageInfo
+
+func (m *MetricMetaData) GetTitle() string {
+	if m != nil {
+		return m.Title
+	}
+	return ""
+}
+
+func (m *MetricMetaData) GetTs() float64 {
+	if m != nil {
+		return m.Ts
+	}
+	return 0
+}
+
+func (m *MetricMetaData) GetLogicalDeviceId() string {
+	if m != nil {
+		return m.LogicalDeviceId
+	}
+	return ""
+}
+
+func (m *MetricMetaData) GetSerialNo() string {
+	if m != nil {
+		return m.SerialNo
+	}
+	return ""
+}
+
+func (m *MetricMetaData) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *MetricMetaData) GetContext() map[string]string {
+	if m != nil {
+		return m.Context
+	}
+	return nil
+}
+
+func (m *MetricMetaData) GetUuid() string {
+	if m != nil {
+		return m.Uuid
+	}
+	return ""
+}
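+
+// Illustrative sketch only (not generated code): metadata is shared by a group
+// of metrics and can carry free-form context. Values are hypothetical.
+//
+//	md := &MetricMetaData{
+//		Title:    "EthernetPortStats", // hypothetical KPI group title
+//		Ts:       1600000000.0,
+//		DeviceId: "onu-1",
+//		Context:  map[string]string{"intf_id": "0"},
+//	}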
+
+//
+// Struct to convey a dictionary of metric->value pairs. Typically used in
+// pure shared-timestamp or shared-timestamp + shared object prefix situations.
+type MetricValuePairs struct {
+	// Metric / value pairs.
+	Metrics              map[string]float32 `protobuf:"bytes,1,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed32,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *MetricValuePairs) Reset()         { *m = MetricValuePairs{} }
+func (m *MetricValuePairs) String() string { return proto.CompactTextString(m) }
+func (*MetricValuePairs) ProtoMessage()    {}
+func (*MetricValuePairs) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{8}
+}
+
+func (m *MetricValuePairs) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MetricValuePairs.Unmarshal(m, b)
+}
+func (m *MetricValuePairs) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MetricValuePairs.Marshal(b, m, deterministic)
+}
+func (m *MetricValuePairs) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricValuePairs.Merge(m, src)
+}
+func (m *MetricValuePairs) XXX_Size() int {
+	return xxx_messageInfo_MetricValuePairs.Size(m)
+}
+func (m *MetricValuePairs) XXX_DiscardUnknown() {
+	xxx_messageInfo_MetricValuePairs.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricValuePairs proto.InternalMessageInfo
+
+func (m *MetricValuePairs) GetMetrics() map[string]float32 {
+	if m != nil {
+		return m.Metrics
+	}
+	return nil
+}
+
+//
+// Struct to group metadata for a metric (or group of metrics) with the key-value
+// pairs of collected metrics
+type MetricInformation struct {
+	Metadata             *MetricMetaData    `protobuf:"bytes,1,opt,name=metadata,proto3" json:"metadata,omitempty"`
+	Metrics              map[string]float32 `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"fixed32,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}           `json:"-"`
+	XXX_unrecognized     []byte             `json:"-"`
+	XXX_sizecache        int32              `json:"-"`
+}
+
+func (m *MetricInformation) Reset()         { *m = MetricInformation{} }
+func (m *MetricInformation) String() string { return proto.CompactTextString(m) }
+func (*MetricInformation) ProtoMessage()    {}
+func (*MetricInformation) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{9}
+}
+
+func (m *MetricInformation) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_MetricInformation.Unmarshal(m, b)
+}
+func (m *MetricInformation) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_MetricInformation.Marshal(b, m, deterministic)
+}
+func (m *MetricInformation) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_MetricInformation.Merge(m, src)
+}
+func (m *MetricInformation) XXX_Size() int {
+	return xxx_messageInfo_MetricInformation.Size(m)
+}
+func (m *MetricInformation) XXX_DiscardUnknown() {
+	xxx_messageInfo_MetricInformation.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_MetricInformation proto.InternalMessageInfo
+
+func (m *MetricInformation) GetMetadata() *MetricMetaData {
+	if m != nil {
+		return m.Metadata
+	}
+	return nil
+}
+
+func (m *MetricInformation) GetMetrics() map[string]float32 {
+	if m != nil {
+		return m.Metrics
+	}
+	return nil
+}
+
+//
+// Legacy KPI Event structure.  In mid-August, the KPI event format was updated
+//                               to a more easily parsable format. See VOL-1140
+//                               for more information.
+type KpiEvent struct {
+	Type                 KpiEventType_Types           `protobuf:"varint,1,opt,name=type,proto3,enum=event.KpiEventType_Types" json:"type,omitempty"`
+	Ts                   float32                      `protobuf:"fixed32,2,opt,name=ts,proto3" json:"ts,omitempty"`
+	Prefixes             map[string]*MetricValuePairs `protobuf:"bytes,3,rep,name=prefixes,proto3" json:"prefixes,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}                     `json:"-"`
+	XXX_unrecognized     []byte                       `json:"-"`
+	XXX_sizecache        int32                        `json:"-"`
+}
+
+func (m *KpiEvent) Reset()         { *m = KpiEvent{} }
+func (m *KpiEvent) String() string { return proto.CompactTextString(m) }
+func (*KpiEvent) ProtoMessage()    {}
+func (*KpiEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{10}
+}
+
+func (m *KpiEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_KpiEvent.Unmarshal(m, b)
+}
+func (m *KpiEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_KpiEvent.Marshal(b, m, deterministic)
+}
+func (m *KpiEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KpiEvent.Merge(m, src)
+}
+func (m *KpiEvent) XXX_Size() int {
+	return xxx_messageInfo_KpiEvent.Size(m)
+}
+func (m *KpiEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_KpiEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KpiEvent proto.InternalMessageInfo
+
+func (m *KpiEvent) GetType() KpiEventType_Types {
+	if m != nil {
+		return m.Type
+	}
+	return KpiEventType_slice
+}
+
+func (m *KpiEvent) GetTs() float32 {
+	if m != nil {
+		return m.Ts
+	}
+	return 0
+}
+
+func (m *KpiEvent) GetPrefixes() map[string]*MetricValuePairs {
+	if m != nil {
+		return m.Prefixes
+	}
+	return nil
+}
+
+type KpiEvent2 struct {
+	// Type of KPI Event
+	Type KpiEventType_Types `protobuf:"varint,1,opt,name=type,proto3,enum=event.KpiEventType_Types" json:"type,omitempty"`
+	// Fields used when the type is slice:
+	Ts                   float64              `protobuf:"fixed64,2,opt,name=ts,proto3" json:"ts,omitempty"`
+	SliceData            []*MetricInformation `protobuf:"bytes,3,rep,name=slice_data,json=sliceData,proto3" json:"slice_data,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *KpiEvent2) Reset()         { *m = KpiEvent2{} }
+func (m *KpiEvent2) String() string { return proto.CompactTextString(m) }
+func (*KpiEvent2) ProtoMessage()    {}
+func (*KpiEvent2) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{11}
+}
+
+func (m *KpiEvent2) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_KpiEvent2.Unmarshal(m, b)
+}
+func (m *KpiEvent2) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_KpiEvent2.Marshal(b, m, deterministic)
+}
+func (m *KpiEvent2) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_KpiEvent2.Merge(m, src)
+}
+func (m *KpiEvent2) XXX_Size() int {
+	return xxx_messageInfo_KpiEvent2.Size(m)
+}
+func (m *KpiEvent2) XXX_DiscardUnknown() {
+	xxx_messageInfo_KpiEvent2.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_KpiEvent2 proto.InternalMessageInfo
+
+func (m *KpiEvent2) GetType() KpiEventType_Types {
+	if m != nil {
+		return m.Type
+	}
+	return KpiEventType_slice
+}
+
+func (m *KpiEvent2) GetTs() float64 {
+	if m != nil {
+		return m.Ts
+	}
+	return 0
+}
+
+func (m *KpiEvent2) GetSliceData() []*MetricInformation {
+	if m != nil {
+		return m.SliceData
+	}
+	return nil
+}
+
+//
+// Describes the events specific to device
+type DeviceEvent struct {
+	// Identifier of the originating resource of the event, e.g. device_id
+	ResourceId string `protobuf:"bytes,1,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"`
+	// device_event_name indicates clearly the name of the device event
+	DeviceEventName string `protobuf:"bytes,2,opt,name=device_event_name,json=deviceEventName,proto3" json:"device_event_name,omitempty"`
+	// Textual explanation of the device event
+	Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"`
+	// Key/Value storage for extra information that may give context to the event
+	Context              map[string]string `protobuf:"bytes,4,rep,name=context,proto3" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *DeviceEvent) Reset()         { *m = DeviceEvent{} }
+func (m *DeviceEvent) String() string { return proto.CompactTextString(m) }
+func (*DeviceEvent) ProtoMessage()    {}
+func (*DeviceEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{12}
+}
+
+func (m *DeviceEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_DeviceEvent.Unmarshal(m, b)
+}
+func (m *DeviceEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_DeviceEvent.Marshal(b, m, deterministic)
+}
+func (m *DeviceEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_DeviceEvent.Merge(m, src)
+}
+func (m *DeviceEvent) XXX_Size() int {
+	return xxx_messageInfo_DeviceEvent.Size(m)
+}
+func (m *DeviceEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_DeviceEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_DeviceEvent proto.InternalMessageInfo
+
+func (m *DeviceEvent) GetResourceId() string {
+	if m != nil {
+		return m.ResourceId
+	}
+	return ""
+}
+
+func (m *DeviceEvent) GetDeviceEventName() string {
+	if m != nil {
+		return m.DeviceEventName
+	}
+	return ""
+}
+
+func (m *DeviceEvent) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *DeviceEvent) GetContext() map[string]string {
+	if m != nil {
+		return m.Context
+	}
+	return nil
+}
+
+//
+// Describes the events specific to an RPC request
+type RPCEvent struct {
+	// RPC name
+	Rpc string `protobuf:"bytes,1,opt,name=rpc,proto3" json:"rpc,omitempty"`
+	// The operation id of that request.  Can be a log correlation ID
+	OperationId string `protobuf:"bytes,2,opt,name=operation_id,json=operationId,proto3" json:"operation_id,omitempty"`
+	// Identifies the service name originating the event
+	Service string `protobuf:"bytes,3,opt,name=service,proto3" json:"service,omitempty"`
+	// Identifies the stack originating the event
+	StackId string `protobuf:"bytes,4,opt,name=stack_id,json=stackId,proto3" json:"stack_id,omitempty"`
+	// Identifies the resource upon which the action is taken, e.g. device_id
+	ResourceId string `protobuf:"bytes,5,opt,name=resource_id,json=resourceId,proto3" json:"resource_id,omitempty"`
+	// Textual explanation of the event
+	Description string `protobuf:"bytes,6,opt,name=description,proto3" json:"description,omitempty"`
+	// Key/Value storage for extra information that may give context to the event
+	Context map[string]string `protobuf:"bytes,7,rep,name=context,proto3" json:"context,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+	// Status of the RPC Event
+	Status               *common.OperationResp `protobuf:"bytes,8,opt,name=status,proto3" json:"status,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}              `json:"-"`
+	XXX_unrecognized     []byte                `json:"-"`
+	XXX_sizecache        int32                 `json:"-"`
+}
+
+func (m *RPCEvent) Reset()         { *m = RPCEvent{} }
+func (m *RPCEvent) String() string { return proto.CompactTextString(m) }
+func (*RPCEvent) ProtoMessage()    {}
+func (*RPCEvent) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{13}
+}
+
+func (m *RPCEvent) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_RPCEvent.Unmarshal(m, b)
+}
+func (m *RPCEvent) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_RPCEvent.Marshal(b, m, deterministic)
+}
+func (m *RPCEvent) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_RPCEvent.Merge(m, src)
+}
+func (m *RPCEvent) XXX_Size() int {
+	return xxx_messageInfo_RPCEvent.Size(m)
+}
+func (m *RPCEvent) XXX_DiscardUnknown() {
+	xxx_messageInfo_RPCEvent.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_RPCEvent proto.InternalMessageInfo
+
+func (m *RPCEvent) GetRpc() string {
+	if m != nil {
+		return m.Rpc
+	}
+	return ""
+}
+
+func (m *RPCEvent) GetOperationId() string {
+	if m != nil {
+		return m.OperationId
+	}
+	return ""
+}
+
+func (m *RPCEvent) GetService() string {
+	if m != nil {
+		return m.Service
+	}
+	return ""
+}
+
+func (m *RPCEvent) GetStackId() string {
+	if m != nil {
+		return m.StackId
+	}
+	return ""
+}
+
+func (m *RPCEvent) GetResourceId() string {
+	if m != nil {
+		return m.ResourceId
+	}
+	return ""
+}
+
+func (m *RPCEvent) GetDescription() string {
+	if m != nil {
+		return m.Description
+	}
+	return ""
+}
+
+func (m *RPCEvent) GetContext() map[string]string {
+	if m != nil {
+		return m.Context
+	}
+	return nil
+}
+
+func (m *RPCEvent) GetStatus() *common.OperationResp {
+	if m != nil {
+		return m.Status
+	}
+	return nil
+}
+
+//
+// Identify the area of the system impacted by the event.
+type EventCategory struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EventCategory) Reset()         { *m = EventCategory{} }
+func (m *EventCategory) String() string { return proto.CompactTextString(m) }
+func (*EventCategory) ProtoMessage()    {}
+func (*EventCategory) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{14}
+}
+
+func (m *EventCategory) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventCategory.Unmarshal(m, b)
+}
+func (m *EventCategory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventCategory.Marshal(b, m, deterministic)
+}
+func (m *EventCategory) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventCategory.Merge(m, src)
+}
+func (m *EventCategory) XXX_Size() int {
+	return xxx_messageInfo_EventCategory.Size(m)
+}
+func (m *EventCategory) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventCategory.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventCategory proto.InternalMessageInfo
+
+//
+// Identify the functional category originating the event
+type EventSubCategory struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EventSubCategory) Reset()         { *m = EventSubCategory{} }
+func (m *EventSubCategory) String() string { return proto.CompactTextString(m) }
+func (*EventSubCategory) ProtoMessage()    {}
+func (*EventSubCategory) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{15}
+}
+
+func (m *EventSubCategory) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventSubCategory.Unmarshal(m, b)
+}
+func (m *EventSubCategory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventSubCategory.Marshal(b, m, deterministic)
+}
+func (m *EventSubCategory) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventSubCategory.Merge(m, src)
+}
+func (m *EventSubCategory) XXX_Size() int {
+	return xxx_messageInfo_EventSubCategory.Size(m)
+}
+func (m *EventSubCategory) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventSubCategory.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventSubCategory proto.InternalMessageInfo
+
+//
+// Identify the type of event
+type EventType struct {
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *EventType) Reset()         { *m = EventType{} }
+func (m *EventType) String() string { return proto.CompactTextString(m) }
+func (*EventType) ProtoMessage()    {}
+func (*EventType) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{16}
+}
+
+func (m *EventType) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventType.Unmarshal(m, b)
+}
+func (m *EventType) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventType.Marshal(b, m, deterministic)
+}
+func (m *EventType) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventType.Merge(m, src)
+}
+func (m *EventType) XXX_Size() int {
+	return xxx_messageInfo_EventType.Size(m)
+}
+func (m *EventType) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventType.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventType proto.InternalMessageInfo
+
+//
+// Common header applied to all event types
+type EventHeader struct {
+	// Unique ID for this event.  e.g. voltha.some_olt.1234
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Refers to the functional area affected by the event
+	Category EventCategory_Types `protobuf:"varint,2,opt,name=category,proto3,enum=event.EventCategory_Types" json:"category,omitempty"`
+	// Refers to the functional category of the event
+	SubCategory EventSubCategory_Types `protobuf:"varint,3,opt,name=sub_category,json=subCategory,proto3,enum=event.EventSubCategory_Types" json:"sub_category,omitempty"`
+	// Refers to the type of the event
+	Type EventType_Types `protobuf:"varint,4,opt,name=type,proto3,enum=event.EventType_Types" json:"type,omitempty"`
+	// The version identifier for this event type, thus allowing each
+	// event type to evolve independently. The version should be in
+	// “MAJOR.MINOR” format, and minor changes must only be additive
+	// and non-breaking.
+	TypeVersion string `protobuf:"bytes,5,opt,name=type_version,json=typeVersion,proto3" json:"type_version,omitempty"`
+	// Timestamp at which the event was first raised.
+	// This represents the UTC timestamp since epoch (in seconds) when
+	// the event was first raised from the source entity.
+	// If the source entity doesn't send the raised_ts, this shall be set
+	// to the timestamp when the event was received.
+	RaisedTs *timestamp.Timestamp `protobuf:"bytes,6,opt,name=raised_ts,json=raisedTs,proto3" json:"raised_ts,omitempty"`
+	// Timestamp at which the event was reported.
+	// This represents the UTC timestamp since epoch (in seconds) when
+	// the event was reported (this timestamp is >= raised_ts).
+	// If the source entity that reported this event doesn't send the
+	// reported_ts, this shall be set to the same value as raised_ts.
+	ReportedTs           *timestamp.Timestamp `protobuf:"bytes,7,opt,name=reported_ts,json=reportedTs,proto3" json:"reported_ts,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *EventHeader) Reset()         { *m = EventHeader{} }
+func (m *EventHeader) String() string { return proto.CompactTextString(m) }
+func (*EventHeader) ProtoMessage()    {}
+func (*EventHeader) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{17}
+}
+
+func (m *EventHeader) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_EventHeader.Unmarshal(m, b)
+}
+func (m *EventHeader) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_EventHeader.Marshal(b, m, deterministic)
+}
+func (m *EventHeader) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_EventHeader.Merge(m, src)
+}
+func (m *EventHeader) XXX_Size() int {
+	return xxx_messageInfo_EventHeader.Size(m)
+}
+func (m *EventHeader) XXX_DiscardUnknown() {
+	xxx_messageInfo_EventHeader.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_EventHeader proto.InternalMessageInfo
+
+func (m *EventHeader) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *EventHeader) GetCategory() EventCategory_Types {
+	if m != nil {
+		return m.Category
+	}
+	return EventCategory_COMMUNICATION
+}
+
+func (m *EventHeader) GetSubCategory() EventSubCategory_Types {
+	if m != nil {
+		return m.SubCategory
+	}
+	return EventSubCategory_PON
+}
+
+func (m *EventHeader) GetType() EventType_Types {
+	if m != nil {
+		return m.Type
+	}
+	return EventType_CONFIG_EVENT
+}
+
+func (m *EventHeader) GetTypeVersion() string {
+	if m != nil {
+		return m.TypeVersion
+	}
+	return ""
+}
+
+func (m *EventHeader) GetRaisedTs() *timestamp.Timestamp {
+	if m != nil {
+		return m.RaisedTs
+	}
+	return nil
+}
+
+func (m *EventHeader) GetReportedTs() *timestamp.Timestamp {
+	if m != nil {
+		return m.ReportedTs
+	}
+	return nil
+}
+
+//
+// Event Structure
+type Event struct {
+	// event header
+	Header *EventHeader `protobuf:"bytes,1,opt,name=header,proto3" json:"header,omitempty"`
+	// oneof event types referred by EventType.
+	//
+	// Types that are valid to be assigned to EventType:
+	//	*Event_ConfigEvent
+	//	*Event_KpiEvent
+	//	*Event_KpiEvent2
+	//	*Event_DeviceEvent
+	//	*Event_RpcEvent
+	EventType            isEvent_EventType `protobuf_oneof:"event_type"`
+	XXX_NoUnkeyedLiteral struct{}          `json:"-"`
+	XXX_unrecognized     []byte            `json:"-"`
+	XXX_sizecache        int32             `json:"-"`
+}
+
+func (m *Event) Reset()         { *m = Event{} }
+func (m *Event) String() string { return proto.CompactTextString(m) }
+func (*Event) ProtoMessage()    {}
+func (*Event) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e63e6c07044fd2c4, []int{18}
+}
+
+func (m *Event) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Event.Unmarshal(m, b)
+}
+func (m *Event) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Event.Marshal(b, m, deterministic)
+}
+func (m *Event) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Event.Merge(m, src)
+}
+func (m *Event) XXX_Size() int {
+	return xxx_messageInfo_Event.Size(m)
+}
+func (m *Event) XXX_DiscardUnknown() {
+	xxx_messageInfo_Event.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Event proto.InternalMessageInfo
+
+func (m *Event) GetHeader() *EventHeader {
+	if m != nil {
+		return m.Header
+	}
+	return nil
+}
+
+type isEvent_EventType interface {
+	isEvent_EventType()
+}
+
+type Event_ConfigEvent struct {
+	ConfigEvent *ConfigEvent `protobuf:"bytes,2,opt,name=config_event,json=configEvent,proto3,oneof"`
+}
+
+type Event_KpiEvent struct {
+	KpiEvent *KpiEvent `protobuf:"bytes,3,opt,name=kpi_event,json=kpiEvent,proto3,oneof"`
+}
+
+type Event_KpiEvent2 struct {
+	KpiEvent2 *KpiEvent2 `protobuf:"bytes,4,opt,name=kpi_event2,json=kpiEvent2,proto3,oneof"`
+}
+
+type Event_DeviceEvent struct {
+	DeviceEvent *DeviceEvent `protobuf:"bytes,5,opt,name=device_event,json=deviceEvent,proto3,oneof"`
+}
+
+type Event_RpcEvent struct {
+	RpcEvent *RPCEvent `protobuf:"bytes,6,opt,name=rpc_event,json=rpcEvent,proto3,oneof"`
+}
+
+func (*Event_ConfigEvent) isEvent_EventType() {}
+
+func (*Event_KpiEvent) isEvent_EventType() {}
+
+func (*Event_KpiEvent2) isEvent_EventType() {}
+
+func (*Event_DeviceEvent) isEvent_EventType() {}
+
+func (*Event_RpcEvent) isEvent_EventType() {}
+
+func (m *Event) GetEventType() isEvent_EventType {
+	if m != nil {
+		return m.EventType
+	}
+	return nil
+}
+
+func (m *Event) GetConfigEvent() *ConfigEvent {
+	if x, ok := m.GetEventType().(*Event_ConfigEvent); ok {
+		return x.ConfigEvent
+	}
+	return nil
+}
+
+func (m *Event) GetKpiEvent() *KpiEvent {
+	if x, ok := m.GetEventType().(*Event_KpiEvent); ok {
+		return x.KpiEvent
+	}
+	return nil
+}
+
+func (m *Event) GetKpiEvent2() *KpiEvent2 {
+	if x, ok := m.GetEventType().(*Event_KpiEvent2); ok {
+		return x.KpiEvent2
+	}
+	return nil
+}
+
+func (m *Event) GetDeviceEvent() *DeviceEvent {
+	if x, ok := m.GetEventType().(*Event_DeviceEvent); ok {
+		return x.DeviceEvent
+	}
+	return nil
+}
+
+func (m *Event) GetRpcEvent() *RPCEvent {
+	if x, ok := m.GetEventType().(*Event_RpcEvent); ok {
+		return x.RpcEvent
+	}
+	return nil
+}
+
+// XXX_OneofWrappers is for the internal use of the proto package.
+func (*Event) XXX_OneofWrappers() []interface{} {
+	return []interface{}{
+		(*Event_ConfigEvent)(nil),
+		(*Event_KpiEvent)(nil),
+		(*Event_KpiEvent2)(nil),
+		(*Event_DeviceEvent)(nil),
+		(*Event_RpcEvent)(nil),
+	}
+}
+
+func init() {
+	proto.RegisterEnum("event.EventFilterRuleKey_EventFilterRuleType", EventFilterRuleKey_EventFilterRuleType_name, EventFilterRuleKey_EventFilterRuleType_value)
+	proto.RegisterEnum("event.ConfigEventType_Types", ConfigEventType_Types_name, ConfigEventType_Types_value)
+	proto.RegisterEnum("event.KpiEventType_Types", KpiEventType_Types_name, KpiEventType_Types_value)
+	proto.RegisterEnum("event.EventCategory_Types", EventCategory_Types_name, EventCategory_Types_value)
+	proto.RegisterEnum("event.EventSubCategory_Types", EventSubCategory_Types_name, EventSubCategory_Types_value)
+	proto.RegisterEnum("event.EventType_Types", EventType_Types_name, EventType_Types_value)
+	proto.RegisterType((*EventFilterRuleKey)(nil), "event.EventFilterRuleKey")
+	proto.RegisterType((*EventFilterRule)(nil), "event.EventFilterRule")
+	proto.RegisterType((*EventFilter)(nil), "event.EventFilter")
+	proto.RegisterType((*EventFilters)(nil), "event.EventFilters")
+	proto.RegisterType((*ConfigEventType)(nil), "event.ConfigEventType")
+	proto.RegisterType((*ConfigEvent)(nil), "event.ConfigEvent")
+	proto.RegisterType((*KpiEventType)(nil), "event.KpiEventType")
+	proto.RegisterType((*MetricMetaData)(nil), "event.MetricMetaData")
+	proto.RegisterMapType((map[string]string)(nil), "event.MetricMetaData.ContextEntry")
+	proto.RegisterType((*MetricValuePairs)(nil), "event.MetricValuePairs")
+	proto.RegisterMapType((map[string]float32)(nil), "event.MetricValuePairs.MetricsEntry")
+	proto.RegisterType((*MetricInformation)(nil), "event.MetricInformation")
+	proto.RegisterMapType((map[string]float32)(nil), "event.MetricInformation.MetricsEntry")
+	proto.RegisterType((*KpiEvent)(nil), "event.KpiEvent")
+	proto.RegisterMapType((map[string]*MetricValuePairs)(nil), "event.KpiEvent.PrefixesEntry")
+	proto.RegisterType((*KpiEvent2)(nil), "event.KpiEvent2")
+	proto.RegisterType((*DeviceEvent)(nil), "event.DeviceEvent")
+	proto.RegisterMapType((map[string]string)(nil), "event.DeviceEvent.ContextEntry")
+	proto.RegisterType((*RPCEvent)(nil), "event.RPCEvent")
+	proto.RegisterMapType((map[string]string)(nil), "event.RPCEvent.ContextEntry")
+	proto.RegisterType((*EventCategory)(nil), "event.EventCategory")
+	proto.RegisterType((*EventSubCategory)(nil), "event.EventSubCategory")
+	proto.RegisterType((*EventType)(nil), "event.EventType")
+	proto.RegisterType((*EventHeader)(nil), "event.EventHeader")
+	proto.RegisterType((*Event)(nil), "event.Event")
+}
+
+func init() { proto.RegisterFile("voltha_protos/events.proto", fileDescriptor_e63e6c07044fd2c4) }
+
+var fileDescriptor_e63e6c07044fd2c4 = []byte{
+	// 1436 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x57, 0xdb, 0x6e, 0xdb, 0x46,
+	0x13, 0x16, 0xa9, 0x23, 0x87, 0xb2, 0x4d, 0xef, 0xff, 0x27, 0x55, 0x94, 0x04, 0x71, 0x88, 0x16,
+	0x30, 0x8c, 0x58, 0x6e, 0x54, 0x34, 0x6e, 0x0e, 0x48, 0xda, 0x28, 0x4a, 0x4c, 0x24, 0x96, 0x54,
+	0x5a, 0x36, 0x90, 0xde, 0x08, 0x6b, 0x72, 0x2d, 0x13, 0xa6, 0x44, 0x82, 0xbb, 0x12, 0xe2, 0xeb,
+	0x5e, 0xf4, 0xae, 0x8f, 0xd0, 0xde, 0xf7, 0x35, 0x0a, 0xf4, 0x21, 0x82, 0xbe, 0x45, 0x5f, 0xa0,
+	0xd8, 0x03, 0x25, 0x4a, 0x56, 0x1a, 0xa0, 0x41, 0xaf, 0xb4, 0x3b, 0x3b, 0xc3, 0xf9, 0xe6, 0x9b,
+	0x9d, 0xd9, 0x11, 0xd4, 0xa7, 0x51, 0xc8, 0xce, 0xf1, 0x20, 0x4e, 0x22, 0x16, 0xd1, 0x3d, 0x32,
+	0x25, 0x63, 0x46, 0x1b, 0x62, 0x87, 0x8a, 0x62, 0x57, 0xbf, 0x35, 0x8c, 0xa2, 0x61, 0x48, 0xf6,
+	0x70, 0x1c, 0xec, 0xe1, 0xf1, 0x38, 0x62, 0x98, 0x05, 0xd1, 0x58, 0x29, 0xd5, 0xef, 0xa8, 0x53,
+	0xb1, 0x3b, 0x9d, 0x9c, 0xed, 0xb1, 0x60, 0x44, 0x28, 0xc3, 0xa3, 0x58, 0x29, 0x2c, 0x79, 0xf0,
+	0xa2, 0xd1, 0x28, 0x1a, 0xcb, 0x33, 0xfb, 0x17, 0x0d, 0x50, 0x9b, 0x3b, 0x79, 0x19, 0x84, 0x8c,
+	0x24, 0xee, 0x24, 0x24, 0xaf, 0xc9, 0xa5, 0xfd, 0x93, 0x06, 0xff, 0x5b, 0x12, 0xf7, 0x2f, 0x63,
+	0x82, 0xd6, 0x01, 0xce, 0x84, 0x64, 0x80, 0xc3, 0xd0, 0xca, 0xa1, 0x2a, 0x54, 0x3c, 0xcc, 0xc8,
+	0x30, 0x4a, 0x2e, 0x2d, 0x0d, 0x59, 0x50, 0xa5, 0x93, 0xd3, 0xc1, 0x4c, 0xa2, 0x23, 0x04, 0xeb,
+	0x17, 0x71, 0x30, 0x10, 0x61, 0x0c, 0xd8, 0x65, 0x4c, 0xac, 0x3c, 0xba, 0x06, 0x9b, 0x5e, 0x34,
+	0x3e, 0x0b, 0x86, 0x59, 0x71, 0x81, 0x8b, 0x7d, 0x32, 0x0d, 0x3c, 0x92, 0x15, 0x17, 0xed, 0x73,
+	0xd8, 0x58, 0x02, 0x82, 0x9e, 0x41, 0xfe, 0x82, 0x5c, 0xd6, 0xb4, 0x2d, 0x6d, 0x7b, 0xbd, 0xb9,
+	0xdb, 0x10, 0xea, 0x8d, 0xab, 0x41, 0x34, 0x56, 0x04, 0xe0, 0x72, 0x4b, 0xf4, 0x7f, 0x28, 0x4e,
+	0x71, 0x38, 0x21, 0x35, 0x7d, 0x4b, 0xdb, 0x36, 0x5c, 0xb9, 0xb1, 0x7f, 0xd5, 0xc0, 0xcc, 0x98,
+	0xa0, 0x75, 0xd0, 0x03, 0x5f, 0x78, 0x31, 0x5c, 0x3d, 0xf0, 0xd1, 0x75, 0x28, 0x91, 0x31, 0x3e,
+	0x0d, 0xa5, 0x59, 0xc5, 0x55, 0x3b, 0x74, 0x13, 0x0c, 0x05, 0x3c, 0xf0, 0x6b, 0x79, 0xa1, 0x5e,
+	0x91, 0x02, 0xc7, 0x47, 0xb7, 0x01, 0xe6, 0xe1, 0xd4, 0x0a, 0xe2, 0xd4, 0x10, 0x12, 0xc1, 0xe7,
+	0x3d, 0x28, 0x26, 0x93, 0x90, 0xd0, 0x5a, 0x71, 0x2b, 0xbf, 0x6d, 0x36, 0xaf, 0xaf, 0x0e, 0xc6,
+	0x95, 0x4a, 0xf6, 0x13, 0xa8, 0x66, 0x4e, 0x28, 0xba, 0x07, 0x65, 0x99, 0x0d, 0x5a, 0xd3, 0x84,
+	0x3d, 0x5a, 0x61, 0x9f, 0xaa, 0xd8, 0x8f, 0x61, 0xa3, 0x25, 0x78, 0x6f, 0xa7, 0xee, 0xed, 0x6d,
+	0x28, 0xf2, 0x5f, 0x8a, 0xca, 0x90, 0xc7, 0xbe, 0x6f, 0xe5, 0x10, 0x40, 0x29, 0x21, 0xa3, 0x68,
+	0x4a, 0x2c, 0x8d, 0xaf, 0x27, 0xb1, 0x8f, 0x19, 0xb1, 0x74, 0x7b, 0x08, 0x66, 0xc6, 0x18, 0x7d,
+	0x09, 0x05, 0x11, 0x90, 0xcc, 0xc1, 0x2d, 0xe5, 0x76, 0xe9, 0xf3, 0x0d, 0xf1, 0x6d, 0x57, 0x68,
+	0x22, 0x04, 0x85, 0x73, 0x4c, 0xcf, 0x15, 0xe5, 0x62, 0xcd, 0x65, 0x3e, 0x66, 0x58, 0x91, 0x26,
+	0xd6, 0xf6, 0x0e, 0x54, 0x5f, 0xc7, 0xc1, 0x1c, 0x62, 0x3d, 0x85, 0x68, 0x40, 0x91, 0x86, 0x81,
+	0x47, 0xac, 0x1c, 0x2a, 0x81, 0xce, 0xa8, 0xa5, 0xd9, 0xbf, 0xe9, 0xb0, 0x7e, 0x48, 0x58, 0x12,
+	0x78, 0x87, 0x84, 0xe1, 0x17, 0x98, 0x61, 0x9e, 0x5a, 0x16, 0xb0, 0x90, 0xa8, 0xbc, 0xc9, 0x0d,
+	0x4f, 0x25, 0xa3, 0xc2, 0xb5, 0xe6, 0xea, 0x8c, 0xa2, 0x1d, 0xd8, 0x0c, 0xa3, 0x61, 0xe0, 0xe1,
+	0x70, 0xb0, 0x9c, 0xba, 0x0d, 0x75, 0xf0, 0x22, 0xcd, 0xe0, 0x4d, 0x30, 0x28, 0x49, 0x02, 0x1c,
+	0x0e, 0xc6, 0x91, 0x4a, 0x60, 0x45, 0x0a, 0x3a, 0xd1, 0x62, 0xee, 0x8b, 0x4b, 0xb9, 0x7f, 0x02,
+	0x65, 0x2f, 0x1a, 0x33, 0xf2, 0x8e, 0xd5, 0x4a, 0x22, 0x3d, 0xb6, 0xe2, 0x69, 0x11, 0x33, 0xa7,
+	0x8d, 0x2b, 0xb5, 0xc7, 0x2c, 0xb9, 0x74, 0x53, 0x13, 0x4e, 0xce, 0x64, 0x12, 0xf8, 0xb5, 0xb2,
+	0x24, 0x87, 0xaf, 0xeb, 0x8f, 0xa0, 0x9a, 0x55, 0x46, 0xd6, 0xbc, 0x12, 0x8c, 0x7f, 0xb8, 0xda,
+	0x8f, 0xf4, 0x6f, 0x34, 0xfb, 0x67, 0x0d, 0x2c, 0xe9, 0xf8, 0x84, 0xcb, 0x7a, 0x38, 0x48, 0x28,
+	0x7a, 0x0a, 0xe5, 0x91, 0x90, 0xa5, 0x37, 0xe8, 0xf3, 0x05, 0x88, 0x73, 0x4d, 0x25, 0xa0, 0x0a,
+	0xa4, 0x32, 0xe2, 0x80, 0xb2, 0x07, 0x1f, 0x03, 0xa4, 0x67, 0x01, 0xfd, 0xae, 0xc1, 0xa6, 0x34,
+	0x76, 0xc6, 0x67, 0x51, 0x32, 0x12, 0x4d, 0x0d, 0xdd, 0x87, 0xca, 0x88, 0x30, 0x2c, 0xee, 0x05,
+	0xff, 0x8c, 0xd9, 0xbc, 0xb6, 0x92, 0x35, 0x77, 0xa6, 0x86, 0x9e, 0xcd, 0x83, 0xd0, 0x45, 0x10,
+	0x5f, 0x2c, 0x58, 0x64, 0xbe, 0xfe, 0x1f, 0x44, 0xf1, 0x5e, 0x83, 0x4a, 0x7a, 0x61, 0xd1, 0xee,
+	0x42, 0x59, 0xdc, 0x50, 0x30, 0xb2, 0xf7, 0x79, 0xa1, 0x26, 0xe6, 0xd7, 0x52, 0x17, 0xd7, 0xf2,
+	0x21, 0x54, 0xe2, 0x84, 0x9c, 0x05, 0xef, 0x08, 0xad, 0xe5, 0x45, 0x24, 0xb7, 0x97, 0x3e, 0xd1,
+	0xe8, 0xa9, 0x73, 0x19, 0xc1, 0x4c, 0xbd, 0xde, 0x87, 0xb5, 0x85, 0xa3, 0x15, 0x31, 0xec, 0x66,
+	0x63, 0x30, 0x9b, 0x9f, 0x7d, 0x20, 0xd3, 0xd9, 0xe0, 0x7e, 0xd4, 0xc0, 0x48, 0x5d, 0x37, 0xff,
+	0x7d, 0x74, 0xb2, 0xe8, 0xf6, 0x01, 0x44, 0x01, 0x0f, 0x54, 0xcd, 0xf3, 0xf8, 0x6a, 0x1f, 0xca,
+	0x94, 0x6b, 0x08, 0x5d, 0x9e, 0x69, 0xfb, 0x2f, 0x0d, 0x4c, 0x59, 0x8e, 0x92, 0xe5, 0x3b, 0x60,
+	0x26, 0x84, 0x46, 0x93, 0x44, 0x96, 0x9d, 0x0c, 0x11, 0x52, 0x91, 0xe3, 0xf3, 0xf2, 0x5e, 0x78,
+	0x4a, 0xc6, 0x78, 0x94, 0x16, 0xc4, 0x86, 0x3f, 0xff, 0x50, 0x07, 0x8f, 0x08, 0xda, 0x02, 0xd3,
+	0x27, 0xd4, 0x4b, 0x82, 0x98, 0xbb, 0x55, 0x4d, 0x20, 0x2b, 0x42, 0x0f, 0xe7, 0x65, 0x5c, 0x10,
+	0xa0, 0xef, 0x28, 0xd0, 0x19, 0x4c, 0xab, 0x6b, 0xf8, 0x93, 0xea, 0xf5, 0xbd, 0x0e, 0x15, 0xb7,
+	0xd7, 0x92, 0x21, 0x5b, 0x90, 0x4f, 0x62, 0x2f, 0x35, 0x4c, 0x62, 0x0f, 0xdd, 0x85, 0x6a, 0x14,
+	0x93, 0x44, 0x90, 0xc5, 0x59, 0x90, 0xf6, 0xe6, 0x4c, 0xe6, 0xf8, 0xa8, 0x06, 0x65, 0x4a, 0x12,
+	0x8e, 0x51, 0x85, 0x95, 0x6e, 0xd1, 0x0d, 0xa8, 0x50, 0x86, 0xbd, 0x0b, 0x6e, 0x58, 0x50, 0x47,
+	0x7c, 0xef, 0xf8, 0xcb, 0xe4, 0x16, 0xaf, 0x90, 0xbb, 0x44, 0x58, 0xe9, 0x2a, 0x61, 0x0f, 0xe6,
+	0x84, 0x95, 0x05, 0x61, 0xe9, 0xfb, 0x90, 0x86, 0xf3, 0x81, 0x8e, 0xb7, 0x0b, 0x25, 0xca, 0x30,
+	0x9b, 0xd0, 0x5a, 0x45, 0x15, 0xbe, 0x1a, 0x55, 0xba, 0x69, 0x50, 0x2e, 0xa1, 0xb1, 0xab, 0x94,
+	0x3e, 0x89, 0xdc, 0x29, 0xac, 0x09, 0x24, 0x2d, 0x35, 0xaa, 0xd8, 0x24, 0x7d, 0x66, 0x36, 0x61,
+	0xad, 0xd5, 0x3d, 0x3c, 0x3c, 0xee, 0x38, 0xad, 0xef, 0xfa, 0x4e, 0xb7, 0x63, 0xe5, 0xd0, 0x06,
+	0x98, 0xed, 0xce, 0x89, 0xe3, 0x76, 0x3b, 0x87, 0xed, 0x4e, 0xdf, 0xd2, 0xd0, 0x1a, 0x18, 0xed,
+	0xef, 0x8f, 0x9d, 0x9e, 0xd8, 0xea, 0xc8, 0x84, 0xf2, 0x51, 0xdb, 0x3d, 0x71, 0x5a, 0x6d, 0x2b,
+	0xcf, 0x27, 0xa4, 0x9e, 0xdb, 0x6d, 0xb5, 0x8f, 0x8e, 0x9c, 0xce, 0x2b, 0xab, 0xc0, 0x27, 0xa4,
+	0xa3, 0x76, 0xeb, 0xd8, 0x75, 0xfa, 0x6f, 0xad, 0xa2, 0xed, 0x82, 0x25, 0xfc, 0x1e, 0x4d, 0x4e,
+	0x67, 0xae, 0x9f, 0x66, 0x1e, 0xe1, 0x9e, 0x70, 0x58, 0x86, 0x7c, 0xf7, 0x0d, 0x77, 0xc4, 0x17,
+	0xc2, 0x85, 0x58, 0x1c, 0x5b, 0x79, 0xbe, 0xe8, 0x74, 0x1c, 0xab, 0x80, 0x2a, 0x50, 0xe8, 0x74,
+	0x3b, 0x6d, 0xab, 0x68, 0x9f, 0x81, 0x31, 0x7f, 0x2e, 0xdf, 0xa6, 0x1f, 0xb3, 0xa0, 0xda, 0xea,
+	0x76, 0x5e, 0x3a, 0xaf, 0x06, 0xed, 0x13, 0x0e, 0x33, 0xc7, 0x51, 0xbf, 0xee, 0x39, 0x6a, 0xab,
+	0x71, 0xa0, 0xb3, 0x6d, 0xd3, 0xd2, 0xb9, 0xc1, 0x8b, 0x36, 0x0f, 0x42, 0x69, 0xe4, 0xb9, 0x81,
+	0xdb, 0x6b, 0xa9, 0x6d, 0xc1, 0xfe, 0x53, 0x57, 0xf3, 0xd1, 0x01, 0xc1, 0xfe, 0x8a, 0xf9, 0xe8,
+	0xc1, 0x7c, 0x16, 0x14, 0x84, 0xaf, 0x37, 0xeb, 0xd9, 0x71, 0x24, 0x8d, 0x57, 0xf5, 0x88, 0x99,
+	0x2e, 0xfa, 0x76, 0x71, 0x6a, 0x14, 0x77, 0x75, 0x7d, 0xd6, 0xf9, 0x96, 0xe9, 0x52, 0xe6, 0x26,
+	0x9d, 0x8b, 0xd0, 0x8e, 0x6a, 0x4c, 0x05, 0x61, 0xb9, 0x30, 0x44, 0x5d, 0xe9, 0x4a, 0x77, 0xa1,
+	0xca, 0x7f, 0x07, 0x53, 0x92, 0x50, 0x7e, 0x7f, 0xe5, 0x05, 0x37, 0xb9, 0xec, 0x44, 0x8a, 0xd0,
+	0x3e, 0x18, 0x09, 0x0e, 0x28, 0xf1, 0x07, 0x8c, 0x8a, 0xfb, 0x6d, 0x36, 0xeb, 0x0d, 0x39, 0x64,
+	0x37, 0xd2, 0x21, 0xbb, 0xd1, 0x4f, 0x87, 0x6c, 0xb7, 0x22, 0x95, 0xfb, 0x14, 0x3d, 0xe6, 0xb5,
+	0x13, 0x47, 0x09, 0x93, 0xa6, 0xe5, 0x8f, 0x9a, 0x42, 0xaa, 0xde, 0xa7, 0xf6, 0x1f, 0x3a, 0x14,
+	0x65, 0xb1, 0xef, 0x40, 0xe9, 0x5c, 0x50, 0xac, 0x1e, 0xc0, 0x85, 0xa9, 0x4e, 0x92, 0xef, 0x2a,
+	0x0d, 0xb4, 0x0f, 0xd5, 0xec, 0x30, 0xad, 0x7a, 0x3b, 0xba, 0x3a, 0x90, 0x1d, 0xe4, 0x5c, 0xd3,
+	0xcb, 0x4c, 0x70, 0x0d, 0x30, 0x66, 0x93, 0xb9, 0xa0, 0xdc, 0x6c, 0x6e, 0x2c, 0x75, 0xf4, 0x83,
+	0x9c, 0x5b, 0xb9, 0x48, 0x9f, 0xb6, 0xfb, 0x00, 0x33, 0xfd, 0xa6, 0x60, 0xda, 0x6c, 0x5a, 0x4b,
+	0x06, 0xcd, 0x83, 0x9c, 0x6b, 0x5c, 0xcc, 0xde, 0x8b, 0x7d, 0xa8, 0x66, 0xdb, 0xb0, 0xa0, 0x7a,
+	0x8e, 0x2d, 0xd3, 0x3d, 0x39, 0xb6, 0x4c, 0x5f, 0xe6, 0xd8, 0x92, 0xd8, 0x53, 0x56, 0xa5, 0x05,
+	0x6c, 0x69, 0x0b, 0xe1, 0xd8, 0x92, 0xd8, 0x13, 0xeb, 0xe7, 0xd5, 0xec, 0x90, 0xfd, 0xfc, 0x0d,
+	0xd4, 0xa3, 0x64, 0xd8, 0x88, 0x62, 0x32, 0xf6, 0xa2, 0xc4, 0x6f, 0xc8, 0x7f, 0x3f, 0xd2, 0x9e,
+	0xfe, 0xd0, 0x18, 0x06, 0xec, 0x7c, 0x72, 0xca, 0x5b, 0xcb, 0x5e, 0xaa, 0xb2, 0x27, 0x55, 0x76,
+	0xd5, 0x1f, 0xa4, 0xe9, 0xd7, 0x7b, 0xc3, 0x48, 0xc9, 0x4e, 0x4b, 0x42, 0xf8, 0xd5, 0xdf, 0x01,
+	0x00, 0x00, 0xff, 0xff, 0xb2, 0x68, 0x11, 0x5e, 0xa7, 0x0d, 0x00, 0x00,
+}
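A minimal usage sketch for the generated event types above. This is illustrative only: the import path of the generated events package and the DEVICE_EVENT enum value are assumptions and may differ in this tree; the message, field, and getter names are taken from the generated code shown above.

```
package main

import (
	"fmt"

	"github.com/golang/protobuf/ptypes"
	ev "github.com/opencord/voltha-protos/v5/go/events" // assumed import path
)

func main() {
	// Wrap a DeviceEvent in the Event oneof together with a populated header.
	e := &ev.Event{
		Header: &ev.EventHeader{
			Id:          "voltha.some_olt.1234",
			SubCategory: ev.EventSubCategory_PON,
			Type:        ev.EventType_DEVICE_EVENT, // assumed enum value
			TypeVersion: "0.1",
			RaisedTs:    ptypes.TimestampNow(),
		},
		EventType: &ev.Event_DeviceEvent{
			DeviceEvent: &ev.DeviceEvent{
				ResourceId:      "some_olt.1234",
				DeviceEventName: "OLT_LOS_RAISE_EVENT",
				Description:     "Loss of signal detected",
				Context:         map[string]string{"pon-id": "0"},
			},
		},
	}

	// The generated getters are nil-safe, so consumers can probe the oneof directly.
	if de := e.GetDeviceEvent(); de != nil {
		fmt.Println("device event:", de.GetDeviceEventName())
	}
}
```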
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/voltha/logical_device.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/logical_device.pb.go
new file mode 100644
index 0000000..bd0e4c4
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/logical_device.pb.go
@@ -0,0 +1,349 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/logical_device.proto
+
+package voltha
+
+import (
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	openflow_13 "github.com/opencord/voltha-protos/v5/go/openflow_13"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+type LogicalPortId struct {
+	// unique id of logical device
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// id of the port on the logical device
+	PortId               string   `protobuf:"bytes,2,opt,name=port_id,json=portId,proto3" json:"port_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LogicalPortId) Reset()         { *m = LogicalPortId{} }
+func (m *LogicalPortId) String() string { return proto.CompactTextString(m) }
+func (*LogicalPortId) ProtoMessage()    {}
+func (*LogicalPortId) Descriptor() ([]byte, []int) {
+	return fileDescriptor_caf139ab3abc8240, []int{0}
+}
+
+func (m *LogicalPortId) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LogicalPortId.Unmarshal(m, b)
+}
+func (m *LogicalPortId) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LogicalPortId.Marshal(b, m, deterministic)
+}
+func (m *LogicalPortId) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LogicalPortId.Merge(m, src)
+}
+func (m *LogicalPortId) XXX_Size() int {
+	return xxx_messageInfo_LogicalPortId.Size(m)
+}
+func (m *LogicalPortId) XXX_DiscardUnknown() {
+	xxx_messageInfo_LogicalPortId.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogicalPortId proto.InternalMessageInfo
+
+func (m *LogicalPortId) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *LogicalPortId) GetPortId() string {
+	if m != nil {
+		return m.PortId
+	}
+	return ""
+}
+
+type LogicalPort struct {
+	Id                   string                    `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	OfpPort              *openflow_13.OfpPort      `protobuf:"bytes,2,opt,name=ofp_port,json=ofpPort,proto3" json:"ofp_port,omitempty"`
+	DeviceId             string                    `protobuf:"bytes,3,opt,name=device_id,json=deviceId,proto3" json:"device_id,omitempty"`
+	DevicePortNo         uint32                    `protobuf:"varint,4,opt,name=device_port_no,json=devicePortNo,proto3" json:"device_port_no,omitempty"`
+	RootPort             bool                      `protobuf:"varint,5,opt,name=root_port,json=rootPort,proto3" json:"root_port,omitempty"`
+	OfpPortStats         *openflow_13.OfpPortStats `protobuf:"bytes,6,opt,name=ofp_port_stats,json=ofpPortStats,proto3" json:"ofp_port_stats,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                  `json:"-"`
+	XXX_unrecognized     []byte                    `json:"-"`
+	XXX_sizecache        int32                     `json:"-"`
+}
+
+func (m *LogicalPort) Reset()         { *m = LogicalPort{} }
+func (m *LogicalPort) String() string { return proto.CompactTextString(m) }
+func (*LogicalPort) ProtoMessage()    {}
+func (*LogicalPort) Descriptor() ([]byte, []int) {
+	return fileDescriptor_caf139ab3abc8240, []int{1}
+}
+
+func (m *LogicalPort) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LogicalPort.Unmarshal(m, b)
+}
+func (m *LogicalPort) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LogicalPort.Marshal(b, m, deterministic)
+}
+func (m *LogicalPort) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LogicalPort.Merge(m, src)
+}
+func (m *LogicalPort) XXX_Size() int {
+	return xxx_messageInfo_LogicalPort.Size(m)
+}
+func (m *LogicalPort) XXX_DiscardUnknown() {
+	xxx_messageInfo_LogicalPort.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogicalPort proto.InternalMessageInfo
+
+func (m *LogicalPort) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *LogicalPort) GetOfpPort() *openflow_13.OfpPort {
+	if m != nil {
+		return m.OfpPort
+	}
+	return nil
+}
+
+func (m *LogicalPort) GetDeviceId() string {
+	if m != nil {
+		return m.DeviceId
+	}
+	return ""
+}
+
+func (m *LogicalPort) GetDevicePortNo() uint32 {
+	if m != nil {
+		return m.DevicePortNo
+	}
+	return 0
+}
+
+func (m *LogicalPort) GetRootPort() bool {
+	if m != nil {
+		return m.RootPort
+	}
+	return false
+}
+
+func (m *LogicalPort) GetOfpPortStats() *openflow_13.OfpPortStats {
+	if m != nil {
+		return m.OfpPortStats
+	}
+	return nil
+}
+
+type LogicalPorts struct {
+	Items                []*LogicalPort `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}       `json:"-"`
+	XXX_unrecognized     []byte         `json:"-"`
+	XXX_sizecache        int32          `json:"-"`
+}
+
+func (m *LogicalPorts) Reset()         { *m = LogicalPorts{} }
+func (m *LogicalPorts) String() string { return proto.CompactTextString(m) }
+func (*LogicalPorts) ProtoMessage()    {}
+func (*LogicalPorts) Descriptor() ([]byte, []int) {
+	return fileDescriptor_caf139ab3abc8240, []int{2}
+}
+
+func (m *LogicalPorts) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LogicalPorts.Unmarshal(m, b)
+}
+func (m *LogicalPorts) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LogicalPorts.Marshal(b, m, deterministic)
+}
+func (m *LogicalPorts) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LogicalPorts.Merge(m, src)
+}
+func (m *LogicalPorts) XXX_Size() int {
+	return xxx_messageInfo_LogicalPorts.Size(m)
+}
+func (m *LogicalPorts) XXX_DiscardUnknown() {
+	xxx_messageInfo_LogicalPorts.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogicalPorts proto.InternalMessageInfo
+
+func (m *LogicalPorts) GetItems() []*LogicalPort {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+type LogicalDevice struct {
+	// unique id of logical device
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// unique datapath id for the logical device (used by the SDN controller)
+	DatapathId uint64 `protobuf:"varint,2,opt,name=datapath_id,json=datapathId,proto3" json:"datapath_id,omitempty"`
+	// device description
+	Desc *openflow_13.OfpDesc `protobuf:"bytes,3,opt,name=desc,proto3" json:"desc,omitempty"`
+	// device features
+	SwitchFeatures *openflow_13.OfpSwitchFeatures `protobuf:"bytes,4,opt,name=switch_features,json=switchFeatures,proto3" json:"switch_features,omitempty"`
+	// name of the root device anchoring the logical device
+	RootDeviceId         string   `protobuf:"bytes,5,opt,name=root_device_id,json=rootDeviceId,proto3" json:"root_device_id,omitempty"`
+	XXX_NoUnkeyedLiteral struct{} `json:"-"`
+	XXX_unrecognized     []byte   `json:"-"`
+	XXX_sizecache        int32    `json:"-"`
+}
+
+func (m *LogicalDevice) Reset()         { *m = LogicalDevice{} }
+func (m *LogicalDevice) String() string { return proto.CompactTextString(m) }
+func (*LogicalDevice) ProtoMessage()    {}
+func (*LogicalDevice) Descriptor() ([]byte, []int) {
+	return fileDescriptor_caf139ab3abc8240, []int{3}
+}
+
+func (m *LogicalDevice) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LogicalDevice.Unmarshal(m, b)
+}
+func (m *LogicalDevice) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LogicalDevice.Marshal(b, m, deterministic)
+}
+func (m *LogicalDevice) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LogicalDevice.Merge(m, src)
+}
+func (m *LogicalDevice) XXX_Size() int {
+	return xxx_messageInfo_LogicalDevice.Size(m)
+}
+func (m *LogicalDevice) XXX_DiscardUnknown() {
+	xxx_messageInfo_LogicalDevice.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogicalDevice proto.InternalMessageInfo
+
+func (m *LogicalDevice) GetId() string {
+	if m != nil {
+		return m.Id
+	}
+	return ""
+}
+
+func (m *LogicalDevice) GetDatapathId() uint64 {
+	if m != nil {
+		return m.DatapathId
+	}
+	return 0
+}
+
+func (m *LogicalDevice) GetDesc() *openflow_13.OfpDesc {
+	if m != nil {
+		return m.Desc
+	}
+	return nil
+}
+
+func (m *LogicalDevice) GetSwitchFeatures() *openflow_13.OfpSwitchFeatures {
+	if m != nil {
+		return m.SwitchFeatures
+	}
+	return nil
+}
+
+func (m *LogicalDevice) GetRootDeviceId() string {
+	if m != nil {
+		return m.RootDeviceId
+	}
+	return ""
+}
+
+type LogicalDevices struct {
+	Items                []*LogicalDevice `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}         `json:"-"`
+	XXX_unrecognized     []byte           `json:"-"`
+	XXX_sizecache        int32            `json:"-"`
+}
+
+func (m *LogicalDevices) Reset()         { *m = LogicalDevices{} }
+func (m *LogicalDevices) String() string { return proto.CompactTextString(m) }
+func (*LogicalDevices) ProtoMessage()    {}
+func (*LogicalDevices) Descriptor() ([]byte, []int) {
+	return fileDescriptor_caf139ab3abc8240, []int{4}
+}
+
+func (m *LogicalDevices) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_LogicalDevices.Unmarshal(m, b)
+}
+func (m *LogicalDevices) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_LogicalDevices.Marshal(b, m, deterministic)
+}
+func (m *LogicalDevices) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_LogicalDevices.Merge(m, src)
+}
+func (m *LogicalDevices) XXX_Size() int {
+	return xxx_messageInfo_LogicalDevices.Size(m)
+}
+func (m *LogicalDevices) XXX_DiscardUnknown() {
+	xxx_messageInfo_LogicalDevices.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_LogicalDevices proto.InternalMessageInfo
+
+func (m *LogicalDevices) GetItems() []*LogicalDevice {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+func init() {
+	proto.RegisterType((*LogicalPortId)(nil), "logical_device.LogicalPortId")
+	proto.RegisterType((*LogicalPort)(nil), "logical_device.LogicalPort")
+	proto.RegisterType((*LogicalPorts)(nil), "logical_device.LogicalPorts")
+	proto.RegisterType((*LogicalDevice)(nil), "logical_device.LogicalDevice")
+	proto.RegisterType((*LogicalDevices)(nil), "logical_device.LogicalDevices")
+}
+
+func init() {
+	proto.RegisterFile("voltha_protos/logical_device.proto", fileDescriptor_caf139ab3abc8240)
+}
+
+var fileDescriptor_caf139ab3abc8240 = []byte{
+	// 447 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x53, 0xd1, 0x6a, 0xdb, 0x30,
+	0x14, 0xc5, 0x69, 0x92, 0xa6, 0x37, 0xa9, 0x0b, 0x86, 0x31, 0xd3, 0x6e, 0xd4, 0x98, 0x3d, 0x64,
+	0x0f, 0xb3, 0xd7, 0x86, 0xc1, 0x5e, 0x5b, 0xba, 0x42, 0x60, 0x6c, 0x43, 0x7b, 0xdb, 0x8b, 0x51,
+	0x2d, 0xc5, 0x11, 0xb8, 0xbe, 0xc6, 0x52, 0xd3, 0x9f, 0xdd, 0x57, 0xec, 0x0b, 0x86, 0xae, 0xec,
+	0x2d, 0x4e, 0xd2, 0x47, 0x1d, 0x9d, 0x73, 0xee, 0xd1, 0xb9, 0x36, 0xc4, 0x1b, 0x2c, 0xcd, 0x9a,
+	0x67, 0x75, 0x83, 0x06, 0x75, 0x5a, 0x62, 0xa1, 0x72, 0x5e, 0x66, 0x42, 0x6e, 0x54, 0x2e, 0x13,
+	0x42, 0x03, 0xbf, 0x8f, 0x9e, 0xbf, 0x29, 0x10, 0x8b, 0x52, 0xa6, 0xbc, 0x56, 0x29, 0xaf, 0x2a,
+	0x34, 0xdc, 0x28, 0xac, 0xb4, 0x63, 0x9f, 0x5f, 0xf6, 0x1d, 0xb1, 0x96, 0xd5, 0xaa, 0xc4, 0xe7,
+	0xec, 0x6a, 0xe1, 0x08, 0xf1, 0x67, 0x38, 0xfd, 0xea, 0x0c, 0x7f, 0x60, 0x63, 0x96, 0x22, 0xf0,
+	0x61, 0xa0, 0x44, 0xe8, 0x45, 0xde, 0xfc, 0x84, 0x0d, 0x94, 0x08, 0x5e, 0xc3, 0x71, 0x8d, 0x8d,
+	0xc9, 0x94, 0x08, 0x07, 0x04, 0x8e, 0x6b, 0x22, 0xc6, 0x7f, 0x3c, 0x98, 0x6e, 0x49, 0xf7, 0x84,
+	0x1f, 0x61, 0x82, 0xab, 0x3a, 0xb3, 0x6c, 0x52, 0x4e, 0xaf, 0x5f, 0x25, 0xdb, 0xf3, 0xbb, 0x4b,
+	0x76, 0x8c, 0xab, 0x9a, 0x1c, 0x2e, 0xe0, 0xc4, 0x3d, 0xca, 0x0e, 0x3b, 0x22, 0xa3, 0x89, 0x03,
+	0x96, 0x22, 0x78, 0x07, 0x7e, 0x7b, 0x49, 0x71, 0x2a, 0x0c, 0x87, 0x91, 0x37, 0x3f, 0x65, 0x33,
+	0x87, 0x5a, 0x83, 0x6f, 0x68, 0x2d, 0x1a, 0x44, 0xe3, 0xa6, 0x8e, 0x22, 0x6f, 0x3e, 0x61, 0x13,
+	0x0b, 0x90, 0xff, 0x0d, 0xf8, 0xdd, 0xd0, 0x4c, 0x1b, 0x6e, 0x74, 0x38, 0xa6, 0x5c, 0x17, 0x07,
+	0x73, 0x39, 0x0a, 0x9b, 0xb5, 0xe9, 0x7e, 0xda, 0x53, 0x7c, 0x03, 0xb3, 0xad, 0x37, 0xeb, 0xe0,
+	0x0a, 0x46, 0xca, 0xc8, 0x47, 0x1d, 0x7a, 0xd1, 0x11, 0x39, 0xed, 0xec, 0x6c, 0x8b, 0xcc, 0x1c,
+	0x33, 0xfe, 0xed, 0xfd, 0xab, 0xfc, 0x8e, 0x48, 0x7b, 0xcd, 0x5d, 0xc2, 0x54, 0x70, 0xc3, 0x6b,
+	0x6e, 0xd6, 0x5d, 0xed, 0x43, 0x06, 0x1d, 0xb4, 0x14, 0xc1, 0x7b, 0x18, 0x0a, 0xa9, 0x73, 0xea,
+	0xe8, 0x50, 0xad, 0xf6, 0x92, 0x11, 0x25, 0x58, 0xc2, 0x99, 0x7e, 0x56, 0x26, 0x5f, 0x67, 0x2b,
+	0xc9, 0xcd, 0x53, 0x23, 0x35, 0xf5, 0x36, 0xbd, 0x8e, 0xf6, 0x54, 0x3b, 0x3c, 0xe6, 0x3b, 0xe0,
+	0xbe, 0x3d, 0xdb, 0x0d, 0x50, 0xb7, 0xff, 0x77, 0x34, 0xa2, 0xc8, 0x33, 0x8b, 0xde, 0xb5, 0x7b,
+	0x8a, 0xbf, 0x80, 0xdf, 0x7b, 0x9d, 0x0e, 0x16, 0xfd, 0x8e, 0xde, 0xbe, 0xd0, 0x91, 0xa3, 0xb7,
+	0x2d, 0xdd, 0x4a, 0x88, 0xb1, 0x29, 0x28, 0x63, 0x8e, 0x8d, 0x48, 0xdc, 0x77, 0xbc, 0x23, 0xbd,
+	0x3d, 0xfb, 0x7e, 0xdf, 0x53, 0xff, 0x4a, 0x0a, 0x65, 0xd6, 0x4f, 0x0f, 0x49, 0x8e, 0x8f, 0x69,
+	0xa7, 0x4d, 0x9d, 0xf6, 0x43, 0xfb, 0x0f, 0x6c, 0x3e, 0xa5, 0x05, 0xb6, 0xd8, 0xc3, 0x98, 0xc0,
+	0xc5, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xd8, 0xd6, 0x61, 0x7a, 0x03, 0x00, 0x00,
+}
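Similarly, a short sketch of how a consumer might walk the LogicalPort collection defined above. The helper name and sample values are hypothetical; the types, import path, and nil-safe getters come from the generated file itself.

```
package main

import (
	"fmt"

	"github.com/opencord/voltha-protos/v5/go/voltha"
)

// rootPortNumbers is a hypothetical helper that collects the OpenFlow port
// numbers of all root ports. The generated getters tolerate nil receivers,
// so no explicit nil checks are needed.
func rootPortNumbers(ports *voltha.LogicalPorts) []uint32 {
	var out []uint32
	for _, p := range ports.GetItems() {
		if p.GetRootPort() {
			out = append(out, p.GetDevicePortNo())
		}
	}
	return out
}

func main() {
	ports := &voltha.LogicalPorts{
		Items: []*voltha.LogicalPort{
			{Id: "nni", DeviceId: "olt-1", DevicePortNo: 1048576, RootPort: true},
			{Id: "uni-0", DeviceId: "onu-1", DevicePortNo: 16, RootPort: false},
		},
	}
	fmt.Println(rootPortNumbers(ports)) // [1048576]
}
```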
diff --git a/vendor/github.com/opencord/voltha-protos/v5/go/voltha/voltha.pb.go b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/voltha.pb.go
new file mode 100644
index 0000000..217a597
--- /dev/null
+++ b/vendor/github.com/opencord/voltha-protos/v5/go/voltha/voltha.pb.go
@@ -0,0 +1,3071 @@
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: voltha_protos/voltha.proto
+
+package voltha
+
+import (
+	context "context"
+	fmt "fmt"
+	proto "github.com/golang/protobuf/proto"
+	empty "github.com/golang/protobuf/ptypes/empty"
+	common "github.com/opencord/voltha-protos/v5/go/common"
+	extension "github.com/opencord/voltha-protos/v5/go/extension"
+	health "github.com/opencord/voltha-protos/v5/go/health"
+	omci "github.com/opencord/voltha-protos/v5/go/omci"
+	openflow_13 "github.com/opencord/voltha-protos/v5/go/openflow_13"
+	_ "google.golang.org/genproto/googleapis/api/annotations"
+	grpc "google.golang.org/grpc"
+	codes "google.golang.org/grpc/codes"
+	status "google.golang.org/grpc/status"
+	math "math"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package
+
+// ID from public import voltha_protos/common.proto
+type ID = common.ID
+
+// IDs from public import voltha_protos/common.proto
+type IDs = common.IDs
+
+// Connection from public import voltha_protos/common.proto
+type Connection = common.Connection
+
+// AdminState from public import voltha_protos/common.proto
+type AdminState = common.AdminState
+
+// OperStatus from public import voltha_protos/common.proto
+type OperStatus = common.OperStatus
+
+// ConnectStatus from public import voltha_protos/common.proto
+type ConnectStatus = common.ConnectStatus
+
+// OperationResp from public import voltha_protos/common.proto
+type OperationResp = common.OperationResp
+
+// TestModeKeys from public import voltha_protos/common.proto
+type TestModeKeys = common.TestModeKeys
+
+var TestModeKeys_name = common.TestModeKeys_name
+var TestModeKeys_value = common.TestModeKeys_value
+
+const TestModeKeys_api_test = TestModeKeys(common.TestModeKeys_api_test)
+
+// AdminState_Types from public import voltha_protos/common.proto
+type AdminState_Types = common.AdminState_Types
+
+var AdminState_Types_name = common.AdminState_Types_name
+var AdminState_Types_value = common.AdminState_Types_value
+
+const AdminState_UNKNOWN = AdminState_Types(common.AdminState_UNKNOWN)
+const AdminState_PREPROVISIONED = AdminState_Types(common.AdminState_PREPROVISIONED)
+const AdminState_ENABLED = AdminState_Types(common.AdminState_ENABLED)
+const AdminState_DISABLED = AdminState_Types(common.AdminState_DISABLED)
+const AdminState_DOWNLOADING_IMAGE = AdminState_Types(common.AdminState_DOWNLOADING_IMAGE)
+
+// OperStatus_Types from public import voltha_protos/common.proto
+type OperStatus_Types = common.OperStatus_Types
+
+var OperStatus_Types_name = common.OperStatus_Types_name
+var OperStatus_Types_value = common.OperStatus_Types_value
+
+const OperStatus_UNKNOWN = OperStatus_Types(common.OperStatus_UNKNOWN)
+const OperStatus_DISCOVERED = OperStatus_Types(common.OperStatus_DISCOVERED)
+const OperStatus_ACTIVATING = OperStatus_Types(common.OperStatus_ACTIVATING)
+const OperStatus_TESTING = OperStatus_Types(common.OperStatus_TESTING)
+const OperStatus_ACTIVE = OperStatus_Types(common.OperStatus_ACTIVE)
+const OperStatus_FAILED = OperStatus_Types(common.OperStatus_FAILED)
+const OperStatus_RECONCILING = OperStatus_Types(common.OperStatus_RECONCILING)
+const OperStatus_RECONCILING_FAILED = OperStatus_Types(common.OperStatus_RECONCILING_FAILED)
+
+// ConnectStatus_Types from public import voltha_protos/common.proto
+type ConnectStatus_Types = common.ConnectStatus_Types
+
+var ConnectStatus_Types_name = common.ConnectStatus_Types_name
+var ConnectStatus_Types_value = common.ConnectStatus_Types_value
+
+const ConnectStatus_UNKNOWN = ConnectStatus_Types(common.ConnectStatus_UNKNOWN)
+const ConnectStatus_UNREACHABLE = ConnectStatus_Types(common.ConnectStatus_UNREACHABLE)
+const ConnectStatus_REACHABLE = ConnectStatus_Types(common.ConnectStatus_REACHABLE)
+
+// OperationResp_OperationReturnCode from public import voltha_protos/common.proto
+type OperationResp_OperationReturnCode = common.OperationResp_OperationReturnCode
+
+var OperationResp_OperationReturnCode_name = common.OperationResp_OperationReturnCode_name
+var OperationResp_OperationReturnCode_value = common.OperationResp_OperationReturnCode_value
+
+const OperationResp_OPERATION_SUCCESS = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_SUCCESS)
+const OperationResp_OPERATION_FAILURE = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_FAILURE)
+const OperationResp_OPERATION_UNSUPPORTED = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_UNSUPPORTED)
+const OperationResp_OPERATION_IN_PROGRESS = OperationResp_OperationReturnCode(common.OperationResp_OPERATION_IN_PROGRESS)
+
+// CoreInstance represents a core instance.  It is data held in memory when a core
+// is running.  This data is not persistent.
+type CoreInstance struct {
+	InstanceId           string               `protobuf:"bytes,1,opt,name=instance_id,json=instanceId,proto3" json:"instance_id,omitempty"`
+	Health               *health.HealthStatus `protobuf:"bytes,2,opt,name=health,proto3" json:"health,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}             `json:"-"`
+	XXX_unrecognized     []byte               `json:"-"`
+	XXX_sizecache        int32                `json:"-"`
+}
+
+func (m *CoreInstance) Reset()         { *m = CoreInstance{} }
+func (m *CoreInstance) String() string { return proto.CompactTextString(m) }
+func (*CoreInstance) ProtoMessage()    {}
+func (*CoreInstance) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{0}
+}
+
+func (m *CoreInstance) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CoreInstance.Unmarshal(m, b)
+}
+func (m *CoreInstance) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CoreInstance.Marshal(b, m, deterministic)
+}
+func (m *CoreInstance) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CoreInstance.Merge(m, src)
+}
+func (m *CoreInstance) XXX_Size() int {
+	return xxx_messageInfo_CoreInstance.Size(m)
+}
+func (m *CoreInstance) XXX_DiscardUnknown() {
+	xxx_messageInfo_CoreInstance.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CoreInstance proto.InternalMessageInfo
+
+func (m *CoreInstance) GetInstanceId() string {
+	if m != nil {
+		return m.InstanceId
+	}
+	return ""
+}
+
+func (m *CoreInstance) GetHealth() *health.HealthStatus {
+	if m != nil {
+		return m.Health
+	}
+	return nil
+}
+
+type CoreInstances struct {
+	Items                []*CoreInstance `protobuf:"bytes,1,rep,name=items,proto3" json:"items,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}        `json:"-"`
+	XXX_unrecognized     []byte          `json:"-"`
+	XXX_sizecache        int32           `json:"-"`
+}
+
+func (m *CoreInstances) Reset()         { *m = CoreInstances{} }
+func (m *CoreInstances) String() string { return proto.CompactTextString(m) }
+func (*CoreInstances) ProtoMessage()    {}
+func (*CoreInstances) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{1}
+}
+
+func (m *CoreInstances) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_CoreInstances.Unmarshal(m, b)
+}
+func (m *CoreInstances) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_CoreInstances.Marshal(b, m, deterministic)
+}
+func (m *CoreInstances) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_CoreInstances.Merge(m, src)
+}
+func (m *CoreInstances) XXX_Size() int {
+	return xxx_messageInfo_CoreInstances.Size(m)
+}
+func (m *CoreInstances) XXX_DiscardUnknown() {
+	xxx_messageInfo_CoreInstances.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_CoreInstances proto.InternalMessageInfo
+
+func (m *CoreInstances) GetItems() []*CoreInstance {
+	if m != nil {
+		return m.Items
+	}
+	return nil
+}
+
+// Voltha represents the Voltha cluster data.  Each Core instance will hold a subset of
+// the entire cluster. However, some items (e.g. adapters) will be held by all cores
+// for better performance
+type Voltha struct {
+	Version              string                  `protobuf:"bytes,1,opt,name=version,proto3" json:"version,omitempty"`
+	Adapters             []*Adapter              `protobuf:"bytes,2,rep,name=adapters,proto3" json:"adapters,omitempty"`
+	LogicalDevices       []*LogicalDevice        `protobuf:"bytes,3,rep,name=logical_devices,json=logicalDevices,proto3" json:"logical_devices,omitempty"`
+	Devices              []*Device               `protobuf:"bytes,4,rep,name=devices,proto3" json:"devices,omitempty"`
+	DeviceTypes          []*DeviceType           `protobuf:"bytes,5,rep,name=device_types,json=deviceTypes,proto3" json:"device_types,omitempty"`
+	EventFilters         []*EventFilter          `protobuf:"bytes,7,rep,name=event_filters,json=eventFilters,proto3" json:"event_filters,omitempty"`
+	OmciMibDatabase      []*omci.MibDeviceData   `protobuf:"bytes,28,rep,name=omci_mib_database,json=omciMibDatabase,proto3" json:"omci_mib_database,omitempty"`
+	OmciAlarmDatabase    []*omci.AlarmDeviceData `protobuf:"bytes,29,rep,name=omci_alarm_database,json=omciAlarmDatabase,proto3" json:"omci_alarm_database,omitempty"`
+	XXX_NoUnkeyedLiteral struct{}                `json:"-"`
+	XXX_unrecognized     []byte                  `json:"-"`
+	XXX_sizecache        int32                   `json:"-"`
+}
+
+func (m *Voltha) Reset()         { *m = Voltha{} }
+func (m *Voltha) String() string { return proto.CompactTextString(m) }
+func (*Voltha) ProtoMessage()    {}
+func (*Voltha) Descriptor() ([]byte, []int) {
+	return fileDescriptor_e084f1a60ce7016c, []int{2}
+}
+
+func (m *Voltha) XXX_Unmarshal(b []byte) error {
+	return xxx_messageInfo_Voltha.Unmarshal(m, b)
+}
+func (m *Voltha) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
+	return xxx_messageInfo_Voltha.Marshal(b, m, deterministic)
+}
+func (m *Voltha) XXX_Merge(src proto.Message) {
+	xxx_messageInfo_Voltha.Merge(m, src)
+}
+func (m *Voltha) XXX_Size() int {
+	return xxx_messageInfo_Voltha.Size(m)
+}
+func (m *Voltha) XXX_DiscardUnknown() {
+	xxx_messageInfo_Voltha.DiscardUnknown(m)
+}
+
+var xxx_messageInfo_Voltha proto.InternalMessageInfo
+
+func (m *Voltha) GetVersion() string {
+	if m != nil {
+		return m.Version
+	}
+	return ""
+}
+
+func (m *Voltha) GetAdapters() []*Adapter {
+	if m != nil {
+		return m.Adapters
+	}
+	return nil
+}
+
+func (m *Voltha) GetLogicalDevices() []*LogicalDevice {
+	if m != nil {
+		return m.LogicalDevices
+	}
+	return nil
+}
+
+func (m *Voltha) GetDevices() []*Device {
+	if m != nil {
+		return m.Devices
+	}
+	return nil
+}
+
+func (m *Voltha) GetDeviceTypes() []*DeviceType {
+	if m != nil {
+		return m.DeviceTypes
+	}
+	return nil
+}
+
+func (m *Voltha) GetEventFilters() []*EventFilter {
+	if m != nil {
+		return m.EventFilters
+	}
+	return nil
+}
+
+func (m *Voltha) GetOmciMibDatabase() []*omci.MibDeviceData {
+	if m != nil {
+		return m.OmciMibDatabase
+	}
+	return nil
+}
+
+func (m *Voltha) GetOmciAlarmDatabase() []*omci.AlarmDeviceData {
+	if m != nil {
+		return m.OmciAlarmDatabase
+	}
+	return nil
+}
+
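+// Illustrative sketch (not generated code): the messages above are ordinary
+// protobuf messages, so they can be built and round-tripped with the
+// github.com/golang/protobuf/proto package this file already imports.
+// Field values below are placeholders:
+//
+//	v := &Voltha{Version: "example"}
+//	b, err := proto.Marshal(v)
+//	if err != nil {
+//		panic(err)
+//	}
+//	var decoded Voltha
+//	if err := proto.Unmarshal(b, &decoded); err != nil {
+//		panic(err)
+//	}
+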
+func init() {
+	proto.RegisterType((*CoreInstance)(nil), "voltha.CoreInstance")
+	proto.RegisterType((*CoreInstances)(nil), "voltha.CoreInstances")
+	proto.RegisterType((*Voltha)(nil), "voltha.Voltha")
+}
+
+func init() { proto.RegisterFile("voltha_protos/voltha.proto", fileDescriptor_e084f1a60ce7016c) }
+
+var fileDescriptor_e084f1a60ce7016c = []byte{
+	// 2130 bytes of a gzipped FileDescriptorProto
+	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xeb, 0x6e, 0x1b, 0xb9,
+	0x15, 0xde, 0x71, 0x76, 0x73, 0xa1, 0x25, 0xdb, 0xa2, 0x7c, 0x91, 0x25, 0xf9, 0xc6, 0x64, 0xb3,
+	0x5e, 0x25, 0x91, 0x92, 0x38, 0xd9, 0xb6, 0xd9, 0x2e, 0x0a, 0xaf, 0x6f, 0xf5, 0x36, 0x81, 0x0d,
+	0x29, 0x71, 0xda, 0x62, 0xb3, 0xc2, 0x48, 0x43, 0xcb, 0x83, 0x8e, 0x66, 0xd4, 0x21, 0x65, 0x27,
+	0x30, 0xf6, 0x4f, 0x2f, 0x40, 0x8a, 0xa2, 0xe8, 0x8f, 0x7d, 0x8b, 0x3e, 0x45, 0xff, 0xf5, 0x01,
+	0xfa, 0x0a, 0x7d, 0x90, 0x82, 0x87, 0xa4, 0x3c, 0xd4, 0xcc, 0xc8, 0x56, 0xba, 0x40, 0xff, 0x58,
+	0x26, 0x0f, 0xf9, 0x7d, 0x1f, 0xcf, 0x39, 0xbc, 0x0e, 0x2a, 0x9e, 0x06, 0x1e, 0x3f, 0xb1, 0x9b,
+	0xbd, 0x30, 0xe0, 0x01, 0xab, 0xc9, 0x52, 0x15, 0x4a, 0xf8, 0xba, 0x2c, 0x15, 0xcb, 0x9d, 0x20,
+	0xe8, 0x78, 0xb4, 0x66, 0xf7, 0xdc, 0x9a, 0xed, 0xfb, 0x01, 0xb7, 0xb9, 0x1b, 0xf8, 0x4c, 0xb6,
+	0x2a, 0x96, 0x94, 0x15, 0x4a, 0xad, 0xfe, 0x71, 0x8d, 0x76, 0x7b, 0xfc, 0x9d, 0x32, 0x0e, 0xc1,
+	0xb7, 0x83, 0x6e, 0x37, 0xf0, 0x93, 0x6d, 0x27, 0xd4, 0xf6, 0xf8, 0x89, 0xb2, 0x11, 0xd3, 0xe6,
+	0x05, 0x1d, 0xb7, 0x6d, 0x7b, 0x4d, 0x87, 0x9e, 0xba, 0x6d, 0x9a, 0xdc, 0xdf, 0xb0, 0x95, 0x4c,
+	0x9b, 0xed, 0xd8, 0x3d, 0x4e, 0x43, 0x65, 0x5c, 0x31, 0x8d, 0x41, 0x8f, 0xfa, 0xc7, 0x5e, 0x70,
+	0xd6, 0x7c, 0xb4, 0x91, 0x8c, 0x4c, 0x4f, 0xa9, 0xcf, 0xf5, 0x70, 0x97, 0x87, 0x6c, 0x6f, 0x39,
+	0xf5, 0x59, 0xc4, 0x1d, 0xc3, 0xe0, 0xdd, 0xb6, 0xdb, 0xec, 0xba, 0xad, 0xa6, 0xd3, 0x52, 0x0d,
+	0xd6, 0x12, 0x1a, 0xd8, 0x9e, 0x1d, 0x76, 0x2f, 0x9a, 0x2c, 0x25, 0x34, 0xe1, 0x94, 0x71, 0x69,
+	0x26, 0x6f, 0x50, 0x66, 0x2b, 0x08, 0xe9, 0xbe, 0xcf, 0xb8, 0xed, 0xb7, 0x29, 0x5e, 0x41, 0x93,
+	0xae, 0xfa, 0xbf, 0xe9, 0x3a, 0x05, 0x6b, 0xd5, 0x5a, 0xbf, 0x55, 0x47, 0xba, 0x6a, 0xdf, 0xc1,
+	0xf7, 0xd1, 0x75, 0xe9, 0xdd, 0xc2, 0xc4, 0xaa, 0xb5, 0x3e, 0xf9, 0x78, 0xb6, 0xaa, 0x9c, 0xfd,
+	0x4b, 0xf8, 0x69, 0x70, 0x9b, 0xf7, 0x59, 0x5d, 0xb5, 0x21, 0x5f, 0xa2, 0x6c, 0x14, 0x9e, 0xe1,
+	0x0a, 0xfa, 0xc4, 0xe5, 0xb4, 0xcb, 0x0a, 0xd6, 0xea, 0x35, 0xe8, 0xad, 0xb2, 0x24, 0xda, 0xaa,
+	0x2e, 0x9b, 0x90, 0x7f, 0x5e, 0x43, 0xd7, 0x8f, 0xc0, 0x8c, 0x0b, 0xe8, 0xc6, 0x29, 0x0d, 0x85,
+	0x6f, 0x94, 0x24, 0x5d, 0xc4, 0xf7, 0xd1, 0x4d, 0x15, 0x11, 0x56, 0x98, 0x00, 0xcc, 0x99, 0xaa,
+	0x0e, 0xd1, 0xa6, 0xfc, 0xad, 0x0f, 0x5a, 0xe0, 0x5d, 0x34, 0x6d, 0xc6, 0x9f, 0x15, 0xae, 0x41,
+	0xa7, 0xa5, 0xea, 0x50, 0x5e, 0x3c, 0x97, 0xc5, 0x6d, 0x28, 0xd5, 0xa7, 0xbc, 0x68, 0x91, 0xe1,
+	0x75, 0x74, 0x43, 0xf7, 0xff, 0x18, 0xfa, 0x4f, 0x55, 0x55, 0x3f, 0xd5, 0x41, 0x9b, 0xf1, 0x53,
+	0x94, 0x91, 0xff, 0x36, 0xf9, 0xbb, 0x1e, 0x65, 0x85, 0x4f, 0xa0, 0x39, 0x36, 0x9b, 0xbf, 0x7c,
+	0xd7, 0xa3, 0xf5, 0x49, 0x67, 0xf0, 0x3f, 0xc3, 0x3f, 0x41, 0x59, 0x48, 0x95, 0xe6, 0xb1, 0xeb,
+	0xc1, 0xd8, 0x6e, 0xa8, 0x7e, 0x50, 0x5b, 0xdd, 0x11, 0x7f, 0x77, 0xc1, 0x54, 0xcf, 0xd0, 0x8b,
+	0x02, 0xc3, 0xbf, 0x40, 0xb9, 0x8b, 0x3c, 0xb1, 0xb9, 0xdd, 0xb2, 0x19, 0x2d, 0x94, 0xa1, 0x73,
+	0xbe, 0x2a, 0x2c, 0xd5, 0x17, 0x6e, 0x4b, 0xb2, 0x6e, 0xdb, 0xdc, 0xae, 0x4f, 0x8b, 0x3a, 0x51,
+	0xa5, 0xda, 0xe2, 0x1d, 0x94, 0x8f, 0xe6, 0x91, 0x86, 0x58, 0x02, 0x88, 0x39, 0x09, 0xb1, 0x29,
+	0x6c, 0x11, 0x10, 0xa0, 0x94, 0x95, 0xaa, 0xfd, 0x37, 0x1f, 0xdf, 0xbc, 0x3e, 0x73, 0xe3, 0xf1,
+	0xbf, 0x36, 0x50, 0x56, 0x86, 0xb0, 0x41, 0x43, 0xd1, 0x1c, 0x6f, 0xa3, 0x5b, 0x7b, 0x94, 0xab,
+	0xb0, 0xce, 0x57, 0xe5, 0x84, 0xaf, 0xea, 0x09, 0x5f, 0xdd, 0x11, 0x13, 0xbe, 0x38, 0xa5, 0xd3,
+	0x42, 0xb6, 0x23, 0xd3, 0x7f, 0xf8, 0xf7, 0x7f, 0x7e, 0x98, 0xb8, 0x85, 0x6f, 0xc0, 0xba, 0x71,
+	0xfa, 0x08, 0xbf, 0x41, 0xb9, 0xe7, 0x2e, 0xe3, 0x66, 0x6e, 0xa5, 0xa1, 0xcd, 0x25, 0x25, 0x19,
+	0x23, 0x8b, 0x00, 0x9a, 0xc7, 0x39, 0x05, 0x5a, 0x73, 0x07, 0x48, 0x0d, 0x34, 0xbd, 0x47, 0x0d,
+	0x74, 0x8c, 0xaa, 0x6a, 0xc1, 0xd9, 0xdf, 0x2e, 0x26, 0x66, 0x2d, 0x59, 0x06, 0xbc, 0x02, 0x9e,
+	0x8f, 0xe1, 0xd5, 0xce, 0x5d, 0xe7, 0x7b, 0xdc, 0x40, 0x19, 0xa1, 0x79, 0x53, 0xe7, 0x62, 0x9a,
+	0xdc, 0xdc, 0x70, 0xfe, 0x32, 0x52, 0x00, 0x68, 0x8c, 0x67, 0x34, 0xf4, 0x20, 0xa1, 0xbb, 0x08,
+	0x0b, 0xd0, 0xe7, 0x66, 0x7a, 0xa6, 0x41, 0x2f, 0x8f, 0xcc, 0x72, 0x46, 0x56, 0x80, 0x67, 0x11,
+	0x2f, 0x68, 0x9e, 0xa1, 0xc9, 0x82, 0xdb, 0x68, 0x66, 0x8f, 0x9a, 0x6c, 0x86, 0x67, 0x46, 0x4f,
+	0x23, 0x72, 0x07, 0xf0, 0x97, 0x71, 0x39, 0x05, 0x5f, 0x3a, 0xca, 0x47, 0xf3, 0xb1, 0x31, 0x1d,
+	0x06, 0x21, 0x67, 0x06, 0x55, 0x39, 0x85, 0x0a, 0x5a, 0x92, 0x0a, 0x30, 0xdd, 0xc1, 0x64, 0x14,
+	0x53, 0xad, 0x07, 0xa8, 0xef, 0x2d, 0x34, 0x3b, 0x3c, 0x2a, 0x81, 0x82, 0x97, 0x46, 0x50, 0xec,
+	0x3b, 0xc5, 0xd2, 0x08, 0x33, 0x79, 0x02, 0x02, 0xaa, 0xf8, 0xfe, 0xe5, 0x02, 0x6a, 0xe7, 0xe2,
+	0xa7, 0x29, 0x86, 0xfe, 0x37, 0x0b, 0x2d, 0xec, 0xf8, 0x76, 0xcb, 0xa3, 0x63, 0xab, 0x49, 0x89,
+	0x39, 0xf9, 0x12, 0x84, 0x3c, 0x25, 0x1b, 0xe3, 0x08, 0xa9, 0x51, 0x10, 0x81, 0xff, 0x6e, 0xa1,
+	0xc2, 0xb6, 0xcb, 0x7e, 0x54, 0x41, 0x3f, 0x07, 0x41, 0x5f, 0x90, 0x27, 0x63, 0x09, 0x72, 0xa4,
+	0x0a, 0xec, 0x24, 0x24, 0xc7, 0xae, 0x17, 0x9c, 0x99, 0xc9, 0x81, 0xab, 0xd1, 0x9d, 0x18, 0xec,
+	0x57, 0x4c, 0x89, 0x63, 0xc0, 0xfa, 0xa3, 0x85, 0xca, 0xaf, 0x7a, 0x8e, 0xcd, 0x69, 0x8c, 0xe8,
+	0x25, 0xc8, 0x28, 0xc7, 0x08, 0xa0, 0x5e, 0xf6, 0x49, 0x1d, 0xfa, 0x03, 0x90, 0xf0, 0x19, 0xb9,
+	0x82, 0x84, 0x67, 0x56, 0x05, 0xff, 0xc9, 0x42, 0x4b, 0x09, 0x2a, 0x5e, 0x50, 0x4e, 0x43, 0x29,
+	0xa3, 0x64, 0xc8, 0x00, 0xc3, 0x8b, 0xc0, 0xb9, 0x44, 0x45, 0x15, 0x54, 0xac, 0x93, 0xdb, 0x23,
+	0x55, 0x74, 0x05, 0x18, 0xc8, 0xe8, 0xa0, 0x85, 0x98, 0xcb, 0x81, 0xca, 0xf4, 0x79, 0x3e, 0xae,
+	0x85, 0x91, 0x7b, 0xc0, 0xf5, 0x29, 0xbe, 0x0a, 0x17, 0xe6, 0xa8, 0x94, 0x18, 0xdb, 0xbd, 0x30,
+	0xe8, 0xf7, 0x4c, 0xb2, 0x85, 0x98, 0xff, 0x65, 0x23, 0xf2, 0x10, 0x08, 0x2b, 0x78, 0xfd, 0x52,
+	0x17, 0x37, 0x3b, 0x12, 0xf6, 0x07, 0x0b, 0xad, 0xa5, 0xc4, 0x1a, 0x30, 0xa5, 0xa7, 0xd7, 0x92,
+	0x09, 0xaf, 0x12, 0xf5, 0x0d, 0x90, 0xf4, 0x80, 0x5c, 0x59, 0x92, 0x70, 0xfa, 0x01, 0x9a, 0x14,
+	0xbe, 0xb8, 0x6c, 0x45, 0x9f, 0x36, 0x0f, 0x12, 0x8c, 0x2c, 0x00, 0x59, 0x0e, 0x4f, 0x6b, 0x32,
+	0xbd, 0x74, 0x1f, 0xa0, 0xec, 0x05, 0xe0, 0xbe, 0x93, 0x0e, 0x39, 0x79, 0xe1, 0xe6, 0x84, 0x4d,
+	0x52, 0xc2, 0xb9, 0x0e, 0xc3, 0xaf, 0xd0, 0x4c, 0x9d, 0xb6, 0x03, 0xbf, 0xed, 0x7a, 0x54, 0xcb,
+	0x8c, 0xf6, 0x4d, 0xf5, 0x47, 0x19, 0x30, 0xe7, 0x49, 0x1c, 0x53, 0x0c, 0x7c, 0x07, 0x0e, 0x08,
+	0x09, 0x7b, 0xcb, 0xd0, 0x11, 0x4b, 0xc3, 0xe0, 0xd9, 0xa1, 0x91, 0xca, 0x4d, 0xe4, 0x1b, 0x94,
+	0xd9, 0x0a, 0xa9, 0xcd, 0x95, 0x34, 0x3c, 0xd4, 0x3b, 0x86, 0x56, 0x04, 0xb4, 0x59, 0x32, 0xec,
+	0x37, 0x21, 0xe9, 0x35, 0xca, 0xc8, 0x45, 0x39, 0x41, 0x55, 0xda, 0x20, 0x6f, 0x03, 0xde, 0x12,
+	0x29, 0x25, 0xa9, 0xd3, 0xcb, 0xeb, 0x6f, 0x50, 0x56, 0xad, 0xae, 0x63, 0x20, 0xab, 0x4d, 0x94,
+	0x94, 0x13, 0x91, 0xf5, 0x3a, 0xf9, 0x1a, 0x65, 0xea, 0xb4, 0x15, 0x04, 0xfc, 0x47, 0xd3, 0x1c,
+	0x02, 0x9c, 0x00, 0xde, 0xa6, 0x1e, 0xe5, 0x1f, 0xe0, 0x8c, 0x4a, 0x32, 0xb0, 0x03, 0x70, 0xb8,
+	0x85, 0x72, 0xbb, 0x41, 0xd8, 0xa6, 0x63, 0xa3, 0x7f, 0x0e, 0xe8, 0xb7, 0x2b, 0x6b, 0x89, 0xe8,
+	0xc7, 0x02, 0xb3, 0xa9, 0x38, 0xde, 0xa2, 0xec, 0x76, 0x70, 0xe6, 0x7b, 0x81, 0xed, 0xec, 0x77,
+	0xed, 0x0e, 0xc5, 0x73, 0x3a, 0x0d, 0xa0, 0xa8, 0x6d, 0xc5, 0x39, 0x4d, 0x7b, 0xd0, 0xa3, 0x21,
+	0x5c, 0x51, 0xeb, 0x94, 0xf5, 0xc8, 0xcf, 0x80, 0xe9, 0x21, 0xb9, 0x97, 0xc8, 0xe4, 0x0a, 0x88,
+	0xa6, 0xa3, 0x30, 0x58, 0xed, 0xdc, 0xb7, 0xbb, 0xf4, 0xfb, 0x67, 0x56, 0xe5, 0xfd, 0x84, 0x85,
+	0xff, 0x6c, 0xa1, 0xf9, 0x3d, 0xca, 0x0d, 0x1a, 0x79, 0x59, 0x4a, 0xd7, 0x90, 0x54, 0x4d, 0xbe,
+	0x02, 0x0d, 0x4f, 0xf0, 0xe3, 0x31, 0x34, 0xd4, 0x18, 0x30, 0x09, 0x1d, 0x6f, 0xe1, 0x04, 0x67,
+	0x40, 0x8e, 0x29, 0xe0, 0x0b, 0xb9, 0x9c, 0xe1, 0x71, 0x9c, 0x20, 0x98, 0x5d, 0x79, 0x54, 0x35,
+	0xc0, 0xd8, 0x50, 0x80, 0x93, 0x08, 0x19, 0xa9, 0x01, 0xe3, 0x5d, 0x7c, 0xe7, 0x2a, 0x8c, 0x82,
+	0xea, 0x1c, 0xe5, 0xb7, 0xc4, 0xc9, 0xdb, 0xbb, 0xe2, 0x38, 0x13, 0x83, 0xad, 0xc6, 0x59, 0x19,
+	0x77, 0x9c, 0x7f, 0xb5, 0x50, 0x7e, 0xb3, 0xcd, 0xdd, 0x53, 0x9b, 0x53, 0x20, 0x92, 0xdb, 0xc3,
+	0x98, 0xec, 0xbb, 0xc0, 0xfe, 0x15, 0xf9, 0xe9, 0x38, 0x61, 0x96, 0xd5, 0x7d, 0xe0, 0x53, 0x79,
+	0xf7, 0x17, 0x0b, 0xe5, 0xea, 0xf4, 0x94, 0x86, 0xfc, 0xff, 0xa2, 0x25, 0x04, 0x6a, 0xa5, 0xe5,
+	0xbd, 0x85, 0xe6, 0x8c, 0xe9, 0xf7, 0x32, 0x50, 0xd3, 0x9c, 0x98, 0xab, 0xb1, 0xa1, 0xaa, 0x4e,
+	0x7f, 0xdf, 0xa7, 0x8c, 0x17, 0x4b, 0x09, 0x6d, 0x84, 0xbc, 0xc0, 0x67, 0x54, 0x9f, 0x69, 0xf0,
+	0xdd, 0x61, 0x89, 0x20, 0x83, 0xd5, 0xb4, 0xbc, 0xa6, 0x2c, 0xe3, 0x33, 0x34, 0xa5, 0xa7, 0x81,
+	0x9a, 0x85, 0xc5, 0x44, 0xf8, 0x2b, 0x50, 0xdf, 0x4f, 0xcb, 0x4e, 0x45, 0x2d, 0x7f, 0x9a, 0x72,
+	0x0a, 0x8a, 0x78, 0x2c, 0x6e, 0xb6, 0x82, 0x41, 0x38, 0x3a, 0xa1, 0xed, 0x5c, 0xf8, 0xe1, 0x83,
+	0x45, 0x6c, 0xa4, 0x4d, 0x4a, 0x25, 0xc2, 0x16, 0x94, 0xcd, 0xbe, 0xa4, 0xd3, 0x4e, 0x38, 0x42,
+	0x99, 0x3d, 0xca, 0x0f, 0xfc, 0xfe, 0xbe, 0x2c, 0x47, 0xe7, 0x62, 0x4e, 0xb3, 0x0d, 0xcc, 0xe4,
+	0x33, 0xe0, 0x58, 0xc3, 0x2b, 0x89, 0x69, 0x10, 0xf8, 0x7d, 0x8d, 0x7b, 0x8e, 0xb2, 0xc6, 0x04,
+	0xf8, 0xf0, 0x61, 0x3d, 0x02, 0xca, 0x7b, 0x24, 0x2d, 0xac, 0xb6, 0xa2, 0x51, 0xcc, 0x62, 0xb3,
+	0x3e, 0x43, 0x93, 0x5b, 0x41, 0xb7, 0xeb, 0xf2, 0xff, 0x91, 0x5a, 0x2e, 0x3a, 0x9f, 0x93, 0xb4,
+	0xb0, 0xb6, 0x81, 0x24, 0x42, 0x7c, 0x88, 0xa6, 0x2f, 0x0e, 0x58, 0xf1, 0xfb, 0x6a, 0x56, 0x93,
+	0xc9, 0x0b, 0x2a, 0x01, 0xf8, 0x32, 0x2e, 0x26, 0x3a, 0x53, 0x5e, 0x4c, 0xdf, 0xa0, 0x7c, 0x04,
+	0xb1, 0xbb, 0x15, 0xf8, 0xc7, 0x6e, 0x27, 0x25, 0x4c, 0x03, 0xf3, 0x25, 0x61, 0xea, 0x75, 0x9b,
+	0x6d, 0x85, 0xe3, 0xa3, 0x39, 0xb9, 0x1c, 0x0c, 0x13, 0xc4, 0x41, 0x53, 0xf7, 0x5e, 0x75, 0xa9,
+	0x22, 0x97, 0x91, 0x09, 0x07, 0xbd, 0x8a, 0x3a, 0xe8, 0x6a, 0x77, 0xb6, 0xd1, 0x5e, 0x92, 0x77,
+	0x35, 0x8a, 0x66, 0x4d, 0xd8, 0x71, 0xae, 0x0b, 0xeb, 0x40, 0x40, 0xf0, 0x6a, 0x2a, 0x81, 0xbe,
+	0x26, 0x7c, 0x1b, 0x55, 0x2f, 0x1f, 0xe9, 0xd2, 0x4e, 0xd0, 0xf9, 0xf8, 0xeb, 0x1e, 0x4b, 0x3b,
+	0xae, 0xca, 0x67, 0x41, 0x5c, 0x47, 0xd9, 0xc1, 0xa9, 0x57, 0xb4, 0x1f, 0xf2, 0x4c, 0x0c, 0x8f,
+	0xac, 0x01, 0x5c, 0x09, 0x2f, 0x26, 0xc1, 0xc9, 0x23, 0xf0, 0x2e, 0x9a, 0x69, 0xf0, 0x90, 0xda,
+	0xdd, 0x43, 0xbb, 0xfd, 0x3b, 0xca, 0xd9, 0x41, 0x9f, 0xe3, 0x79, 0xc3, 0x11, 0xd2, 0x70, 0xd0,
+	0xe7, 0xa9, 0xf1, 0xfd, 0x68, 0xdd, 0xc2, 0x3b, 0x70, 0xd0, 0xa7, 0xee, 0x29, 0x55, 0x40, 0xfb,
+	0xfe, 0x88, 0xb7, 0xb6, 0x38, 0xfe, 0xbe, 0x4f, 0x3e, 0x7a, 0x68, 0xe1, 0x5f, 0xa1, 0xbc, 0x82,
+	0xd9, 0x3a, 0xb1, 0xfd, 0x0e, 0x85, 0x37, 0xcc, 0x74, 0x27, 0x16, 0x0c, 0xa4, 0x48, 0x17, 0x00,
+	0x7b, 0x83, 0x72, 0xf2, 0x78, 0x1f, 0x79, 0x09, 0xc5, 0x09, 0xaf, 0xa3, 0xc5, 0x84, 0x3a, 0xb2,
+	0x0a, 0xbe, 0x2b, 0x92, 0x39, 0xed, 0x3b, 0xe3, 0xa9, 0x55, 0xa6, 0xaa, 0xd8, 0x1e, 0xa2, 0xd8,
+	0xe6, 0x4d, 0x37, 0x86, 0x99, 0x90, 0xaa, 0x06, 0xa8, 0x8c, 0x48, 0x0b, 0xe5, 0xe4, 0x8c, 0xfb,
+	0x10, 0xd5, 0x9f, 0x02, 0xc1, 0x4a, 0x71, 0x04, 0x81, 0x90, 0xde, 0x46, 0x39, 0x79, 0x82, 0xbe,
+	0x8c, 0x23, 0x2d, 0xe4, 0x6a, 0x20, 0x95, 0x51, 0x03, 0xf9, 0x0e, 0xcd, 0x88, 0xc9, 0x10, 0x75,
+	0xc0, 0x88, 0xd9, 0x90, 0xe0, 0xad, 0x25, 0x20, 0x59, 0xc0, 0xc9, 0x21, 0xc0, 0xcf, 0xe1, 0x12,
+	0x98, 0xb0, 0x2d, 0x4d, 0x19, 0x07, 0x17, 0xa6, 0x6f, 0x16, 0xb8, 0x94, 0x7e, 0x34, 0x61, 0xf8,
+	0xd7, 0xe8, 0x66, 0x83, 0x7a, 0xc7, 0x2f, 0x29, 0xe3, 0x06, 0x58, 0x41, 0x83, 0x69, 0xeb, 0x60,
+	0xf1, 0xbf, 0x0b, 0xb0, 0xab, 0x64, 0x39, 0x11, 0x96, 0x51, 0xef, 0x18, 0x3e, 0xa2, 0xe0, 0x23,
+	0x38, 0x4d, 0x1b, 0x2f, 0xea, 0xc3, 0x6f, 0x22, 0xb1, 0x27, 0xf7, 0xf8, 0xd4, 0x15, 0x89, 0x2e,
+	0xda, 0xa9, 0xc7, 0x10, 0xb7, 0x85, 0xbf, 0x45, 0x78, 0x8f, 0xf2, 0xa1, 0x67, 0x76, 0x03, 0x39,
+	0xf9, 0x25, 0x3e, 0xee, 0x0f, 0x13, 0x1b, 0x1e, 0xf5, 0x31, 0x43, 0xd9, 0x86, 0xdb, 0xed, 0x7b,
+	0x36, 0xa7, 0xd0, 0x1f, 0x97, 0x07, 0x8e, 0x88, 0x56, 0xeb, 0x6d, 0x32, 0xe5, 0x54, 0x18, 0x7b,
+	0xcc, 0x32, 0x7d, 0xa4, 0x90, 0x9a, 0x02, 0x49, 0xe4, 0xe5, 0x0b, 0x84, 0xe4, 0x25, 0x1a, 0xde,
+	0x0e, 0x33, 0xd1, 0xdd, 0x30, 0x35, 0x15, 0xd5, 0xeb, 0x03, 0x99, 0x12, 0xf0, 0x17, 0xbd, 0xd5,
+	0xfb, 0x88, 0xba, 0x3a, 0x8f, 0x81, 0x77, 0x71, 0xc9, 0x3f, 0x7d, 0x54, 0x8b, 0x74, 0x17, 0x80,
+	0xdf, 0xa1, 0x49, 0x31, 0xe5, 0xdf, 0xf2, 0x23, 0xdb, 0xeb, 0x53, 0xbc, 0x58, 0x1d, 0x7c, 0x8e,
+	0xab, 0x42, 0x4d, 0xa3, 0x47, 0xdb, 0xee, 0xb1, 0x4b, 0xc3, 0xe2, 0x42, 0xc4, 0x54, 0xa7, 0xbc,
+	0x1f, 0xfa, 0xd0, 0x80, 0x91, 0x12, 0xc0, 0xcf, 0xe1, 0xbc, 0xf6, 0x48, 0x14, 0xf0, 0x35, 0x9a,
+	0x6c, 0x44, 0x8a, 0xf9, 0x18, 0x3e, 0x4d, 0xd7, 0x1d, 0x03, 0x8e, 0x22, 0x39, 0x28, 0xdf, 0xe0,
+	0x76, 0xc8, 0x0f, 0xba, 0x6d, 0x57, 0x24, 0xb1, 0x38, 0x7a, 0x05, 0x3e, 0x56, 0x09, 0xa2, 0x6b,
+	0x75, 0x30, 0xb1, 0xac, 0x36, 0xb2, 0x5d, 0xcd, 0x78, 0x32, 0x78, 0xf6, 0x67, 0x02, 0xaf, 0x39,
+	0xf8, 0x56, 0xf8, 0xcc, 0xaa, 0x7c, 0xed, 0xa1, 0x7c, 0x10, 0x76, 0x60, 0x4d, 0x6e, 0x07, 0xa1,
+	0xa3, 0xbe, 0x80, 0x7c, 0x9d, 0x91, 0x5f, 0x68, 0x0e, 0xe1, 0xeb, 0xe2, 0x6f, 0xab, 0x1d, 0x97,
+	0x9f, 0xf4, 0x5b, 0x22, 0x5f, 0x6a, 0xba, 0xa5, 0xfa, 0x0e, 0xfc, 0x40, 0x7f, 0x15, 0x7e, 0x5a,
+	0xeb, 0x04, 0xaa, 0xee, 0x1f, 0x13, 0xf3, 0x07, 0x1a, 0xef, 0x28, 0xfa, 0xc1, 0xe7, 0x70, 0xa2,
+	0x75, 0x1d, 0xda, 0x6f, 0xfc, 0x37, 0x00, 0x00, 0xff, 0xff, 0x37, 0x95, 0xcc, 0x79, 0x55, 0x1e,
+	0x00, 0x00,
+}
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ context.Context
+var _ grpc.ClientConn
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the grpc package it is being compiled against.
+const _ = grpc.SupportPackageIsVersion4
+
+// VolthaServiceClient is the client API for VolthaService service.
+//
+// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
+type VolthaServiceClient interface {
+	// Get high level information on the Voltha cluster
+	GetVoltha(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Voltha, error)
+	// List all Voltha cluster core instances
+	ListCoreInstances(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*CoreInstances, error)
+	// Get details on a Voltha cluster instance
+	GetCoreInstance(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*CoreInstance, error)
+	// List all active adapters (plugins) in the Voltha cluster
+	ListAdapters(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Adapters, error)
+	// List all logical devices managed by the Voltha cluster
+	ListLogicalDevices(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LogicalDevices, error)
+	// Get additional information on a given logical device
+	GetLogicalDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*LogicalDevice, error)
+	// List ports of a logical device
+	ListLogicalDevicePorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*LogicalPorts, error)
+	// Gets a logical device port
+	GetLogicalDevicePort(ctx context.Context, in *LogicalPortId, opts ...grpc.CallOption) (*LogicalPort, error)
+	// Enables a logical device port
+	EnableLogicalDevicePort(ctx context.Context, in *LogicalPortId, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Disables a logical device port
+	DisableLogicalDevicePort(ctx context.Context, in *LogicalPortId, opts ...grpc.CallOption) (*empty.Empty, error)
+	// List all flows of a logical device
+	ListLogicalDeviceFlows(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.Flows, error)
+	// Update flow table for logical device
+	UpdateLogicalDeviceFlowTable(ctx context.Context, in *openflow_13.FlowTableUpdate, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Update meter table for logical device
+	UpdateLogicalDeviceMeterTable(ctx context.Context, in *openflow_13.MeterModUpdate, opts ...grpc.CallOption) (*empty.Empty, error)
+	// List all meters of a logical device
+	ListLogicalDeviceMeters(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.Meters, error)
+	// List all flow groups of a logical device
+	ListLogicalDeviceFlowGroups(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.FlowGroups, error)
+	// Update group table for device
+	UpdateLogicalDeviceFlowGroupTable(ctx context.Context, in *openflow_13.FlowGroupTableUpdate, opts ...grpc.CallOption) (*empty.Empty, error)
+	// List all physical devices controlled by the Voltha cluster
+	ListDevices(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Devices, error)
+	// List all physical devices IDs controlled by the Voltha cluster
+	ListDeviceIds(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*common.IDs, error)
+	// Request to a voltha Core to reconcile a set of devices based on their IDs
+	ReconcileDevices(ctx context.Context, in *common.IDs, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Get more information on a given physical device
+	GetDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*Device, error)
+	// Pre-provision a new physical device
+	CreateDevice(ctx context.Context, in *Device, opts ...grpc.CallOption) (*Device, error)
+	// Enable a device.  If the device was in pre-provisioned state then it
+	// will transition to ENABLED state.  If it was in DISABLED state then it
+	// will transition to ENABLED state as well.
+	EnableDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Disable a device
+	DisableDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Reboot a device
+	RebootDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Delete a device
+	DeleteDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Forcefully delete a device
+	ForceDeleteDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Request an image download to the standby partition
+	// of a device.
+	// Note that the call is expected to be non-blocking.
+	DownloadImage(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error)
+	// Get image download status on a device
+	// The request retrieves progress on the device and updates the db record
+	// Deprecated in voltha 2.8, will be removed
+	GetImageDownloadStatus(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*ImageDownload, error)
+	// Get image download db record
+	// Deprecated in voltha 2.8, will be removed
+	GetImageDownload(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*ImageDownload, error)
+	// List image download db records for a given device
+	// Deprecated in voltha 2.8, will be removed
+	ListImageDownloads(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*ImageDownloads, error)
+	// Cancel an existing image download process on a device
+	// Deprecated in voltha 2.8, will be removed
+	CancelImageDownload(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error)
+	// Activate the specified image from the standby partition
+	// to the active partition.
+	// Depending on the device implementation, this call
+	// may or may not cause a device reboot.
+	// If no reboot occurs, a reboot is required to make the
+	// activated image run on the device.
+	// Note that the call is expected to be non-blocking.
+	// Deprecated in voltha 2.8, will be removed
+	ActivateImageUpdate(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error)
+	// Revert the specified image at the standby partition
+	// to the active partition, and revert to the previous image.
+	// Depending on the device implementation, this call
+	// may or may not cause a device reboot.
+	// If no reboot occurs, a reboot is required to make the
+	// previous image run on the device.
+	// Note that the call is expected to be non-blocking.
+	// Deprecated in voltha 2.8, will be removed
+	RevertImageUpdate(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error)
+	// Downloads a certain image to the standby partition of the devices
+	// Note that the call is expected to be non-blocking.
+	DownloadImageToDevice(ctx context.Context, in *DeviceImageDownloadRequest, opts ...grpc.CallOption) (*DeviceImageResponse, error)
+	// Get image status on a number of devices
+	// Polled from northbound systems to get state of download/activate/commit
+	GetImageStatus(ctx context.Context, in *DeviceImageRequest, opts ...grpc.CallOption) (*DeviceImageResponse, error)
+	// Aborts the upgrade of an image on a device
+	// To be used carefully; stops any further operations for the image on the given devices.
+	// Might also stop existing work where possible, but no guarantees are given;
+	// this depends on the implementation and procedure status.
+	AbortImageUpgradeToDevice(ctx context.Context, in *DeviceImageRequest, opts ...grpc.CallOption) (*DeviceImageResponse, error)
+	// Get Both Active and Standby image for a given device
+	GetOnuImages(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*OnuImages, error)
+	// Activate the specified image from the standby partition
+	// to the active partition.
+	// Depending on the device implementation, this call
+	// may or may not cause a device reboot.
+	// If no reboot occurs, a reboot is required to make the
+	// activated image run on the device.
+	// Note that the call is expected to be non-blocking.
+	ActivateImage(ctx context.Context, in *DeviceImageRequest, opts ...grpc.CallOption) (*DeviceImageResponse, error)
+	// Commit the specified image to be the default.
+	// Depending on the device implementation, this call
+	// may or may not cause a device reboot.
+	// If no reboot occurs, a reboot is required to make the
+	// activated image run on the device upon the next reboot.
+	// Note that the call is expected to be non-blocking.
+	CommitImage(ctx context.Context, in *DeviceImageRequest, opts ...grpc.CallOption) (*DeviceImageResponse, error)
+	// List ports of a device
+	ListDevicePorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*Ports, error)
+	// List pm config of a device
+	ListDevicePmConfigs(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*PmConfigs, error)
+	// Update the pm config of a device
+	UpdateDevicePmConfigs(ctx context.Context, in *PmConfigs, opts ...grpc.CallOption) (*empty.Empty, error)
+	// List all flows of a device
+	ListDeviceFlows(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.Flows, error)
+	// List all flow groups of a device
+	ListDeviceFlowGroups(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.FlowGroups, error)
+	// List device types known to Voltha
+	ListDeviceTypes(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*DeviceTypes, error)
+	// Get additional information on a device type
+	GetDeviceType(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*DeviceType, error)
+	// Stream control packets to the dataplane
+	StreamPacketsOut(ctx context.Context, opts ...grpc.CallOption) (VolthaService_StreamPacketsOutClient, error)
+	// Receive control packet stream
+	ReceivePacketsIn(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (VolthaService_ReceivePacketsInClient, error)
+	ReceiveChangeEvents(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (VolthaService_ReceiveChangeEventsClient, error)
+	CreateEventFilter(ctx context.Context, in *EventFilter, opts ...grpc.CallOption) (*EventFilter, error)
+	// Get all filters present for a device
+	GetEventFilter(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*EventFilters, error)
+	UpdateEventFilter(ctx context.Context, in *EventFilter, opts ...grpc.CallOption) (*EventFilter, error)
+	DeleteEventFilter(ctx context.Context, in *EventFilter, opts ...grpc.CallOption) (*empty.Empty, error)
+	// Get all the filters present
+	ListEventFilters(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*EventFilters, error)
+	GetImages(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*Images, error)
+	SelfTest(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*SelfTestResponse, error)
+	// OpenOMCI MIB information
+	GetMibDeviceData(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*omci.MibDeviceData, error)
+	// OpenOMCI ALARM information
+	GetAlarmDeviceData(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*omci.AlarmDeviceData, error)
+	// Simulate an Alarm
+	SimulateAlarm(ctx context.Context, in *SimulateAlarmRequest, opts ...grpc.CallOption) (*common.OperationResp, error)
+	EnablePort(ctx context.Context, in *Port, opts ...grpc.CallOption) (*empty.Empty, error)
+	DisablePort(ctx context.Context, in *Port, opts ...grpc.CallOption) (*empty.Empty, error)
+	GetExtValue(ctx context.Context, in *extension.ValueSpecifier, opts ...grpc.CallOption) (*extension.ReturnValues, error)
+	SetExtValue(ctx context.Context, in *extension.ValueSet, opts ...grpc.CallOption) (*empty.Empty, error)
+	// OMCI start and stop CLI implementation
+	StartOmciTestAction(ctx context.Context, in *omci.OmciTestRequest, opts ...grpc.CallOption) (*omci.TestResponse, error)
+}
+
+type volthaServiceClient struct {
+	cc *grpc.ClientConn
+}
+
+func NewVolthaServiceClient(cc *grpc.ClientConn) VolthaServiceClient {
+	return &volthaServiceClient{cc}
+}
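+
+// Illustrative sketch (not generated code): a minimal way a caller might
+// construct the client and query the cluster. The endpoint address is a
+// placeholder and error handling is abbreviated; assumes the context, grpc
+// and empty imports already used in this file:
+//
+//	conn, err := grpc.Dial("voltha-api:55555", grpc.WithInsecure())
+//	if err != nil {
+//		panic(err)
+//	}
+//	defer conn.Close()
+//	client := NewVolthaServiceClient(conn)
+//	v, err := client.GetVoltha(context.Background(), &empty.Empty{})
+//	if err != nil {
+//		panic(err)
+//	}
+//	println("voltha version:", v.GetVersion())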
+
+func (c *volthaServiceClient) GetVoltha(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Voltha, error) {
+	out := new(Voltha)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetVoltha", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListCoreInstances(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*CoreInstances, error) {
+	out := new(CoreInstances)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListCoreInstances", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetCoreInstance(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*CoreInstance, error) {
+	out := new(CoreInstance)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetCoreInstance", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListAdapters(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Adapters, error) {
+	out := new(Adapters)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListAdapters", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListLogicalDevices(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*LogicalDevices, error) {
+	out := new(LogicalDevices)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListLogicalDevices", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetLogicalDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*LogicalDevice, error) {
+	out := new(LogicalDevice)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetLogicalDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListLogicalDevicePorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*LogicalPorts, error) {
+	out := new(LogicalPorts)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListLogicalDevicePorts", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetLogicalDevicePort(ctx context.Context, in *LogicalPortId, opts ...grpc.CallOption) (*LogicalPort, error) {
+	out := new(LogicalPort)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetLogicalDevicePort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) EnableLogicalDevicePort(ctx context.Context, in *LogicalPortId, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/EnableLogicalDevicePort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) DisableLogicalDevicePort(ctx context.Context, in *LogicalPortId, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/DisableLogicalDevicePort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListLogicalDeviceFlows(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.Flows, error) {
+	out := new(openflow_13.Flows)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListLogicalDeviceFlows", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) UpdateLogicalDeviceFlowTable(ctx context.Context, in *openflow_13.FlowTableUpdate, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/UpdateLogicalDeviceFlowTable", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) UpdateLogicalDeviceMeterTable(ctx context.Context, in *openflow_13.MeterModUpdate, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/UpdateLogicalDeviceMeterTable", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListLogicalDeviceMeters(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.Meters, error) {
+	out := new(openflow_13.Meters)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListLogicalDeviceMeters", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListLogicalDeviceFlowGroups(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.FlowGroups, error) {
+	out := new(openflow_13.FlowGroups)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListLogicalDeviceFlowGroups", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) UpdateLogicalDeviceFlowGroupTable(ctx context.Context, in *openflow_13.FlowGroupTableUpdate, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/UpdateLogicalDeviceFlowGroupTable", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDevices(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*Devices, error) {
+	out := new(Devices)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDevices", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDeviceIds(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*common.IDs, error) {
+	out := new(common.IDs)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDeviceIds", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ReconcileDevices(ctx context.Context, in *common.IDs, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ReconcileDevices", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*Device, error) {
+	out := new(Device)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) CreateDevice(ctx context.Context, in *Device, opts ...grpc.CallOption) (*Device, error) {
+	out := new(Device)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/CreateDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) EnableDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/EnableDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) DisableDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/DisableDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) RebootDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/RebootDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) DeleteDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/DeleteDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ForceDeleteDevice(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ForceDeleteDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *volthaServiceClient) DownloadImage(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error) {
+	out := new(common.OperationResp)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/DownloadImage", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *volthaServiceClient) GetImageDownloadStatus(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*ImageDownload, error) {
+	out := new(ImageDownload)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetImageDownloadStatus", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *volthaServiceClient) GetImageDownload(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*ImageDownload, error) {
+	out := new(ImageDownload)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetImageDownload", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *volthaServiceClient) ListImageDownloads(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*ImageDownloads, error) {
+	out := new(ImageDownloads)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListImageDownloads", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *volthaServiceClient) CancelImageDownload(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error) {
+	out := new(common.OperationResp)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/CancelImageDownload", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *volthaServiceClient) ActivateImageUpdate(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error) {
+	out := new(common.OperationResp)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ActivateImageUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// Deprecated: Do not use.
+func (c *volthaServiceClient) RevertImageUpdate(ctx context.Context, in *ImageDownload, opts ...grpc.CallOption) (*common.OperationResp, error) {
+	out := new(common.OperationResp)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/RevertImageUpdate", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) DownloadImageToDevice(ctx context.Context, in *DeviceImageDownloadRequest, opts ...grpc.CallOption) (*DeviceImageResponse, error) {
+	out := new(DeviceImageResponse)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/DownloadImageToDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetImageStatus(ctx context.Context, in *DeviceImageRequest, opts ...grpc.CallOption) (*DeviceImageResponse, error) {
+	out := new(DeviceImageResponse)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetImageStatus", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) AbortImageUpgradeToDevice(ctx context.Context, in *DeviceImageRequest, opts ...grpc.CallOption) (*DeviceImageResponse, error) {
+	out := new(DeviceImageResponse)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/AbortImageUpgradeToDevice", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetOnuImages(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*OnuImages, error) {
+	out := new(OnuImages)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetOnuImages", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ActivateImage(ctx context.Context, in *DeviceImageRequest, opts ...grpc.CallOption) (*DeviceImageResponse, error) {
+	out := new(DeviceImageResponse)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ActivateImage", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) CommitImage(ctx context.Context, in *DeviceImageRequest, opts ...grpc.CallOption) (*DeviceImageResponse, error) {
+	out := new(DeviceImageResponse)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/CommitImage", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDevicePorts(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*Ports, error) {
+	out := new(Ports)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDevicePorts", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDevicePmConfigs(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*PmConfigs, error) {
+	out := new(PmConfigs)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDevicePmConfigs", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) UpdateDevicePmConfigs(ctx context.Context, in *PmConfigs, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/UpdateDevicePmConfigs", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDeviceFlows(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.Flows, error) {
+	out := new(openflow_13.Flows)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDeviceFlows", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDeviceFlowGroups(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*openflow_13.FlowGroups, error) {
+	out := new(openflow_13.FlowGroups)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDeviceFlowGroups", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListDeviceTypes(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*DeviceTypes, error) {
+	out := new(DeviceTypes)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListDeviceTypes", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetDeviceType(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*DeviceType, error) {
+	out := new(DeviceType)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetDeviceType", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) StreamPacketsOut(ctx context.Context, opts ...grpc.CallOption) (VolthaService_StreamPacketsOutClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_VolthaService_serviceDesc.Streams[0], "/voltha.VolthaService/StreamPacketsOut", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &volthaServiceStreamPacketsOutClient{stream}
+	return x, nil
+}
+
+type VolthaService_StreamPacketsOutClient interface {
+	Send(*openflow_13.PacketOut) error
+	CloseAndRecv() (*empty.Empty, error)
+	grpc.ClientStream
+}
+
+type volthaServiceStreamPacketsOutClient struct {
+	grpc.ClientStream
+}
+
+func (x *volthaServiceStreamPacketsOutClient) Send(m *openflow_13.PacketOut) error {
+	return x.ClientStream.SendMsg(m)
+}
+
+func (x *volthaServiceStreamPacketsOutClient) CloseAndRecv() (*empty.Empty, error) {
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	m := new(empty.Empty)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
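+
+// Illustrative sketch (not generated code): StreamPacketsOut is a
+// client-streaming RPC, so a caller sends zero or more PacketOut messages
+// and then closes the stream to receive the final Empty. "client" and
+// "packets" below are caller-supplied placeholders:
+//
+//	stream, err := client.StreamPacketsOut(context.Background())
+//	if err != nil {
+//		panic(err)
+//	}
+//	for _, pkt := range packets { // packets []*openflow_13.PacketOut
+//		if err := stream.Send(pkt); err != nil {
+//			panic(err)
+//		}
+//	}
+//	if _, err := stream.CloseAndRecv(); err != nil {
+//		panic(err)
+//	}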
+
+func (c *volthaServiceClient) ReceivePacketsIn(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (VolthaService_ReceivePacketsInClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_VolthaService_serviceDesc.Streams[1], "/voltha.VolthaService/ReceivePacketsIn", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &volthaServiceReceivePacketsInClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type VolthaService_ReceivePacketsInClient interface {
+	Recv() (*openflow_13.PacketIn, error)
+	grpc.ClientStream
+}
+
+type volthaServiceReceivePacketsInClient struct {
+	grpc.ClientStream
+}
+
+func (x *volthaServiceReceivePacketsInClient) Recv() (*openflow_13.PacketIn, error) {
+	m := new(openflow_13.PacketIn)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
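+
+// Illustrative sketch (not generated code): ReceivePacketsIn is a
+// server-streaming RPC, so a caller typically loops on Recv until io.EOF.
+// "client" and handlePacketIn are caller-supplied placeholders, and the
+// standard library io package is assumed:
+//
+//	stream, err := client.ReceivePacketsIn(context.Background(), &empty.Empty{})
+//	if err != nil {
+//		panic(err)
+//	}
+//	for {
+//		pkt, err := stream.Recv()
+//		if err == io.EOF {
+//			break
+//		}
+//		if err != nil {
+//			panic(err)
+//		}
+//		handlePacketIn(pkt)
+//	}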
+
+func (c *volthaServiceClient) ReceiveChangeEvents(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (VolthaService_ReceiveChangeEventsClient, error) {
+	stream, err := c.cc.NewStream(ctx, &_VolthaService_serviceDesc.Streams[2], "/voltha.VolthaService/ReceiveChangeEvents", opts...)
+	if err != nil {
+		return nil, err
+	}
+	x := &volthaServiceReceiveChangeEventsClient{stream}
+	if err := x.ClientStream.SendMsg(in); err != nil {
+		return nil, err
+	}
+	if err := x.ClientStream.CloseSend(); err != nil {
+		return nil, err
+	}
+	return x, nil
+}
+
+type VolthaService_ReceiveChangeEventsClient interface {
+	Recv() (*openflow_13.ChangeEvent, error)
+	grpc.ClientStream
+}
+
+type volthaServiceReceiveChangeEventsClient struct {
+	grpc.ClientStream
+}
+
+func (x *volthaServiceReceiveChangeEventsClient) Recv() (*openflow_13.ChangeEvent, error) {
+	m := new(openflow_13.ChangeEvent)
+	if err := x.ClientStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
+
+func (c *volthaServiceClient) CreateEventFilter(ctx context.Context, in *EventFilter, opts ...grpc.CallOption) (*EventFilter, error) {
+	out := new(EventFilter)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/CreateEventFilter", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetEventFilter(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*EventFilters, error) {
+	out := new(EventFilters)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetEventFilter", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) UpdateEventFilter(ctx context.Context, in *EventFilter, opts ...grpc.CallOption) (*EventFilter, error) {
+	out := new(EventFilter)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/UpdateEventFilter", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) DeleteEventFilter(ctx context.Context, in *EventFilter, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/DeleteEventFilter", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) ListEventFilters(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*EventFilters, error) {
+	out := new(EventFilters)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/ListEventFilters", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetImages(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*Images, error) {
+	out := new(Images)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetImages", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) SelfTest(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*SelfTestResponse, error) {
+	out := new(SelfTestResponse)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/SelfTest", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetMibDeviceData(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*omci.MibDeviceData, error) {
+	out := new(omci.MibDeviceData)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetMibDeviceData", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetAlarmDeviceData(ctx context.Context, in *common.ID, opts ...grpc.CallOption) (*omci.AlarmDeviceData, error) {
+	out := new(omci.AlarmDeviceData)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetAlarmDeviceData", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) SimulateAlarm(ctx context.Context, in *SimulateAlarmRequest, opts ...grpc.CallOption) (*common.OperationResp, error) {
+	out := new(common.OperationResp)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/SimulateAlarm", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) EnablePort(ctx context.Context, in *Port, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/EnablePort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) DisablePort(ctx context.Context, in *Port, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/DisablePort", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) GetExtValue(ctx context.Context, in *extension.ValueSpecifier, opts ...grpc.CallOption) (*extension.ReturnValues, error) {
+	out := new(extension.ReturnValues)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/GetExtValue", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) SetExtValue(ctx context.Context, in *extension.ValueSet, opts ...grpc.CallOption) (*empty.Empty, error) {
+	out := new(empty.Empty)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/SetExtValue", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+func (c *volthaServiceClient) StartOmciTestAction(ctx context.Context, in *omci.OmciTestRequest, opts ...grpc.CallOption) (*omci.TestResponse, error) {
+	out := new(omci.TestResponse)
+	err := c.cc.Invoke(ctx, "/voltha.VolthaService/StartOmciTestAction", in, out, opts...)
+	if err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// VolthaServiceServer is the server API for VolthaService service.
+type VolthaServiceServer interface {
+	// Get high level information on the Voltha cluster
+	GetVoltha(context.Context, *empty.Empty) (*Voltha, error)
+	// List all Voltha cluster core instances
+	ListCoreInstances(context.Context, *empty.Empty) (*CoreInstances, error)
+	// Get details on a Voltha cluster instance
+	GetCoreInstance(context.Context, *common.ID) (*CoreInstance, error)
+	// List all active adapters (plugins) in the Voltha cluster
+	ListAdapters(context.Context, *empty.Empty) (*Adapters, error)
+	// List all logical devices managed by the Voltha cluster
+	ListLogicalDevices(context.Context, *empty.Empty) (*LogicalDevices, error)
+	// Get additional information on a given logical device
+	GetLogicalDevice(context.Context, *common.ID) (*LogicalDevice, error)
+	// List ports of a logical device
+	ListLogicalDevicePorts(context.Context, *common.ID) (*LogicalPorts, error)
+	// Gets a logical device port
+	GetLogicalDevicePort(context.Context, *LogicalPortId) (*LogicalPort, error)
+	// Enables a logical device port
+	EnableLogicalDevicePort(context.Context, *LogicalPortId) (*empty.Empty, error)
+	// Disables a logical device port
+	DisableLogicalDevicePort(context.Context, *LogicalPortId) (*empty.Empty, error)
+	// List all flows of a logical device
+	ListLogicalDeviceFlows(context.Context, *common.ID) (*openflow_13.Flows, error)
+	// Update flow table for logical device
+	UpdateLogicalDeviceFlowTable(context.Context, *openflow_13.FlowTableUpdate) (*empty.Empty, error)
+	// Update meter table for logical device
+	UpdateLogicalDeviceMeterTable(context.Context, *openflow_13.MeterModUpdate) (*empty.Empty, error)
+	// List all meters of a logical device
+	ListLogicalDeviceMeters(context.Context, *common.ID) (*openflow_13.Meters, error)
+	// List all flow groups of a logical device
+	ListLogicalDeviceFlowGroups(context.Context, *common.ID) (*openflow_13.FlowGroups, error)
+	// Update group table for device
+	UpdateLogicalDeviceFlowGroupTable(context.Context, *openflow_13.FlowGroupTableUpdate) (*empty.Empty, error)
+	// List all physical devices controlled by the Voltha cluster
+	ListDevices(context.Context, *empty.Empty) (*Devices, error)
+	// List all physical devices IDs controlled by the Voltha cluster
+	ListDeviceIds(context.Context, *empty.Empty) (*common.IDs, error)
+	// Request to a voltha Core to reconcile a set of devices based on their IDs
+	ReconcileDevices(context.Context, *common.IDs) (*empty.Empty, error)
+	// Get more information on a given physical device
+	GetDevice(context.Context, *common.ID) (*Device, error)
+	// Pre-provision a new physical device
+	CreateDevice(context.Context, *Device) (*Device, error)
+	// Enable a device.  If the device was in pre-provisioned state then it
+	// will transition to ENABLED state.  If it was in DISABLED state then it
+	// will transition to ENABLED state as well.
+	EnableDevice(context.Context, *common.ID) (*empty.Empty, error)
+	// Disable a device
+	DisableDevice(context.Context, *common.ID) (*empty.Empty, error)
+	// Reboot a device
+	RebootDevice(context.Context, *common.ID) (*empty.Empty, error)
+	// Delete a device
+	DeleteDevice(context.Context, *common.ID) (*empty.Empty, error)
+	// Forcefully delete a device
+	ForceDeleteDevice(context.Context, *common.ID) (*empty.Empty, error)
+	// Request an image download to the standby partition
+	// of a device.
+	// Note that the call is expected to be non-blocking.
+	DownloadImage(context.Context, *ImageDownload) (*common.OperationResp, error)
+	// Get image download status on a device
+	// The request retrieves progress on the device and updates the db record
+	// Deprecated in voltha 2.8, will be removed
+	GetImageDownloadStatus(context.Context, *ImageDownload) (*ImageDownload, error)
+	// Get image download db record
+	// Deprecated in voltha 2.8, will be removed
+	GetImageDownload(context.Context, *ImageDownload) (*ImageDownload, error)
+	// List image download db records for a given device
+	// Deprecated in voltha 2.8, will be removed
+	ListImageDownloads(context.Context, *common.ID) (*ImageDownloads, error)
+	// Cancel an existing image download process on a device
+	// Deprecated in voltha 2.8, will be removed
+	CancelImageDownload(context.Context, *ImageDownload) (*common.OperationResp, error)
+	// Activate the specified image from the standby partition
+	// to the active partition.
+	// Depending on the device implementation, this call
+	// may or may not cause a device reboot.
+	// If no reboot occurs, a reboot is required to make the
+	// activated image run on the device.
+	// Note that the call is expected to be non-blocking.
+	// Deprecated in voltha 2.8, will be removed
+	ActivateImageUpdate(context.Context, *ImageDownload) (*common.OperationResp, error)
+	// Revert the specified image at the standby partition
+	// to the active partition, and revert to the previous image.
+	// Depending on the device implementation, this call
+	// may or may not cause a device reboot.
+	// If no reboot occurs, a reboot is required to make the
+	// previous image run on the device.
+	// Note that the call is expected to be non-blocking.
+	// Deprecated in voltha 2.8, will be removed
+	RevertImageUpdate(context.Context, *ImageDownload) (*common.OperationResp, error)
+	// Downloads a certain image to the standby partition of the devices
+	// Note that the call is expected to be non-blocking.
+	DownloadImageToDevice(context.Context, *DeviceImageDownloadRequest) (*DeviceImageResponse, error)
+	// Get image status on a number of devices
+	// Polled from northbound systems to get state of download/activate/commit
+	GetImageStatus(context.Context, *DeviceImageRequest) (*DeviceImageResponse, error)
+	// Aborts the upgrade of an image on a device
+	// To be used carefully; stops any further operations for the image on the given devices.
+	// Might also stop existing work where possible, but no guarantees are given;
+	// this depends on the implementation and procedure status.
+	AbortImageUpgradeToDevice(context.Context, *DeviceImageRequest) (*DeviceImageResponse, error)
+	// Get both the Active and Standby images for a given device
+	GetOnuImages(context.Context, *common.ID) (*OnuImages, error)
+	// Activate the specified image from a standby partition
+	// to the active partition.
+	// Depending on the device implementation, this call
+	// may or may not cause a device reboot.
+	// If no reboot occurs, a reboot is required to make the
+	// activated image run on the device
+	// Note that the call is expected to be non-blocking.
+	ActivateImage(context.Context, *DeviceImageRequest) (*DeviceImageResponse, error)
+	// Commit the specified image to be the default.
+	// Depending on the device implementation, this call
+	// may or may not cause a device reboot.
+	// If no reboot occurs, the activated image will only be
+	// running on the device after the next reboot
+	// Note that the call is expected to be non-blocking.
+	CommitImage(context.Context, *DeviceImageRequest) (*DeviceImageResponse, error)
+	// List ports of a device
+	ListDevicePorts(context.Context, *common.ID) (*Ports, error)
+	// List pm config of a device
+	ListDevicePmConfigs(context.Context, *common.ID) (*PmConfigs, error)
+	// Update the pm config of a device
+	UpdateDevicePmConfigs(context.Context, *PmConfigs) (*empty.Empty, error)
+	// List all flows of a device
+	ListDeviceFlows(context.Context, *common.ID) (*openflow_13.Flows, error)
+	// List all flow groups of a device
+	ListDeviceFlowGroups(context.Context, *common.ID) (*openflow_13.FlowGroups, error)
+	// List device types known to Voltha
+	ListDeviceTypes(context.Context, *empty.Empty) (*DeviceTypes, error)
+	// Get additional information on a device type
+	GetDeviceType(context.Context, *common.ID) (*DeviceType, error)
+	// Stream control packets to the dataplane
+	StreamPacketsOut(VolthaService_StreamPacketsOutServer) error
+	// Receive control packet stream
+	ReceivePacketsIn(*empty.Empty, VolthaService_ReceivePacketsInServer) error
+	ReceiveChangeEvents(*empty.Empty, VolthaService_ReceiveChangeEventsServer) error
+	CreateEventFilter(context.Context, *EventFilter) (*EventFilter, error)
+	// Get all filters present for a device
+	GetEventFilter(context.Context, *common.ID) (*EventFilters, error)
+	UpdateEventFilter(context.Context, *EventFilter) (*EventFilter, error)
+	DeleteEventFilter(context.Context, *EventFilter) (*empty.Empty, error)
+	// Get all the filters present
+	ListEventFilters(context.Context, *empty.Empty) (*EventFilters, error)
+	GetImages(context.Context, *common.ID) (*Images, error)
+	SelfTest(context.Context, *common.ID) (*SelfTestResponse, error)
+	// OpenOMCI MIB information
+	GetMibDeviceData(context.Context, *common.ID) (*omci.MibDeviceData, error)
+	// OpenOMCI ALARM information
+	GetAlarmDeviceData(context.Context, *common.ID) (*omci.AlarmDeviceData, error)
+	// Simulate an Alarm
+	SimulateAlarm(context.Context, *SimulateAlarmRequest) (*common.OperationResp, error)
+	EnablePort(context.Context, *Port) (*empty.Empty, error)
+	DisablePort(context.Context, *Port) (*empty.Empty, error)
+	GetExtValue(context.Context, *extension.ValueSpecifier) (*extension.ReturnValues, error)
+	SetExtValue(context.Context, *extension.ValueSet) (*empty.Empty, error)
+	// OMCI start and stop CLI implementation
+	StartOmciTestAction(context.Context, *omci.OmciTestRequest) (*omci.TestResponse, error)
+}
+
+// UnimplementedVolthaServiceServer can be embedded to have forward compatible implementations.
+type UnimplementedVolthaServiceServer struct {
+}
+
+func (*UnimplementedVolthaServiceServer) GetVoltha(ctx context.Context, req *empty.Empty) (*Voltha, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetVoltha not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListCoreInstances(ctx context.Context, req *empty.Empty) (*CoreInstances, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListCoreInstances not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetCoreInstance(ctx context.Context, req *common.ID) (*CoreInstance, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetCoreInstance not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListAdapters(ctx context.Context, req *empty.Empty) (*Adapters, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListAdapters not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListLogicalDevices(ctx context.Context, req *empty.Empty) (*LogicalDevices, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListLogicalDevices not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetLogicalDevice(ctx context.Context, req *common.ID) (*LogicalDevice, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetLogicalDevice not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListLogicalDevicePorts(ctx context.Context, req *common.ID) (*LogicalPorts, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListLogicalDevicePorts not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetLogicalDevicePort(ctx context.Context, req *LogicalPortId) (*LogicalPort, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetLogicalDevicePort not implemented")
+}
+func (*UnimplementedVolthaServiceServer) EnableLogicalDevicePort(ctx context.Context, req *LogicalPortId) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method EnableLogicalDevicePort not implemented")
+}
+func (*UnimplementedVolthaServiceServer) DisableLogicalDevicePort(ctx context.Context, req *LogicalPortId) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DisableLogicalDevicePort not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListLogicalDeviceFlows(ctx context.Context, req *common.ID) (*openflow_13.Flows, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListLogicalDeviceFlows not implemented")
+}
+func (*UnimplementedVolthaServiceServer) UpdateLogicalDeviceFlowTable(ctx context.Context, req *openflow_13.FlowTableUpdate) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdateLogicalDeviceFlowTable not implemented")
+}
+func (*UnimplementedVolthaServiceServer) UpdateLogicalDeviceMeterTable(ctx context.Context, req *openflow_13.MeterModUpdate) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdateLogicalDeviceMeterTable not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListLogicalDeviceMeters(ctx context.Context, req *common.ID) (*openflow_13.Meters, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListLogicalDeviceMeters not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListLogicalDeviceFlowGroups(ctx context.Context, req *common.ID) (*openflow_13.FlowGroups, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListLogicalDeviceFlowGroups not implemented")
+}
+func (*UnimplementedVolthaServiceServer) UpdateLogicalDeviceFlowGroupTable(ctx context.Context, req *openflow_13.FlowGroupTableUpdate) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdateLogicalDeviceFlowGroupTable not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListDevices(ctx context.Context, req *empty.Empty) (*Devices, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListDevices not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListDeviceIds(ctx context.Context, req *empty.Empty) (*common.IDs, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListDeviceIds not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ReconcileDevices(ctx context.Context, req *common.IDs) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ReconcileDevices not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetDevice(ctx context.Context, req *common.ID) (*Device, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetDevice not implemented")
+}
+func (*UnimplementedVolthaServiceServer) CreateDevice(ctx context.Context, req *Device) (*Device, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CreateDevice not implemented")
+}
+func (*UnimplementedVolthaServiceServer) EnableDevice(ctx context.Context, req *common.ID) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method EnableDevice not implemented")
+}
+func (*UnimplementedVolthaServiceServer) DisableDevice(ctx context.Context, req *common.ID) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DisableDevice not implemented")
+}
+func (*UnimplementedVolthaServiceServer) RebootDevice(ctx context.Context, req *common.ID) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RebootDevice not implemented")
+}
+func (*UnimplementedVolthaServiceServer) DeleteDevice(ctx context.Context, req *common.ID) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteDevice not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ForceDeleteDevice(ctx context.Context, req *common.ID) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ForceDeleteDevice not implemented")
+}
+func (*UnimplementedVolthaServiceServer) DownloadImage(ctx context.Context, req *ImageDownload) (*common.OperationResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DownloadImage not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetImageDownloadStatus(ctx context.Context, req *ImageDownload) (*ImageDownload, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetImageDownloadStatus not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetImageDownload(ctx context.Context, req *ImageDownload) (*ImageDownload, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetImageDownload not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListImageDownloads(ctx context.Context, req *common.ID) (*ImageDownloads, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListImageDownloads not implemented")
+}
+func (*UnimplementedVolthaServiceServer) CancelImageDownload(ctx context.Context, req *ImageDownload) (*common.OperationResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CancelImageDownload not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ActivateImageUpdate(ctx context.Context, req *ImageDownload) (*common.OperationResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ActivateImageUpdate not implemented")
+}
+func (*UnimplementedVolthaServiceServer) RevertImageUpdate(ctx context.Context, req *ImageDownload) (*common.OperationResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method RevertImageUpdate not implemented")
+}
+func (*UnimplementedVolthaServiceServer) DownloadImageToDevice(ctx context.Context, req *DeviceImageDownloadRequest) (*DeviceImageResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DownloadImageToDevice not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetImageStatus(ctx context.Context, req *DeviceImageRequest) (*DeviceImageResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetImageStatus not implemented")
+}
+func (*UnimplementedVolthaServiceServer) AbortImageUpgradeToDevice(ctx context.Context, req *DeviceImageRequest) (*DeviceImageResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method AbortImageUpgradeToDevice not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetOnuImages(ctx context.Context, req *common.ID) (*OnuImages, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetOnuImages not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ActivateImage(ctx context.Context, req *DeviceImageRequest) (*DeviceImageResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ActivateImage not implemented")
+}
+func (*UnimplementedVolthaServiceServer) CommitImage(ctx context.Context, req *DeviceImageRequest) (*DeviceImageResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CommitImage not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListDevicePorts(ctx context.Context, req *common.ID) (*Ports, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListDevicePorts not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListDevicePmConfigs(ctx context.Context, req *common.ID) (*PmConfigs, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListDevicePmConfigs not implemented")
+}
+func (*UnimplementedVolthaServiceServer) UpdateDevicePmConfigs(ctx context.Context, req *PmConfigs) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdateDevicePmConfigs not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListDeviceFlows(ctx context.Context, req *common.ID) (*openflow_13.Flows, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListDeviceFlows not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListDeviceFlowGroups(ctx context.Context, req *common.ID) (*openflow_13.FlowGroups, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListDeviceFlowGroups not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListDeviceTypes(ctx context.Context, req *empty.Empty) (*DeviceTypes, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListDeviceTypes not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetDeviceType(ctx context.Context, req *common.ID) (*DeviceType, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetDeviceType not implemented")
+}
+func (*UnimplementedVolthaServiceServer) StreamPacketsOut(srv VolthaService_StreamPacketsOutServer) error {
+	return status.Errorf(codes.Unimplemented, "method StreamPacketsOut not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ReceivePacketsIn(req *empty.Empty, srv VolthaService_ReceivePacketsInServer) error {
+	return status.Errorf(codes.Unimplemented, "method ReceivePacketsIn not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ReceiveChangeEvents(req *empty.Empty, srv VolthaService_ReceiveChangeEventsServer) error {
+	return status.Errorf(codes.Unimplemented, "method ReceiveChangeEvents not implemented")
+}
+func (*UnimplementedVolthaServiceServer) CreateEventFilter(ctx context.Context, req *EventFilter) (*EventFilter, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method CreateEventFilter not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetEventFilter(ctx context.Context, req *common.ID) (*EventFilters, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetEventFilter not implemented")
+}
+func (*UnimplementedVolthaServiceServer) UpdateEventFilter(ctx context.Context, req *EventFilter) (*EventFilter, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method UpdateEventFilter not implemented")
+}
+func (*UnimplementedVolthaServiceServer) DeleteEventFilter(ctx context.Context, req *EventFilter) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DeleteEventFilter not implemented")
+}
+func (*UnimplementedVolthaServiceServer) ListEventFilters(ctx context.Context, req *empty.Empty) (*EventFilters, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method ListEventFilters not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetImages(ctx context.Context, req *common.ID) (*Images, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetImages not implemented")
+}
+func (*UnimplementedVolthaServiceServer) SelfTest(ctx context.Context, req *common.ID) (*SelfTestResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SelfTest not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetMibDeviceData(ctx context.Context, req *common.ID) (*omci.MibDeviceData, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetMibDeviceData not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetAlarmDeviceData(ctx context.Context, req *common.ID) (*omci.AlarmDeviceData, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetAlarmDeviceData not implemented")
+}
+func (*UnimplementedVolthaServiceServer) SimulateAlarm(ctx context.Context, req *SimulateAlarmRequest) (*common.OperationResp, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SimulateAlarm not implemented")
+}
+func (*UnimplementedVolthaServiceServer) EnablePort(ctx context.Context, req *Port) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method EnablePort not implemented")
+}
+func (*UnimplementedVolthaServiceServer) DisablePort(ctx context.Context, req *Port) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method DisablePort not implemented")
+}
+func (*UnimplementedVolthaServiceServer) GetExtValue(ctx context.Context, req *extension.ValueSpecifier) (*extension.ReturnValues, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method GetExtValue not implemented")
+}
+func (*UnimplementedVolthaServiceServer) SetExtValue(ctx context.Context, req *extension.ValueSet) (*empty.Empty, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method SetExtValue not implemented")
+}
+func (*UnimplementedVolthaServiceServer) StartOmciTestAction(ctx context.Context, req *omci.OmciTestRequest) (*omci.TestResponse, error) {
+	return nil, status.Errorf(codes.Unimplemented, "method StartOmciTestAction not implemented")
+}
+
+func RegisterVolthaServiceServer(s *grpc.Server, srv VolthaServiceServer) {
+	s.RegisterService(&_VolthaService_serviceDesc, srv)
+}
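+
+// Illustrative usage sketch (not part of the generated code): a concrete
+// server embeds UnimplementedVolthaServiceServer for forward compatibility,
+// overrides the methods it supports, and is registered on a grpc.Server.
+// The type myVolthaServer and the listen address below are hypothetical,
+// and the snippet assumes the usual context, net, empty and grpc imports.
+//
+//	type myVolthaServer struct {
+//		UnimplementedVolthaServiceServer
+//	}
+//
+//	func (s *myVolthaServer) GetVoltha(ctx context.Context, _ *empty.Empty) (*Voltha, error) {
+//		return &Voltha{}, nil
+//	}
+//
+//	func serve() error {
+//		lis, err := net.Listen("tcp", ":50057")
+//		if err != nil {
+//			return err
+//		}
+//		gs := grpc.NewServer()
+//		RegisterVolthaServiceServer(gs, &myVolthaServer{})
+//		return gs.Serve(lis)
+//	}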
+
+func _VolthaService_GetVoltha_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetVoltha(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetVoltha",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetVoltha(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListCoreInstances_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListCoreInstances(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListCoreInstances",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListCoreInstances(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetCoreInstance_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetCoreInstance(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetCoreInstance",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetCoreInstance(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListAdapters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListAdapters(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListAdapters",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListAdapters(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListLogicalDevices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListLogicalDevices(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListLogicalDevices",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListLogicalDevices(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetLogicalDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetLogicalDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetLogicalDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetLogicalDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListLogicalDevicePorts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListLogicalDevicePorts(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListLogicalDevicePorts",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListLogicalDevicePorts(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetLogicalDevicePort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LogicalPortId)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetLogicalDevicePort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetLogicalDevicePort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetLogicalDevicePort(ctx, req.(*LogicalPortId))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_EnableLogicalDevicePort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LogicalPortId)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).EnableLogicalDevicePort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/EnableLogicalDevicePort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).EnableLogicalDevicePort(ctx, req.(*LogicalPortId))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_DisableLogicalDevicePort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(LogicalPortId)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).DisableLogicalDevicePort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/DisableLogicalDevicePort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).DisableLogicalDevicePort(ctx, req.(*LogicalPortId))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListLogicalDeviceFlows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListLogicalDeviceFlows(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListLogicalDeviceFlows",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListLogicalDeviceFlows(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_UpdateLogicalDeviceFlowTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(openflow_13.FlowTableUpdate)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).UpdateLogicalDeviceFlowTable(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/UpdateLogicalDeviceFlowTable",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).UpdateLogicalDeviceFlowTable(ctx, req.(*openflow_13.FlowTableUpdate))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_UpdateLogicalDeviceMeterTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(openflow_13.MeterModUpdate)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).UpdateLogicalDeviceMeterTable(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/UpdateLogicalDeviceMeterTable",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).UpdateLogicalDeviceMeterTable(ctx, req.(*openflow_13.MeterModUpdate))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListLogicalDeviceMeters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListLogicalDeviceMeters(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListLogicalDeviceMeters",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListLogicalDeviceMeters(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListLogicalDeviceFlowGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListLogicalDeviceFlowGroups(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListLogicalDeviceFlowGroups",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListLogicalDeviceFlowGroups(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_UpdateLogicalDeviceFlowGroupTable_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(openflow_13.FlowGroupTableUpdate)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).UpdateLogicalDeviceFlowGroupTable(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/UpdateLogicalDeviceFlowGroupTable",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).UpdateLogicalDeviceFlowGroupTable(ctx, req.(*openflow_13.FlowGroupTableUpdate))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDevices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDevices(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDevices",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDevices(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDeviceIds_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDeviceIds(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDeviceIds",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDeviceIds(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ReconcileDevices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.IDs)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ReconcileDevices(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ReconcileDevices",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ReconcileDevices(ctx, req.(*common.IDs))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_CreateDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Device)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).CreateDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/CreateDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).CreateDevice(ctx, req.(*Device))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_EnableDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).EnableDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/EnableDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).EnableDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_DisableDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).DisableDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/DisableDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).DisableDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_RebootDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).RebootDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/RebootDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).RebootDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_DeleteDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).DeleteDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/DeleteDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).DeleteDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ForceDeleteDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ForceDeleteDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ForceDeleteDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ForceDeleteDevice(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_DownloadImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ImageDownload)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).DownloadImage(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/DownloadImage",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).DownloadImage(ctx, req.(*ImageDownload))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetImageDownloadStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ImageDownload)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetImageDownloadStatus(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetImageDownloadStatus",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetImageDownloadStatus(ctx, req.(*ImageDownload))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetImageDownload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ImageDownload)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetImageDownload(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetImageDownload",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetImageDownload(ctx, req.(*ImageDownload))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListImageDownloads_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListImageDownloads(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListImageDownloads",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListImageDownloads(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_CancelImageDownload_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ImageDownload)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).CancelImageDownload(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/CancelImageDownload",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).CancelImageDownload(ctx, req.(*ImageDownload))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ActivateImageUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ImageDownload)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ActivateImageUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ActivateImageUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ActivateImageUpdate(ctx, req.(*ImageDownload))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_RevertImageUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(ImageDownload)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).RevertImageUpdate(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/RevertImageUpdate",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).RevertImageUpdate(ctx, req.(*ImageDownload))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_DownloadImageToDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeviceImageDownloadRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).DownloadImageToDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/DownloadImageToDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).DownloadImageToDevice(ctx, req.(*DeviceImageDownloadRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetImageStatus_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeviceImageRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetImageStatus(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetImageStatus",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetImageStatus(ctx, req.(*DeviceImageRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_AbortImageUpgradeToDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeviceImageRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).AbortImageUpgradeToDevice(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/AbortImageUpgradeToDevice",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).AbortImageUpgradeToDevice(ctx, req.(*DeviceImageRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetOnuImages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetOnuImages(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetOnuImages",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetOnuImages(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ActivateImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeviceImageRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ActivateImage(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ActivateImage",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ActivateImage(ctx, req.(*DeviceImageRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_CommitImage_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(DeviceImageRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).CommitImage(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/CommitImage",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).CommitImage(ctx, req.(*DeviceImageRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDevicePorts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDevicePorts(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDevicePorts",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDevicePorts(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDevicePmConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDevicePmConfigs(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDevicePmConfigs",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDevicePmConfigs(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_UpdateDevicePmConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(PmConfigs)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).UpdateDevicePmConfigs(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/UpdateDevicePmConfigs",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).UpdateDevicePmConfigs(ctx, req.(*PmConfigs))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDeviceFlows_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDeviceFlows(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDeviceFlows",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDeviceFlows(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDeviceFlowGroups_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDeviceFlowGroups(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDeviceFlowGroups",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDeviceFlowGroups(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListDeviceTypes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListDeviceTypes(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListDeviceTypes",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListDeviceTypes(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetDeviceType_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetDeviceType(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetDeviceType",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetDeviceType(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_StreamPacketsOut_Handler(srv interface{}, stream grpc.ServerStream) error {
+	return srv.(VolthaServiceServer).StreamPacketsOut(&volthaServiceStreamPacketsOutServer{stream})
+}
+
+type VolthaService_StreamPacketsOutServer interface {
+	SendAndClose(*empty.Empty) error
+	Recv() (*openflow_13.PacketOut, error)
+	grpc.ServerStream
+}
+
+type volthaServiceStreamPacketsOutServer struct {
+	grpc.ServerStream
+}
+
+func (x *volthaServiceStreamPacketsOutServer) SendAndClose(m *empty.Empty) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func (x *volthaServiceStreamPacketsOutServer) Recv() (*openflow_13.PacketOut, error) {
+	m := new(openflow_13.PacketOut)
+	if err := x.ServerStream.RecvMsg(m); err != nil {
+		return nil, err
+	}
+	return m, nil
+}
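+
+// Illustrative sketch (not generated code): StreamPacketsOut is a
+// client-streaming method, so an implementation typically reads PacketOut
+// messages from the stream until io.EOF and then closes it with an empty
+// response. myVolthaServer is a hypothetical implementation type and the
+// forwarding of the packet is application-specific.
+//
+//	func (s *myVolthaServer) StreamPacketsOut(stream VolthaService_StreamPacketsOutServer) error {
+//		for {
+//			pkt, err := stream.Recv()
+//			if err == io.EOF {
+//				return stream.SendAndClose(&empty.Empty{})
+//			}
+//			if err != nil {
+//				return err
+//			}
+//			_ = pkt // forward the PacketOut to the dataplane (application-specific)
+//		}
+//	}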
+
+func _VolthaService_ReceivePacketsIn_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(empty.Empty)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(VolthaServiceServer).ReceivePacketsIn(m, &volthaServiceReceivePacketsInServer{stream})
+}
+
+type VolthaService_ReceivePacketsInServer interface {
+	Send(*openflow_13.PacketIn) error
+	grpc.ServerStream
+}
+
+type volthaServiceReceivePacketsInServer struct {
+	grpc.ServerStream
+}
+
+func (x *volthaServiceReceivePacketsInServer) Send(m *openflow_13.PacketIn) error {
+	return x.ServerStream.SendMsg(m)
+}
+
+func _VolthaService_ReceiveChangeEvents_Handler(srv interface{}, stream grpc.ServerStream) error {
+	m := new(empty.Empty)
+	if err := stream.RecvMsg(m); err != nil {
+		return err
+	}
+	return srv.(VolthaServiceServer).ReceiveChangeEvents(m, &volthaServiceReceiveChangeEventsServer{stream})
+}
+
+type VolthaService_ReceiveChangeEventsServer interface {
+	Send(*openflow_13.ChangeEvent) error
+	grpc.ServerStream
+}
+
+type volthaServiceReceiveChangeEventsServer struct {
+	grpc.ServerStream
+}
+
+func (x *volthaServiceReceiveChangeEventsServer) Send(m *openflow_13.ChangeEvent) error {
+	return x.ServerStream.SendMsg(m)
+}
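+
+// Illustrative sketch (not generated code): ReceivePacketsIn and
+// ReceiveChangeEvents are server-streaming methods, so an implementation
+// pushes messages with Send until the client's context is cancelled.
+// Here s.events is a hypothetical chan *openflow_13.ChangeEvent used as the
+// event source; any real implementation will differ.
+//
+//	func (s *myVolthaServer) ReceiveChangeEvents(_ *empty.Empty, stream VolthaService_ReceiveChangeEventsServer) error {
+//		for {
+//			select {
+//			case <-stream.Context().Done():
+//				return stream.Context().Err()
+//			case ev := <-s.events:
+//				if err := stream.Send(ev); err != nil {
+//					return err
+//				}
+//			}
+//		}
+//	}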
+
+func _VolthaService_CreateEventFilter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(EventFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).CreateEventFilter(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/CreateEventFilter",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).CreateEventFilter(ctx, req.(*EventFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetEventFilter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetEventFilter(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetEventFilter",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetEventFilter(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_UpdateEventFilter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(EventFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).UpdateEventFilter(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/UpdateEventFilter",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).UpdateEventFilter(ctx, req.(*EventFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_DeleteEventFilter_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(EventFilter)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).DeleteEventFilter(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/DeleteEventFilter",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).DeleteEventFilter(ctx, req.(*EventFilter))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_ListEventFilters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(empty.Empty)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).ListEventFilters(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/ListEventFilters",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).ListEventFilters(ctx, req.(*empty.Empty))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetImages_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetImages(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetImages",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetImages(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_SelfTest_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).SelfTest(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/SelfTest",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).SelfTest(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetMibDeviceData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetMibDeviceData(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetMibDeviceData",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetMibDeviceData(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetAlarmDeviceData_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(common.ID)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetAlarmDeviceData(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetAlarmDeviceData",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetAlarmDeviceData(ctx, req.(*common.ID))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_SimulateAlarm_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(SimulateAlarmRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).SimulateAlarm(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/SimulateAlarm",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).SimulateAlarm(ctx, req.(*SimulateAlarmRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_EnablePort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Port)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).EnablePort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/EnablePort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).EnablePort(ctx, req.(*Port))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_DisablePort_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(Port)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).DisablePort(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/DisablePort",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).DisablePort(ctx, req.(*Port))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_GetExtValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(extension.ValueSpecifier)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).GetExtValue(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/GetExtValue",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).GetExtValue(ctx, req.(*extension.ValueSpecifier))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_SetExtValue_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(extension.ValueSet)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).SetExtValue(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/SetExtValue",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).SetExtValue(ctx, req.(*extension.ValueSet))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+func _VolthaService_StartOmciTestAction_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
+	in := new(omci.OmciTestRequest)
+	if err := dec(in); err != nil {
+		return nil, err
+	}
+	if interceptor == nil {
+		return srv.(VolthaServiceServer).StartOmciTestAction(ctx, in)
+	}
+	info := &grpc.UnaryServerInfo{
+		Server:     srv,
+		FullMethod: "/voltha.VolthaService/StartOmciTestAction",
+	}
+	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
+		return srv.(VolthaServiceServer).StartOmciTestAction(ctx, req.(*omci.OmciTestRequest))
+	}
+	return interceptor(ctx, in, info, handler)
+}
+
+var _VolthaService_serviceDesc = grpc.ServiceDesc{
+	ServiceName: "voltha.VolthaService",
+	HandlerType: (*VolthaServiceServer)(nil),
+	Methods: []grpc.MethodDesc{
+		{
+			MethodName: "GetVoltha",
+			Handler:    _VolthaService_GetVoltha_Handler,
+		},
+		{
+			MethodName: "ListCoreInstances",
+			Handler:    _VolthaService_ListCoreInstances_Handler,
+		},
+		{
+			MethodName: "GetCoreInstance",
+			Handler:    _VolthaService_GetCoreInstance_Handler,
+		},
+		{
+			MethodName: "ListAdapters",
+			Handler:    _VolthaService_ListAdapters_Handler,
+		},
+		{
+			MethodName: "ListLogicalDevices",
+			Handler:    _VolthaService_ListLogicalDevices_Handler,
+		},
+		{
+			MethodName: "GetLogicalDevice",
+			Handler:    _VolthaService_GetLogicalDevice_Handler,
+		},
+		{
+			MethodName: "ListLogicalDevicePorts",
+			Handler:    _VolthaService_ListLogicalDevicePorts_Handler,
+		},
+		{
+			MethodName: "GetLogicalDevicePort",
+			Handler:    _VolthaService_GetLogicalDevicePort_Handler,
+		},
+		{
+			MethodName: "EnableLogicalDevicePort",
+			Handler:    _VolthaService_EnableLogicalDevicePort_Handler,
+		},
+		{
+			MethodName: "DisableLogicalDevicePort",
+			Handler:    _VolthaService_DisableLogicalDevicePort_Handler,
+		},
+		{
+			MethodName: "ListLogicalDeviceFlows",
+			Handler:    _VolthaService_ListLogicalDeviceFlows_Handler,
+		},
+		{
+			MethodName: "UpdateLogicalDeviceFlowTable",
+			Handler:    _VolthaService_UpdateLogicalDeviceFlowTable_Handler,
+		},
+		{
+			MethodName: "UpdateLogicalDeviceMeterTable",
+			Handler:    _VolthaService_UpdateLogicalDeviceMeterTable_Handler,
+		},
+		{
+			MethodName: "ListLogicalDeviceMeters",
+			Handler:    _VolthaService_ListLogicalDeviceMeters_Handler,
+		},
+		{
+			MethodName: "ListLogicalDeviceFlowGroups",
+			Handler:    _VolthaService_ListLogicalDeviceFlowGroups_Handler,
+		},
+		{
+			MethodName: "UpdateLogicalDeviceFlowGroupTable",
+			Handler:    _VolthaService_UpdateLogicalDeviceFlowGroupTable_Handler,
+		},
+		{
+			MethodName: "ListDevices",
+			Handler:    _VolthaService_ListDevices_Handler,
+		},
+		{
+			MethodName: "ListDeviceIds",
+			Handler:    _VolthaService_ListDeviceIds_Handler,
+		},
+		{
+			MethodName: "ReconcileDevices",
+			Handler:    _VolthaService_ReconcileDevices_Handler,
+		},
+		{
+			MethodName: "GetDevice",
+			Handler:    _VolthaService_GetDevice_Handler,
+		},
+		{
+			MethodName: "CreateDevice",
+			Handler:    _VolthaService_CreateDevice_Handler,
+		},
+		{
+			MethodName: "EnableDevice",
+			Handler:    _VolthaService_EnableDevice_Handler,
+		},
+		{
+			MethodName: "DisableDevice",
+			Handler:    _VolthaService_DisableDevice_Handler,
+		},
+		{
+			MethodName: "RebootDevice",
+			Handler:    _VolthaService_RebootDevice_Handler,
+		},
+		{
+			MethodName: "DeleteDevice",
+			Handler:    _VolthaService_DeleteDevice_Handler,
+		},
+		{
+			MethodName: "ForceDeleteDevice",
+			Handler:    _VolthaService_ForceDeleteDevice_Handler,
+		},
+		{
+			MethodName: "DownloadImage",
+			Handler:    _VolthaService_DownloadImage_Handler,
+		},
+		{
+			MethodName: "GetImageDownloadStatus",
+			Handler:    _VolthaService_GetImageDownloadStatus_Handler,
+		},
+		{
+			MethodName: "GetImageDownload",
+			Handler:    _VolthaService_GetImageDownload_Handler,
+		},
+		{
+			MethodName: "ListImageDownloads",
+			Handler:    _VolthaService_ListImageDownloads_Handler,
+		},
+		{
+			MethodName: "CancelImageDownload",
+			Handler:    _VolthaService_CancelImageDownload_Handler,
+		},
+		{
+			MethodName: "ActivateImageUpdate",
+			Handler:    _VolthaService_ActivateImageUpdate_Handler,
+		},
+		{
+			MethodName: "RevertImageUpdate",
+			Handler:    _VolthaService_RevertImageUpdate_Handler,
+		},
+		{
+			MethodName: "DownloadImageToDevice",
+			Handler:    _VolthaService_DownloadImageToDevice_Handler,
+		},
+		{
+			MethodName: "GetImageStatus",
+			Handler:    _VolthaService_GetImageStatus_Handler,
+		},
+		{
+			MethodName: "AbortImageUpgradeToDevice",
+			Handler:    _VolthaService_AbortImageUpgradeToDevice_Handler,
+		},
+		{
+			MethodName: "GetOnuImages",
+			Handler:    _VolthaService_GetOnuImages_Handler,
+		},
+		{
+			MethodName: "ActivateImage",
+			Handler:    _VolthaService_ActivateImage_Handler,
+		},
+		{
+			MethodName: "CommitImage",
+			Handler:    _VolthaService_CommitImage_Handler,
+		},
+		{
+			MethodName: "ListDevicePorts",
+			Handler:    _VolthaService_ListDevicePorts_Handler,
+		},
+		{
+			MethodName: "ListDevicePmConfigs",
+			Handler:    _VolthaService_ListDevicePmConfigs_Handler,
+		},
+		{
+			MethodName: "UpdateDevicePmConfigs",
+			Handler:    _VolthaService_UpdateDevicePmConfigs_Handler,
+		},
+		{
+			MethodName: "ListDeviceFlows",
+			Handler:    _VolthaService_ListDeviceFlows_Handler,
+		},
+		{
+			MethodName: "ListDeviceFlowGroups",
+			Handler:    _VolthaService_ListDeviceFlowGroups_Handler,
+		},
+		{
+			MethodName: "ListDeviceTypes",
+			Handler:    _VolthaService_ListDeviceTypes_Handler,
+		},
+		{
+			MethodName: "GetDeviceType",
+			Handler:    _VolthaService_GetDeviceType_Handler,
+		},
+		{
+			MethodName: "CreateEventFilter",
+			Handler:    _VolthaService_CreateEventFilter_Handler,
+		},
+		{
+			MethodName: "GetEventFilter",
+			Handler:    _VolthaService_GetEventFilter_Handler,
+		},
+		{
+			MethodName: "UpdateEventFilter",
+			Handler:    _VolthaService_UpdateEventFilter_Handler,
+		},
+		{
+			MethodName: "DeleteEventFilter",
+			Handler:    _VolthaService_DeleteEventFilter_Handler,
+		},
+		{
+			MethodName: "ListEventFilters",
+			Handler:    _VolthaService_ListEventFilters_Handler,
+		},
+		{
+			MethodName: "GetImages",
+			Handler:    _VolthaService_GetImages_Handler,
+		},
+		{
+			MethodName: "SelfTest",
+			Handler:    _VolthaService_SelfTest_Handler,
+		},
+		{
+			MethodName: "GetMibDeviceData",
+			Handler:    _VolthaService_GetMibDeviceData_Handler,
+		},
+		{
+			MethodName: "GetAlarmDeviceData",
+			Handler:    _VolthaService_GetAlarmDeviceData_Handler,
+		},
+		{
+			MethodName: "SimulateAlarm",
+			Handler:    _VolthaService_SimulateAlarm_Handler,
+		},
+		{
+			MethodName: "EnablePort",
+			Handler:    _VolthaService_EnablePort_Handler,
+		},
+		{
+			MethodName: "DisablePort",
+			Handler:    _VolthaService_DisablePort_Handler,
+		},
+		{
+			MethodName: "GetExtValue",
+			Handler:    _VolthaService_GetExtValue_Handler,
+		},
+		{
+			MethodName: "SetExtValue",
+			Handler:    _VolthaService_SetExtValue_Handler,
+		},
+		{
+			MethodName: "StartOmciTestAction",
+			Handler:    _VolthaService_StartOmciTestAction_Handler,
+		},
+	},
+	Streams: []grpc.StreamDesc{
+		{
+			StreamName:    "StreamPacketsOut",
+			Handler:       _VolthaService_StreamPacketsOut_Handler,
+			ClientStreams: true,
+		},
+		{
+			StreamName:    "ReceivePacketsIn",
+			Handler:       _VolthaService_ReceivePacketsIn_Handler,
+			ServerStreams: true,
+		},
+		{
+			StreamName:    "ReceiveChangeEvents",
+			Handler:       _VolthaService_ReceiveChangeEvents_Handler,
+			ServerStreams: true,
+		},
+	},
+	Metadata: "voltha_protos/voltha.proto",
+}
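+
+// Illustrative usage sketch (not part of the generated code): the service
+// descriptor above is wired into a gRPC server through the generated
+// registration helper, following the standard protoc-gen-go pattern. The
+// server implementation type and listen address below are placeholders.
+//
+//	s := grpc.NewServer()
+//	RegisterVolthaServiceServer(s, &myVolthaServer{})
+//	lis, _ := net.Listen("tcp", ":50057")
+//	_ = s.Serve(lis)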
diff --git a/vendor/github.com/opentracing/opentracing-go/.gitignore b/vendor/github.com/opentracing/opentracing-go/.gitignore
new file mode 100644
index 0000000..c57100a
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/.gitignore
@@ -0,0 +1 @@
+coverage.txt
diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml
new file mode 100644
index 0000000..b950e42
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/.travis.yml
@@ -0,0 +1,20 @@
+language: go
+
+matrix:
+  include:
+  - go: "1.13.x"
+  - go: "1.14.x"
+  - go: "tip"
+    env:
+    - LINT=true
+    - COVERAGE=true
+
+install:
+  - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi
+  - go get -u github.com/stretchr/testify/...
+
+script:
+  - make test
+  - go build ./...
+  - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi
+  - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
new file mode 100644
index 0000000..d3bfcf6
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md
@@ -0,0 +1,63 @@
+Changes by Version
+==================
+
+
+1.2.0 (2020-07-01)
+-------------------
+
+* Restore the ability to reset the current span in context to nil (#231) -- Yuri Shkuro
+* Use error.object per OpenTracing Semantic Conventions (#179) -- Rahman Syed
+* Convert nil pointer log field value to string "nil" (#230) -- Cyril Tovena
+* Add Go module support (#215) -- Zaba505
+* Make SetTag helper types in ext public (#229) -- Blake Edwards
+* Add log/fields helpers for keys from specification (#226) -- Dmitry Monakhov
+* Improve noop implementation (#223) -- chanxuehong
+* Add an extension to Tracer interface for custom go context creation (#220) -- Krzesimir Nowak
+* Fix typo in comments (#222) -- meteorlxy
+* Improve documentation for log.Object() to emphasize the requirement to pass immutable arguments (#219) -- 疯狂的小企鹅
+* [mock] Return ErrInvalidSpanContext if span context is not MockSpanContext (#216) -- Milad Irannejad
+
+
+1.1.0 (2019-03-23)
+-------------------
+
+Notable changes:
+- The library is now released under Apache 2.0 license
+- Using Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159))
+- 'golang.org/x/net/context' is replaced with 'context' from the standard library
+
+List of all changes:
+
+- Export StartSpanFromContextWithTracer (#214) <Aaron Delaney>
+- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) <Mike Goldsmith>
+- Use Set() instead of Add() in HTTPHeadersCarrier (#191) <jeremyxu2010>
+- Update license to Apache 2.0 (#181) <Andrea Kao>
+- Replace 'golang.org/x/net/context' with 'context' (#176) <Tony Ghita>
+- Port of Python opentracing/harness/api_check.py to Go (#146) <chris erway>
+- Fix race condition in MockSpan.Context() (#170) <Brad>
+- Add PeerHostIPv4.SetString() (#155)  <NeoCN>
+- Add a Noop log field type to log to allow for optional fields (#150)  <Matt Ho>
+
+
+1.0.2 (2017-04-26)
+-------------------
+
+- Add more semantic tags (#139) <Rustam Zagirov>
+
+
+1.0.1 (2017-02-06)
+-------------------
+
+- Correct spelling in comments <Ben Sigelman>
+- Address race in nextMockID() (#123) <bill fumerola>
+- log: avoid panic marshaling nil error (#131) <Anthony Voutas>
+- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) <Yuri Shkuro>
+- Drop Go 1.5 that fails in Travis (#129) <Yuri Shkuro>
+- Add convenience methods Key() and Value() to log.Field <Ben Sigelman>
+- Add convenience methods to log.Field (2 years, 6 months ago) <Radu Berinde>
+
+1.0.0 (2016-09-26)
+-------------------
+
+- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec)
+
diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE
new file mode 100644
index 0000000..f002734
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016 The OpenTracing Authors
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile
new file mode 100644
index 0000000..62abb63
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/Makefile
@@ -0,0 +1,20 @@
+.DEFAULT_GOAL := test-and-lint
+
+.PHONY: test-and-lint
+test-and-lint: test lint
+
+.PHONY: test
+test:
+	go test -v -cover -race ./...
+
+.PHONY: cover
+cover:
+	go test -v -coverprofile=coverage.txt -covermode=atomic -race ./...
+
+.PHONY: lint
+lint:
+	go fmt ./...
+	golint ./...
+	@# Run again with magic to exit non-zero if golint outputs anything.
+	@! (golint ./... | read dummy)
+	go vet ./...
diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md
new file mode 100644
index 0000000..6ef1d7c
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/README.md
@@ -0,0 +1,171 @@
+[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go)
+[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge)
+
+# OpenTracing API for Go
+
+This package is a Go platform API for OpenTracing.
+
+## Required Reading
+
+To understand the Go platform API, one must first be familiar with the
+[OpenTracing project](https://opentracing.io) in general and its
+[terminology](https://opentracing.io/specification/) in particular.
+
+## API overview for those adding instrumentation
+
+Everyday consumers of this `opentracing` package really only need to worry
+about a couple of key abstractions: the `StartSpan` function, the `Span`
+interface, and binding a `Tracer` at `main()`-time. Here are code snippets
+demonstrating some important use cases.
+
+#### Singleton initialization
+
+The simplest starting point is `./globaltracer.go`. As early as possible, call
+
+```go
+    import "github.com/opentracing/opentracing-go"
+    import ".../some_tracing_impl"
+
+    func main() {
+        opentracing.SetGlobalTracer(
+            // tracing impl specific:
+            some_tracing_impl.New(...),
+        )
+        ...
+    }
+```
+
+#### Non-Singleton initialization
+
+If you prefer direct control to singletons, manage ownership of the
+`opentracing.Tracer` implementation explicitly.
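+
+A minimal illustrative sketch (the `some_tracing_impl` constructor and
+`NewService` are placeholders, not part of this package):
+
+```go
+    func main() {
+        tracer := some_tracing_impl.New(...)
+        svc := NewService(tracer) // hand the Tracer to the components that need it
+        ...
+    }
+```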
+
+#### Creating a Span given an existing Go `context.Context`
+
+If you use `context.Context` in your application, OpenTracing's Go library will
+happily rely on it for `Span` propagation. To start a new (blocking child)
+`Span`, you can use `StartSpanFromContext`.
+
+```go
+    func xyz(ctx context.Context, ...) {
+        ...
+        span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name")
+        defer span.Finish()
+        span.LogFields(
+            log.String("event", "soft error"),
+            log.String("type", "cache timeout"),
+            log.Int("waited.millis", 1500))
+        ...
+    }
+```
+
+#### Starting an empty trace by creating a "root span"
+
+It's always possible to create a "root" `Span` with no parent or other causal
+reference.
+
+```go
+    func xyz() {
+        ...
+        sp := opentracing.StartSpan("operation_name")
+        defer sp.Finish()
+        ...
+    }
+```
+
+#### Creating a (child) Span given an existing (parent) Span
+
+```go
+    func xyz(parentSpan opentracing.Span, ...) {
+        ...
+        sp := opentracing.StartSpan(
+            "operation_name",
+            opentracing.ChildOf(parentSpan.Context()))
+        defer sp.Finish()
+        ...
+    }
+```
+
+#### Serializing to the wire
+
+```go
+    func makeSomeRequest(ctx context.Context) ... {
+        if span := opentracing.SpanFromContext(ctx); span != nil {
+            httpClient := &http.Client{}
+            httpReq, _ := http.NewRequest("GET", "http://myservice/", nil)
+
+            // Transmit the span's TraceContext as HTTP headers on our
+            // outbound request.
+            opentracing.GlobalTracer().Inject(
+                span.Context(),
+                opentracing.HTTPHeaders,
+                opentracing.HTTPHeadersCarrier(httpReq.Header))
+
+            resp, err := httpClient.Do(httpReq)
+            ...
+        }
+        ...
+    }
+```
+
+#### Deserializing from the wire
+
+```go
+    http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) {
+        var serverSpan opentracing.Span
+        appSpecificOperationName := ...
+        wireContext, err := opentracing.GlobalTracer().Extract(
+            opentracing.HTTPHeaders,
+            opentracing.HTTPHeadersCarrier(req.Header))
+        if err != nil {
+            // Optionally record something about err here
+        }
+
+        // Create the span referring to the RPC client if available.
+        // If wireContext == nil, a root span will be created.
+        serverSpan = opentracing.StartSpan(
+            appSpecificOperationName,
+            ext.RPCServerOption(wireContext))
+
+        defer serverSpan.Finish()
+
+        ctx := opentracing.ContextWithSpan(context.Background(), serverSpan)
+        ...
+    }
+```
+
+#### Conditionally capture a field using `log.Noop`
+
+In some situations, you may want to dynamically decide whether or not
+to log a field.  For example, you may want to capture additional data,
+such as a customer ID, in non-production environments:
+
+```go
+    func Customer(order *Order) log.Field {
+        if os.Getenv("ENVIRONMENT") == "dev" {
+            return log.String("customer", order.Customer.ID)
+        }
+        return log.Noop()
+    }
+```
+
+#### Goroutine-safety
+
+The entire public API is goroutine-safe and does not require external
+synchronization.
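+
+For example (an illustrative sketch, not from the upstream documentation),
+spans can be started and finished from multiple goroutines without any
+additional locking:
+
+```go
+    var wg sync.WaitGroup
+    for i := 0; i < 4; i++ {
+        wg.Add(1)
+        go func() {
+            defer wg.Done()
+            sp := opentracing.StartSpan("worker")
+            defer sp.Finish()
+            // ... do work ...
+        }()
+    }
+    wg.Wait()
+```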
+
+## API pointers for those implementing a tracing system
+
+Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`.
+
+## API compatibility
+
+For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority.
+
+## Tracer test suite
+
+A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly.
+
+## Licensing
+
+[Apache 2.0 License](./LICENSE).
diff --git a/vendor/github.com/opentracing/opentracing-go/ext.go b/vendor/github.com/opentracing/opentracing-go/ext.go
new file mode 100644
index 0000000..e11977e
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext.go
@@ -0,0 +1,24 @@
+package opentracing
+
+import (
+	"context"
+)
+
+// TracerContextWithSpanExtension is an extension interface that an
+// implementation of the Tracer interface may want to implement. It
+// allows the Tracer to exercise some control over the Go context when
+// ContextWithSpan is invoked.
+//
+// The primary purpose of this extension is to support adapters from the
+// opentracing API to some other tracing API.
+type TracerContextWithSpanExtension interface {
+	// ContextWithSpanHook gets called by the ContextWithSpan
+	// function when the Tracer implementation also implements
+	// this interface. It allows the Tracer to put extra information
+	// into the context and make it available to the callers of
+	// ContextWithSpan.
+	//
+	// This hook is invoked before the ContextWithSpan function
+	// actually puts the span into the context.
+	ContextWithSpanHook(ctx context.Context, span Span) context.Context
+}
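+
+// An illustrative, hypothetical adapter implementation of the hook, where
+// otherTracerSpanKey and toOtherSpan are placeholder names:
+//
+//	func (t *adapterTracer) ContextWithSpanHook(ctx context.Context, span Span) context.Context {
+//		return context.WithValue(ctx, otherTracerSpanKey, toOtherSpan(span))
+//	}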
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field.go b/vendor/github.com/opentracing/opentracing-go/ext/field.go
new file mode 100644
index 0000000..8282bd7
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext/field.go
@@ -0,0 +1,17 @@
+package ext
+
+import (
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/log"
+)
+
+// LogError sets the error=true tag on the Span and logs err as an "error" event.
+func LogError(span opentracing.Span, err error, fields ...log.Field) {
+	Error.Set(span, true)
+	ef := []log.Field{
+		log.Event("error"),
+		log.Error(err),
+	}
+	ef = append(ef, fields...)
+	span.LogFields(ef...)
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
new file mode 100644
index 0000000..a414b59
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
@@ -0,0 +1,215 @@
+package ext
+
+import "github.com/opentracing/opentracing-go"
+
+// These constants define common tag names recommended for better portability across
+// tracing systems and languages/platforms.
+//
+// The tag names are defined as typed strings, so that in addition to the usual use
+//
+//     span.setTag(TagName, value)
+//
+// they also support value type validation via this additional syntax:
+//
+//    TagName.Set(span, value)
+//
+var (
+	//////////////////////////////////////////////////////////////////////
+	// SpanKind (client/server or producer/consumer)
+	//////////////////////////////////////////////////////////////////////
+
+	// SpanKind hints at relationship between spans, e.g. client/server
+	SpanKind = spanKindTagName("span.kind")
+
+	// SpanKindRPCClient marks a span representing the client-side of an RPC
+	// or other remote call
+	SpanKindRPCClientEnum = SpanKindEnum("client")
+	SpanKindRPCClient     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum}
+
+	// SpanKindRPCServer marks a span representing the server-side of an RPC
+	// or other remote call
+	SpanKindRPCServerEnum = SpanKindEnum("server")
+	SpanKindRPCServer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum}
+
+	// SpanKindProducer marks a span representing the producer-side of a
+	// message bus
+	SpanKindProducerEnum = SpanKindEnum("producer")
+	SpanKindProducer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum}
+
+	// SpanKindConsumer marks a span representing the consumer-side of a
+	// message bus
+	SpanKindConsumerEnum = SpanKindEnum("consumer")
+	SpanKindConsumer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum}
+
+	//////////////////////////////////////////////////////////////////////
+	// Component name
+	//////////////////////////////////////////////////////////////////////
+
+	// Component is a low-cardinality identifier of the module, library,
+	// or package that is generating a span.
+	Component = StringTagName("component")
+
+	//////////////////////////////////////////////////////////////////////
+	// Sampling hint
+	//////////////////////////////////////////////////////////////////////
+
+	// SamplingPriority determines the priority of sampling this Span.
+	SamplingPriority = Uint16TagName("sampling.priority")
+
+	//////////////////////////////////////////////////////////////////////
+	// Peer tags. These tags can be emitted by either client-side or
+	// server-side to describe the other side/service in a peer-to-peer
+	// communications, like an RPC call.
+	//////////////////////////////////////////////////////////////////////
+
+	// PeerService records the service name of the peer.
+	PeerService = StringTagName("peer.service")
+
+	// PeerAddress records the address of the peer. This may be an "ip:port",
+	// a bare "hostname", an FQDN, or even a database DSN substring
+	// like "mysql://username@127.0.0.1:3306/dbname".
+	PeerAddress = StringTagName("peer.address")
+
+	// PeerHostname records the host name of the peer
+	PeerHostname = StringTagName("peer.hostname")
+
+	// PeerHostIPv4 records IP v4 host address of the peer
+	PeerHostIPv4 = IPv4TagName("peer.ipv4")
+
+	// PeerHostIPv6 records IP v6 host address of the peer
+	PeerHostIPv6 = StringTagName("peer.ipv6")
+
+	// PeerPort records port number of the peer
+	PeerPort = Uint16TagName("peer.port")
+
+	//////////////////////////////////////////////////////////////////////
+	// HTTP Tags
+	//////////////////////////////////////////////////////////////////////
+
+	// HTTPUrl should be the URL of the request being handled in this segment
+	// of the trace, in standard URI format. The protocol is optional.
+	HTTPUrl = StringTagName("http.url")
+
+	// HTTPMethod is the HTTP method of the request, and is case-insensitive.
+	HTTPMethod = StringTagName("http.method")
+
+	// HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the
+	// HTTP response.
+	HTTPStatusCode = Uint16TagName("http.status_code")
+
+	//////////////////////////////////////////////////////////////////////
+	// DB Tags
+	//////////////////////////////////////////////////////////////////////
+
+	// DBInstance is database instance name.
+	DBInstance = StringTagName("db.instance")
+
+	// DBStatement is a database statement for the given database type.
+	// It can be a query or a prepared statement (i.e., before substitution).
+	DBStatement = StringTagName("db.statement")
+
+	// DBType is a database type. For any SQL database, "sql".
+	// For others, the lower-case database category, e.g. "redis"
+	DBType = StringTagName("db.type")
+
+	// DBUser is a username for accessing database.
+	DBUser = StringTagName("db.user")
+
+	//////////////////////////////////////////////////////////////////////
+	// Message Bus Tag
+	//////////////////////////////////////////////////////////////////////
+
+	// MessageBusDestination is an address at which messages can be exchanged
+	MessageBusDestination = StringTagName("message_bus.destination")
+
+	//////////////////////////////////////////////////////////////////////
+	// Error Tag
+	//////////////////////////////////////////////////////////////////////
+
+	// Error indicates that operation represented by the span resulted in an error.
+	Error = BoolTagName("error")
+)
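+
+// For example (illustrative usage of the typed helpers defined above):
+//
+//	ext.Component.Set(span, "my-library")
+//	ext.HTTPStatusCode.Set(span, 200)
+//	ext.Error.Set(span, true)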
+
+// ---
+
+// SpanKindEnum represents common span types
+type SpanKindEnum string
+
+type spanKindTagName string
+
+// Set adds a string tag to the `span`
+func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) {
+	span.SetTag(string(tag), value)
+}
+
+type rpcServerOption struct {
+	clientContext opentracing.SpanContext
+}
+
+func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) {
+	if r.clientContext != nil {
+		opentracing.ChildOf(r.clientContext).Apply(o)
+	}
+	SpanKindRPCServer.Apply(o)
+}
+
+// RPCServerOption returns a StartSpanOption appropriate for an RPC server span
+// with `client` representing the metadata for the remote peer Span if available.
+// In case client == nil, due to the client not being instrumented, this RPC
+// server span will be a root span.
+func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption {
+	return rpcServerOption{client}
+}
+
+// ---
+
+// StringTagName is a common tag name to be set to a string value
+type StringTagName string
+
+// Set adds a string tag to the `span`
+func (tag StringTagName) Set(span opentracing.Span, value string) {
+	span.SetTag(string(tag), value)
+}
+
+// ---
+
+// Uint32TagName is a common tag name to be set to a uint32 value
+type Uint32TagName string
+
+// Set adds a uint32 tag to the `span`
+func (tag Uint32TagName) Set(span opentracing.Span, value uint32) {
+	span.SetTag(string(tag), value)
+}
+
+// ---
+
+// Uint16TagName is a common tag name to be set to a uint16 value
+type Uint16TagName string
+
+// Set adds a uint16 tag to the `span`
+func (tag Uint16TagName) Set(span opentracing.Span, value uint16) {
+	span.SetTag(string(tag), value)
+}
+
+// ---
+
+// BoolTagName is a common tag name to be set to a bool value
+type BoolTagName string
+
+// Set adds a bool tag to the `span`
+func (tag BoolTagName) Set(span opentracing.Span, value bool) {
+	span.SetTag(string(tag), value)
+}
+
+// IPv4TagName is a common tag name to be set to an ipv4 value
+type IPv4TagName string
+
+// Set adds the IPv4 host address of the peer to the `span` as a uint32 value; this is kept for backward and Zipkin compatibility.
+func (tag IPv4TagName) Set(span opentracing.Span, value uint32) {
+	span.SetTag(string(tag), value)
+}
+
+// SetString records the IPv4 host address of the peer on the `span` as a dot-separated string, e.g. "127.0.0.1".
+func (tag IPv4TagName) SetString(span opentracing.Span, value string) {
+	span.SetTag(string(tag), value)
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go
new file mode 100644
index 0000000..4f7066a
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go
@@ -0,0 +1,42 @@
+package opentracing
+
+type registeredTracer struct {
+	tracer       Tracer
+	isRegistered bool
+}
+
+var (
+	globalTracer = registeredTracer{NoopTracer{}, false}
+)
+
+// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by
+// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an
+// opentracing.Tracer instance) should call SetGlobalTracer as early as
+// possible in main(), prior to calling the `StartSpan` global func below.
+// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan`
+// (etc) globals are noops.
+func SetGlobalTracer(tracer Tracer) {
+	globalTracer = registeredTracer{tracer, true}
+}
+
+// GlobalTracer returns the global singleton `Tracer` implementation.
+// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop
+// implementation that drops all data handed to it.
+func GlobalTracer() Tracer {
+	return globalTracer.tracer
+}
+
+// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`.
+func StartSpan(operationName string, opts ...StartSpanOption) Span {
+	return globalTracer.tracer.StartSpan(operationName, opts...)
+}
+
+// InitGlobalTracer is deprecated. Please use SetGlobalTracer.
+func InitGlobalTracer(tracer Tracer) {
+	SetGlobalTracer(tracer)
+}
+
+// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered
+func IsGlobalTracerRegistered() bool {
+	return globalTracer.isRegistered
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/go.mod b/vendor/github.com/opentracing/opentracing-go/go.mod
new file mode 100644
index 0000000..bf48bb5
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/go.mod
@@ -0,0 +1,5 @@
+module github.com/opentracing/opentracing-go
+
+go 1.14
+
+require github.com/stretchr/testify v1.3.0
diff --git a/vendor/github.com/opentracing/opentracing-go/go.sum b/vendor/github.com/opentracing/opentracing-go/go.sum
new file mode 100644
index 0000000..4347755
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/go.sum
@@ -0,0 +1,7 @@
+github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go
new file mode 100644
index 0000000..1831bc9
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go
@@ -0,0 +1,65 @@
+package opentracing
+
+import "context"
+
+type contextKey struct{}
+
+var activeSpanKey = contextKey{}
+
+// ContextWithSpan returns a new `context.Context` that holds a reference to
+// the span. If span is nil, a new context without an active span is returned.
+func ContextWithSpan(ctx context.Context, span Span) context.Context {
+	if span != nil {
+		if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok {
+			ctx = tracerWithHook.ContextWithSpanHook(ctx, span)
+		}
+	}
+	return context.WithValue(ctx, activeSpanKey, span)
+}
+
+// SpanFromContext returns the `Span` previously associated with `ctx`, or
+// `nil` if no such `Span` could be found.
+//
+// NOTE: context.Context != SpanContext: the former is Go's intra-process
+// context propagation mechanism, and the latter houses OpenTracing's per-Span
+// identity and baggage information.
+func SpanFromContext(ctx context.Context) Span {
+	val := ctx.Value(activeSpanKey)
+	if sp, ok := val.(Span); ok {
+		return sp
+	}
+	return nil
+}
+
+// StartSpanFromContext starts and returns a Span with `operationName`, using
+// any Span found within `ctx` as a ChildOfRef. If no such parent could be
+// found, StartSpanFromContext creates a root (parentless) Span.
+//
+// The second return value is a context.Context object built around the
+// returned Span.
+//
+// Example usage:
+//
+//    SomeFunction(ctx context.Context, ...) {
+//        sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction")
+//        defer sp.Finish()
+//        ...
+//    }
+func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+	return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...)
+}
+
+// StartSpanFromContextWithTracer starts and returns a span with `operationName`
+// using a span found within the context as a ChildOfRef. If no such span
+// exists, it creates a root span. It also returns a context.Context object built
+// around the returned span.
+//
+// Its behavior is identical to StartSpanFromContext, except that it takes an
+// explicit tracer rather than using the global tracer.
+func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) {
+	if parentSpan := SpanFromContext(ctx); parentSpan != nil {
+		opts = append(opts, ChildOf(parentSpan.Context()))
+	}
+	span := tracer.StartSpan(operationName, opts...)
+	return span, ContextWithSpan(ctx, span)
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go
new file mode 100644
index 0000000..f222ded
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/log/field.go
@@ -0,0 +1,282 @@
+package log
+
+import (
+	"fmt"
+	"math"
+)
+
+type fieldType int
+
+const (
+	stringType fieldType = iota
+	boolType
+	intType
+	int32Type
+	uint32Type
+	int64Type
+	uint64Type
+	float32Type
+	float64Type
+	errorType
+	objectType
+	lazyLoggerType
+	noopType
+)
+
+// Field instances are constructed via LogBool, LogString, and so on.
+// Tracing implementations may then handle them via the Field.Marshal
+// method.
+//
+// "heavily influenced by" (i.e., partially stolen from)
+// https://github.com/uber-go/zap
+type Field struct {
+	key          string
+	fieldType    fieldType
+	numericVal   int64
+	stringVal    string
+	interfaceVal interface{}
+}
+
+// String adds a string-valued key:value pair to a Span.LogFields() record
+func String(key, val string) Field {
+	return Field{
+		key:       key,
+		fieldType: stringType,
+		stringVal: val,
+	}
+}
+
+// Bool adds a bool-valued key:value pair to a Span.LogFields() record
+func Bool(key string, val bool) Field {
+	var numericVal int64
+	if val {
+		numericVal = 1
+	}
+	return Field{
+		key:        key,
+		fieldType:  boolType,
+		numericVal: numericVal,
+	}
+}
+
+// Int adds an int-valued key:value pair to a Span.LogFields() record
+func Int(key string, val int) Field {
+	return Field{
+		key:        key,
+		fieldType:  intType,
+		numericVal: int64(val),
+	}
+}
+
+// Int32 adds an int32-valued key:value pair to a Span.LogFields() record
+func Int32(key string, val int32) Field {
+	return Field{
+		key:        key,
+		fieldType:  int32Type,
+		numericVal: int64(val),
+	}
+}
+
+// Int64 adds an int64-valued key:value pair to a Span.LogFields() record
+func Int64(key string, val int64) Field {
+	return Field{
+		key:        key,
+		fieldType:  int64Type,
+		numericVal: val,
+	}
+}
+
+// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record
+func Uint32(key string, val uint32) Field {
+	return Field{
+		key:        key,
+		fieldType:  uint32Type,
+		numericVal: int64(val),
+	}
+}
+
+// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record
+func Uint64(key string, val uint64) Field {
+	return Field{
+		key:        key,
+		fieldType:  uint64Type,
+		numericVal: int64(val),
+	}
+}
+
+// Float32 adds a float32-valued key:value pair to a Span.LogFields() record
+func Float32(key string, val float32) Field {
+	return Field{
+		key:        key,
+		fieldType:  float32Type,
+		numericVal: int64(math.Float32bits(val)),
+	}
+}
+
+// Float64 adds a float64-valued key:value pair to a Span.LogFields() record
+func Float64(key string, val float64) Field {
+	return Field{
+		key:        key,
+		fieldType:  float64Type,
+		numericVal: int64(math.Float64bits(val)),
+	}
+}
+
+// Error adds an error with the key "error.object" to a Span.LogFields() record
+func Error(err error) Field {
+	return Field{
+		key:          "error.object",
+		fieldType:    errorType,
+		interfaceVal: err,
+	}
+}
+
+// Object adds an object-valued key:value pair to a Span.LogFields() record
+// Please pass in an immutable object, otherwise there may be concurrency issues.
+// For example, passing a map to log.Object may result in "fatal error: concurrent
+// map iteration and map write": because the span is sent asynchronously, the map
+// may be modified concurrently while it is being marshaled.
+func Object(key string, obj interface{}) Field {
+	return Field{
+		key:          key,
+		fieldType:    objectType,
+		interfaceVal: obj,
+	}
+}
+
+// Event creates a string-valued Field for span logs with key="event" and value=val.
+func Event(val string) Field {
+	return String("event", val)
+}
+
+// Message creates a string-valued Field for span logs with key="message" and value=val.
+func Message(val string) Field {
+	return String("message", val)
+}
+
+// LazyLogger allows for user-defined, late-bound logging of arbitrary data
+type LazyLogger func(fv Encoder)
+
+// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing
+// implementation will call the LazyLogger function at an indefinite time in
+// the future (after Lazy() returns).
+func Lazy(ll LazyLogger) Field {
+	return Field{
+		fieldType:    lazyLoggerType,
+		interfaceVal: ll,
+	}
+}
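+
+// An illustrative usage sketch (computeExpensiveValue is a placeholder):
+//
+//	span.LogFields(log.Lazy(func(fv log.Encoder) {
+//		fv.EmitString("expensive", computeExpensiveValue())
+//	}))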
+
+// Noop creates a no-op log field that should be ignored by the tracer.
+// It can be used to capture optional fields, for example those that should
+// only be logged in non-production environments:
+//
+//     func customerField(order *Order) log.Field {
+//          if os.Getenv("ENVIRONMENT") == "dev" {
+//              return log.String("customer", order.Customer.ID)
+//          }
+//          return log.Noop()
+//     }
+//
+//     span.LogFields(log.String("event", "purchase"), customerField(order))
+//
+func Noop() Field {
+	return Field{
+		fieldType: noopType,
+	}
+}
+
+// Encoder allows access to the contents of a Field (via a call to
+// Field.Marshal).
+//
+// Tracer implementations typically provide an implementation of Encoder;
+// OpenTracing callers typically do not need to concern themselves with it.
+type Encoder interface {
+	EmitString(key, value string)
+	EmitBool(key string, value bool)
+	EmitInt(key string, value int)
+	EmitInt32(key string, value int32)
+	EmitInt64(key string, value int64)
+	EmitUint32(key string, value uint32)
+	EmitUint64(key string, value uint64)
+	EmitFloat32(key string, value float32)
+	EmitFloat64(key string, value float64)
+	EmitObject(key string, value interface{})
+	EmitLazyLogger(value LazyLogger)
+}
+
+// Marshal passes a Field instance through to the appropriate
+// field-type-specific method of an Encoder.
+func (lf Field) Marshal(visitor Encoder) {
+	switch lf.fieldType {
+	case stringType:
+		visitor.EmitString(lf.key, lf.stringVal)
+	case boolType:
+		visitor.EmitBool(lf.key, lf.numericVal != 0)
+	case intType:
+		visitor.EmitInt(lf.key, int(lf.numericVal))
+	case int32Type:
+		visitor.EmitInt32(lf.key, int32(lf.numericVal))
+	case int64Type:
+		visitor.EmitInt64(lf.key, int64(lf.numericVal))
+	case uint32Type:
+		visitor.EmitUint32(lf.key, uint32(lf.numericVal))
+	case uint64Type:
+		visitor.EmitUint64(lf.key, uint64(lf.numericVal))
+	case float32Type:
+		visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal)))
+	case float64Type:
+		visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal)))
+	case errorType:
+		if err, ok := lf.interfaceVal.(error); ok {
+			visitor.EmitString(lf.key, err.Error())
+		} else {
+			visitor.EmitString(lf.key, "<nil>")
+		}
+	case objectType:
+		visitor.EmitObject(lf.key, lf.interfaceVal)
+	case lazyLoggerType:
+		visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger))
+	case noopType:
+		// intentionally left blank
+	}
+}
+
+// Key returns the field's key.
+func (lf Field) Key() string {
+	return lf.key
+}
+
+// Value returns the field's value as interface{}.
+func (lf Field) Value() interface{} {
+	switch lf.fieldType {
+	case stringType:
+		return lf.stringVal
+	case boolType:
+		return lf.numericVal != 0
+	case intType:
+		return int(lf.numericVal)
+	case int32Type:
+		return int32(lf.numericVal)
+	case int64Type:
+		return int64(lf.numericVal)
+	case uint32Type:
+		return uint32(lf.numericVal)
+	case uint64Type:
+		return uint64(lf.numericVal)
+	case float32Type:
+		return math.Float32frombits(uint32(lf.numericVal))
+	case float64Type:
+		return math.Float64frombits(uint64(lf.numericVal))
+	case errorType, objectType, lazyLoggerType:
+		return lf.interfaceVal
+	case noopType:
+		return nil
+	default:
+		return nil
+	}
+}
+
+// String returns a string representation of the key and value.
+func (lf Field) String() string {
+	return fmt.Sprint(lf.key, ":", lf.Value())
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go
new file mode 100644
index 0000000..d57e28a
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/log/util.go
@@ -0,0 +1,61 @@
+package log
+
+import (
+	"fmt"
+	"reflect"
+)
+
+// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice
+// a la Span.LogFields().
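+//
+// For example (an illustrative sketch):
+//
+//     fields, err := log.InterleavedKVToFields("event", "timeout", "waited.millis", 1500)
+//     // fields is now equivalent to
+//     // []log.Field{log.String("event", "timeout"), log.Int("waited.millis", 1500)}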
+func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) {
+	if len(keyValues)%2 != 0 {
+		return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues))
+	}
+	fields := make([]Field, len(keyValues)/2)
+	for i := 0; i*2 < len(keyValues); i++ {
+		key, ok := keyValues[i*2].(string)
+		if !ok {
+			return nil, fmt.Errorf(
+				"non-string key (pair #%d): %T",
+				i, keyValues[i*2])
+		}
+		switch typedVal := keyValues[i*2+1].(type) {
+		case bool:
+			fields[i] = Bool(key, typedVal)
+		case string:
+			fields[i] = String(key, typedVal)
+		case int:
+			fields[i] = Int(key, typedVal)
+		case int8:
+			fields[i] = Int32(key, int32(typedVal))
+		case int16:
+			fields[i] = Int32(key, int32(typedVal))
+		case int32:
+			fields[i] = Int32(key, typedVal)
+		case int64:
+			fields[i] = Int64(key, typedVal)
+		case uint:
+			fields[i] = Uint64(key, uint64(typedVal))
+		case uint64:
+			fields[i] = Uint64(key, typedVal)
+		case uint8:
+			fields[i] = Uint32(key, uint32(typedVal))
+		case uint16:
+			fields[i] = Uint32(key, uint32(typedVal))
+		case uint32:
+			fields[i] = Uint32(key, typedVal)
+		case float32:
+			fields[i] = Float32(key, typedVal)
+		case float64:
+			fields[i] = Float64(key, typedVal)
+		default:
+			if typedVal == nil || (reflect.ValueOf(typedVal).Kind() == reflect.Ptr && reflect.ValueOf(typedVal).IsNil()) {
+				fields[i] = String(key, "nil")
+				continue
+			}
+			// When in doubt, coerce to a string
+			fields[i] = String(key, fmt.Sprint(typedVal))
+		}
+	}
+	return fields, nil
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go
new file mode 100644
index 0000000..f9b680a
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/noop.go
@@ -0,0 +1,64 @@
+package opentracing
+
+import "github.com/opentracing/opentracing-go/log"
+
+// A NoopTracer is a trivial, minimum overhead implementation of Tracer
+// for which all operations are no-ops.
+//
+// The primary use of this implementation is in libraries, such as RPC
+// frameworks, that make tracing an optional feature controlled by the
+// end user. A no-op implementation allows said libraries to use it
+// as the default Tracer and to write instrumentation that does
+// not need to keep checking if the tracer instance is nil.
+//
+// For the same reason, the NoopTracer is the default "global" tracer
+// (see GlobalTracer and SetGlobalTracer functions).
+//
+// WARNING: NoopTracer does not support baggage propagation.
+type NoopTracer struct{}
+
+type noopSpan struct{}
+type noopSpanContext struct{}
+
+var (
+	defaultNoopSpanContext SpanContext = noopSpanContext{}
+	defaultNoopSpan        Span        = noopSpan{}
+	defaultNoopTracer      Tracer      = NoopTracer{}
+)
+
+const (
+	emptyString = ""
+)
+
+// noopSpanContext:
+func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
+
+// noopSpan:
+func (n noopSpan) Context() SpanContext                                  { return defaultNoopSpanContext }
+func (n noopSpan) SetBaggageItem(key, val string) Span                   { return n }
+func (n noopSpan) BaggageItem(key string) string                         { return emptyString }
+func (n noopSpan) SetTag(key string, value interface{}) Span             { return n }
+func (n noopSpan) LogFields(fields ...log.Field)                         {}
+func (n noopSpan) LogKV(keyVals ...interface{})                          {}
+func (n noopSpan) Finish()                                               {}
+func (n noopSpan) FinishWithOptions(opts FinishOptions)                  {}
+func (n noopSpan) SetOperationName(operationName string) Span            { return n }
+func (n noopSpan) Tracer() Tracer                                        { return defaultNoopTracer }
+func (n noopSpan) LogEvent(event string)                                 {}
+func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {}
+func (n noopSpan) Log(data LogData)                                      {}
+
+// StartSpan belongs to the Tracer interface.
+func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span {
+	return defaultNoopSpan
+}
+
+// Inject belongs to the Tracer interface.
+func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error {
+	return nil
+}
+
+// Extract belongs to the Tracer interface.
+func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) {
+	return nil, ErrSpanContextNotFound
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go
new file mode 100644
index 0000000..b0c275e
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/propagation.go
@@ -0,0 +1,176 @@
+package opentracing
+
+import (
+	"errors"
+	"net/http"
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// CORE PROPAGATION INTERFACES:
+///////////////////////////////////////////////////////////////////////////////
+
+var (
+	// ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or
+	// Tracer.Extract() is not recognized by the Tracer implementation.
+	ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format")
+
+	// ErrSpanContextNotFound occurs when the `carrier` passed to
+	// Tracer.Extract() is valid and uncorrupted but has insufficient
+	// information to extract a SpanContext.
+	ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier")
+
+	// ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to
+	// operate on a SpanContext which it is not prepared to handle (for
+	// example, since it was created by a different tracer implementation).
+	ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer")
+
+	// ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract()
+	// implementations expect a different type of `carrier` than they are
+	// given.
+	ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier")
+
+	// ErrSpanContextCorrupted occurs when the `carrier` passed to
+	// Tracer.Extract() is of the expected type but is corrupted.
+	ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier")
+)
+
+///////////////////////////////////////////////////////////////////////////////
+// BUILTIN PROPAGATION FORMATS:
+///////////////////////////////////////////////////////////////////////////////
+
+// BuiltinFormat is used to demarcate the values within package `opentracing`
+// that are intended for use with the Tracer.Inject() and Tracer.Extract()
+// methods.
+type BuiltinFormat byte
+
+const (
+	// Binary represents SpanContexts as opaque binary data.
+	//
+	// For Tracer.Inject(): the carrier must be an `io.Writer`.
+	//
+	// For Tracer.Extract(): the carrier must be an `io.Reader`.
+	Binary BuiltinFormat = iota
+
+	// TextMap represents SpanContexts as key:value string pairs.
+	//
+	// Unlike HTTPHeaders, the TextMap format does not restrict the key or
+	// value character sets in any way.
+	//
+	// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+	//
+	// For Tracer.Extract(): the carrier must be a `TextMapReader`.
+	TextMap
+
+	// HTTPHeaders represents SpanContexts as HTTP header string pairs.
+	//
+	// Unlike TextMap, the HTTPHeaders format requires that the keys and values
+	// be valid as HTTP headers as-is (i.e., character casing may be unstable
+	// and special characters are disallowed in keys, values should be
+	// URL-escaped, etc).
+	//
+	// For Tracer.Inject(): the carrier must be a `TextMapWriter`.
+	//
+	// For Tracer.Extract(): the carrier must be a `TextMapReader`.
+	//
+	// See HTTPHeadersCarrier for an implementation of both TextMapWriter
+	// and TextMapReader that defers to an http.Header instance for storage.
+	// For example, Inject():
+	//
+	//    carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+	//    err := span.Tracer().Inject(
+	//        span.Context(), opentracing.HTTPHeaders, carrier)
+	//
+	// Or Extract():
+	//
+	//    carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+	//    clientContext, err := tracer.Extract(
+	//        opentracing.HTTPHeaders, carrier)
+	//
+	HTTPHeaders
+)
+
+// TextMapWriter is the Inject() carrier for the TextMap builtin format. With
+// it, the caller can encode a SpanContext for propagation as entries in a map
+// of unicode strings.
+type TextMapWriter interface {
+	// Set a key:value pair to the carrier. Multiple calls to Set() for the
+	// same key leads to undefined behavior.
+	//
+	// NOTE: The backing store for the TextMapWriter may contain data unrelated
+	// to SpanContext. As such, Inject() and Extract() implementations that
+	// call the TextMapWriter and TextMapReader interfaces must agree on a
+	// prefix or other convention to distinguish their own key:value pairs.
+	Set(key, val string)
+}
+
+// TextMapReader is the Extract() carrier for the TextMap builtin format. With it,
+// the caller can decode a propagated SpanContext as entries in a map of
+// unicode strings.
+type TextMapReader interface {
+	// ForeachKey returns TextMap contents via repeated calls to the `handler`
+	// function. If any call to `handler` returns a non-nil error, ForeachKey
+	// terminates and returns that error.
+	//
+	// NOTE: The backing store for the TextMapReader may contain data unrelated
+	// to SpanContext. As such, Inject() and Extract() implementations that
+	// call the TextMapWriter and TextMapReader interfaces must agree on a
+	// prefix or other convention to distinguish their own key:value pairs.
+	//
+	// The "foreach" callback pattern reduces unnecessary copying in some cases
+	// and also allows implementations to hold locks while the map is read.
+	ForeachKey(handler func(key, val string) error) error
+}
+
+// TextMapCarrier allows the use of regular map[string]string
+// as both TextMapWriter and TextMapReader.
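+//
+// For example (an illustrative sketch; `span` and `tracer` are assumed to be
+// defined by the caller):
+//
+//     carrier := opentracing.TextMapCarrier{}
+//     err := tracer.Inject(span.Context(), opentracing.TextMap, carrier)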
+type TextMapCarrier map[string]string
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
+	for k, v := range c {
+		if err := handler(k, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Set implements Set() of opentracing.TextMapWriter
+func (c TextMapCarrier) Set(key, val string) {
+	c[key] = val
+}
+
+// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader.
+//
+// Example usage for server side:
+//
+//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+//     clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+//
+// Example usage for client side:
+//
+//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+//     err := tracer.Inject(
+//         span.Context(),
+//         opentracing.HTTPHeaders,
+//         carrier)
+//
+type HTTPHeadersCarrier http.Header
+
+// Set conforms to the TextMapWriter interface.
+func (c HTTPHeadersCarrier) Set(key, val string) {
+	h := http.Header(c)
+	h.Set(key, val)
+}
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
+	for k, vals := range c {
+		for _, v := range vals {
+			if err := handler(k, v); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go
new file mode 100644
index 0000000..0d3fb53
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/span.go
@@ -0,0 +1,189 @@
+package opentracing
+
+import (
+	"time"
+
+	"github.com/opentracing/opentracing-go/log"
+)
+
+// SpanContext represents Span state that must propagate to descendant Spans and across process
+// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
+type SpanContext interface {
+	// ForeachBaggageItem grants access to all baggage items stored in the
+	// SpanContext.
+	// The handler function will be called for each baggage key/value pair.
+	// The ordering of items is not guaranteed.
+	//
+	// The bool return value indicates if the handler wants to continue iterating
+	// through the rest of the baggage items; for example if the handler is trying to
+	// find some baggage item by pattern matching the name, it can return false
+	// as soon as the item is found to stop further iterations.
+	ForeachBaggageItem(handler func(k, v string) bool)
+}
+
+// Span represents an active, un-finished span in the OpenTracing system.
+//
+// Spans are created by the Tracer interface.
+type Span interface {
+	// Sets the end timestamp and finalizes Span state.
+	//
+	// With the exception of calls to Context() (which are always allowed),
+	// Finish() must be the last call made to any span instance, and to do
+	// otherwise leads to undefined behavior.
+	Finish()
+	// FinishWithOptions is like Finish() but with explicit control over
+	// timestamps and log data.
+	FinishWithOptions(opts FinishOptions)
+
+	// Context() yields the SpanContext for this Span. Note that the return
+	// value of Context() is still valid after a call to Span.Finish(), as is
+	// a call to Span.Context() after a call to Span.Finish().
+	Context() SpanContext
+
+	// Sets or changes the operation name.
+	//
+	// Returns a reference to this Span for chaining.
+	SetOperationName(operationName string) Span
+
+	// Adds a tag to the span.
+	//
+	// If there is a pre-existing tag set for `key`, it is overwritten.
+	//
+	// Tag values can be numeric types, strings, or bools. The behavior of
+	// other tag value types is undefined at the OpenTracing level. If a
+	// tracing system does not know how to handle a particular value type, it
+	// may ignore the tag, but shall not panic.
+	//
+	// Returns a reference to this Span for chaining.
+	SetTag(key string, value interface{}) Span
+
+	// LogFields is an efficient and type-checked way to record key:value
+	// logging data about a Span, though the programming interface is a little
+	// more verbose than LogKV(). Here's an example:
+	//
+	//    span.LogFields(
+	//        log.String("event", "soft error"),
+	//        log.String("type", "cache timeout"),
+	//        log.Int("waited.millis", 1500))
+	//
+	// Also see Span.FinishWithOptions() and FinishOptions.BulkLogData.
+	LogFields(fields ...log.Field)
+
+	// LogKV is a concise, readable way to record key:value logging data about
+	// a Span, though unfortunately this also makes it less efficient and less
+	// type-safe than LogFields(). Here's an example:
+	//
+	//    span.LogKV(
+	//        "event", "soft error",
+	//        "type", "cache timeout",
+	//        "waited.millis", 1500)
+	//
+	// For LogKV (as opposed to LogFields()), the parameters must appear as
+	// key-value pairs, like
+	//
+	//    span.LogKV(key1, val1, key2, val2, key3, val3, ...)
+	//
+	// The keys must all be strings. The values may be strings, numeric types,
+	// bools, Go error instances, or arbitrary structs.
+	//
+	// (Note to implementors: consider the log.InterleavedKVToFields() helper)
+	LogKV(alternatingKeyValues ...interface{})
+
+	// SetBaggageItem sets a key:value pair on this Span and its SpanContext
+	// that also propagates to descendants of this Span.
+	//
+	// SetBaggageItem() enables powerful functionality given a full-stack
+	// opentracing integration (e.g., arbitrary application data from a mobile
+	// app can make it, transparently, all the way into the depths of a storage
+	// system), and with it some powerful costs: use this feature with care.
+	//
+	// IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to
+	// *future* causal descendants of the associated Span.
+	//
+	// IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and
+	// value is copied into every local *and remote* child of the associated
+	// Span, and that can add up to a lot of network and cpu overhead.
+	//
+	// Returns a reference to this Span for chaining.
+	SetBaggageItem(restrictedKey, value string) Span
+
+	// Gets the value for a baggage item given its key. Returns the empty string
+	// if the value isn't found in this Span.
+	BaggageItem(restrictedKey string) string
+
+	// Provides access to the Tracer that created this Span.
+	Tracer() Tracer
+
+	// Deprecated: use LogFields or LogKV
+	LogEvent(event string)
+	// Deprecated: use LogFields or LogKV
+	LogEventWithPayload(event string, payload interface{})
+	// Deprecated: use LogFields or LogKV
+	Log(data LogData)
+}
+
+// LogRecord is data associated with a single Span log. Every LogRecord
+// instance must specify at least one Field.
+type LogRecord struct {
+	Timestamp time.Time
+	Fields    []log.Field
+}
+
+// FinishOptions allows Span.FinishWithOptions callers to override the finish
+// timestamp and provide log data via a bulk interface.
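+//
+// For example (an illustrative sketch; `span` and `finishTime` are assumed to
+// be defined by the caller):
+//
+//     span.FinishWithOptions(opentracing.FinishOptions{
+//         FinishTime: finishTime,
+//     })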
+type FinishOptions struct {
+	// FinishTime overrides the Span's finish time, or implicitly becomes
+	// time.Now() if FinishTime.IsZero().
+	//
+	// FinishTime must resolve to a timestamp that's >= the Span's StartTime
+	// (per StartSpanOptions).
+	FinishTime time.Time
+
+	// LogRecords allows the caller to specify the contents of many LogFields()
+	// calls with a single slice. May be nil.
+	//
+	// None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must
+	// be set explicitly). Also, they must be >= the Span's start timestamp and
+	// <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the
+	// behavior of FinishWithOptions() is undefined.
+	//
+	// If specified, the caller hands off ownership of LogRecords at
+	// FinishWithOptions() invocation time.
+	//
+	// If specified, the (deprecated) BulkLogData must be nil or empty.
+	LogRecords []LogRecord
+
+	// BulkLogData is DEPRECATED.
+	BulkLogData []LogData
+}
+
+// LogData is DEPRECATED
+type LogData struct {
+	Timestamp time.Time
+	Event     string
+	Payload   interface{}
+}
+
+// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord
+func (ld *LogData) ToLogRecord() LogRecord {
+	var literalTimestamp time.Time
+	if ld.Timestamp.IsZero() {
+		literalTimestamp = time.Now()
+	} else {
+		literalTimestamp = ld.Timestamp
+	}
+	rval := LogRecord{
+		Timestamp: literalTimestamp,
+	}
+	if ld.Payload == nil {
+		rval.Fields = []log.Field{
+			log.String("event", ld.Event),
+		}
+	} else {
+		rval.Fields = []log.Field{
+			log.String("event", ld.Event),
+			log.Object("payload", ld.Payload),
+		}
+	}
+	return rval
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go
new file mode 100644
index 0000000..715f0ce
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/tracer.go
@@ -0,0 +1,304 @@
+package opentracing
+
+import "time"
+
+// Tracer is a simple, thin interface for Span creation and SpanContext
+// propagation.
+type Tracer interface {
+
+	// Create, start, and return a new Span with the given `operationName` and
+	// incorporate the given StartSpanOption `opts`. (Note that `opts` borrows
+	// from the "functional options" pattern, per
+	// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis)
+	//
+	// A Span with no SpanReference options (e.g., opentracing.ChildOf() or
+	// opentracing.FollowsFrom()) becomes the root of its own trace.
+	//
+	// Examples:
+	//
+	//     var tracer opentracing.Tracer = ...
+	//
+	//     // The root-span case:
+	//     sp := tracer.StartSpan("GetFeed")
+	//
+	//     // The vanilla child span case:
+	//     sp := tracer.StartSpan(
+	//         "GetFeed",
+	//         opentracing.ChildOf(parentSpan.Context()))
+	//
+	//     // All the bells and whistles:
+	//     sp := tracer.StartSpan(
+	//         "GetFeed",
+	//         opentracing.ChildOf(parentSpan.Context()),
+	//         opentracing.Tag{"user_agent", loggedReq.UserAgent},
+	//         opentracing.StartTime(loggedReq.Timestamp),
+	//     )
+	//
+	StartSpan(operationName string, opts ...StartSpanOption) Span
+
+	// Inject() takes the `sm` SpanContext instance and injects it for
+	// propagation within `carrier`. The actual type of `carrier` depends on
+	// the value of `format`.
+	//
+	// OpenTracing defines a common set of `format` values (see BuiltinFormat),
+	// and each has an expected carrier type.
+	//
+	// Other packages may declare their own `format` values, much like the keys
+	// used by `context.Context` (see https://godoc.org/context#WithValue).
+	//
+	// Example usage (sans error handling):
+	//
+	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+	//     err := tracer.Inject(
+	//         span.Context(),
+	//         opentracing.HTTPHeaders,
+	//         carrier)
+	//
+	// NOTE: All opentracing.Tracer implementations MUST support all
+	// BuiltinFormats.
+	//
+	// Implementations may return opentracing.ErrUnsupportedFormat if `format`
+	// is not supported by (or not known by) the implementation.
+	//
+	// Implementations may return opentracing.ErrInvalidCarrier or any other
+	// implementation-specific error if the format is supported but injection
+	// fails anyway.
+	//
+	// See Tracer.Extract().
+	Inject(sm SpanContext, format interface{}, carrier interface{}) error
+
+	// Extract() returns a SpanContext instance given `format` and `carrier`.
+	//
+	// OpenTracing defines a common set of `format` values (see BuiltinFormat),
+	// and each has an expected carrier type.
+	//
+	// Other packages may declare their own `format` values, much like the keys
+	// used by `context.Context` (see
+	// https://godoc.org/golang.org/x/net/context#WithValue).
+	//
+	// Example usage (with StartSpan):
+	//
+	//
+	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+	//     clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+	//
+	//     // ... assuming the ultimate goal here is to resume the trace with a
+	//     // server-side Span:
+	//     var serverSpan opentracing.Span
+	//     if err == nil {
+	//         span = tracer.StartSpan(
+	//             rpcMethodName, ext.RPCServerOption(clientContext))
+	//     } else {
+	//         span = tracer.StartSpan(rpcMethodName)
+	//     }
+	//
+	//
+	// NOTE: All opentracing.Tracer implementations MUST support all
+	// BuiltinFormats.
+	//
+	// Return values:
+	//  - A successful Extract returns a SpanContext instance and a nil error
+	//  - If there was simply no SpanContext to extract in `carrier`, Extract()
+	//    returns (nil, opentracing.ErrSpanContextNotFound)
+	//  - If `format` is unsupported or unrecognized, Extract() returns (nil,
+	//    opentracing.ErrUnsupportedFormat)
+	//  - If there are more fundamental problems with the `carrier` object,
+	//    Extract() may return opentracing.ErrInvalidCarrier,
+	//    opentracing.ErrSpanContextCorrupted, or implementation-specific
+	//    errors.
+	//
+	// See Tracer.Inject().
+	Extract(format interface{}, carrier interface{}) (SpanContext, error)
+}
+
+// StartSpanOptions allows Tracer.StartSpan() callers and implementors a
+// mechanism to override the start timestamp, specify Span References, and make
+// a single Tag or multiple Tags available at Span start time.
+//
+// StartSpan() callers should look at the StartSpanOption interface and
+// implementations available in this package.
+//
+// Tracer implementations can convert a slice of `StartSpanOption` instances
+// into a `StartSpanOptions` struct like so:
+//
+//     func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+//         sso := opentracing.StartSpanOptions{}
+//         for _, o := range opts {
+//             o.Apply(&sso)
+//         }
+//         ...
+//     }
+//
+type StartSpanOptions struct {
+	// Zero or more causal references to other Spans (via their SpanContext).
+	// If empty, start a "root" Span (i.e., start a new trace).
+	References []SpanReference
+
+	// StartTime overrides the Span's start time, or implicitly becomes
+	// time.Now() if StartTime.IsZero().
+	StartTime time.Time
+
+	// Tags may have zero or more entries; the restrictions on map values are
+	// identical to those for Span.SetTag(). May be nil.
+	//
+	// If specified, the caller hands off ownership of Tags at
+	// StartSpan() invocation time.
+	Tags map[string]interface{}
+}
+
+// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
+//
+// StartSpanOption borrows from the "functional options" pattern, per
+// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type StartSpanOption interface {
+	Apply(*StartSpanOptions)
+}
+
+// SpanReferenceType is an enum type describing different categories of
+// relationships between two Spans. If Span-2 refers to Span-1, the
+// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
+// ChildOfRef means that Span-1 created Span-2.
+//
+// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
+// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
+// or Span-2 may be sitting in a distributed queue behind Span-1.
+type SpanReferenceType int
+
+const (
+	// ChildOfRef refers to a parent Span that caused *and* somehow depends
+	// upon the new child Span. Often (but not always), the parent Span cannot
+	// finish until the child Span does.
+	//
+	// A timing diagram for a ChildOfRef that's blocked on the new Span:
+	//
+	//     [-Parent Span---------]
+	//          [-Child Span----]
+	//
+	// See http://opentracing.io/spec/
+	//
+	// See opentracing.ChildOf()
+	ChildOfRef SpanReferenceType = iota
+
+	// FollowsFromRef refers to a parent Span that does not depend in any way
+	// on the result of the new child Span. For instance, one might use
+	// FollowsFromRefs to describe pipeline stages separated by queues,
+	// or a fire-and-forget cache insert at the tail end of a web request.
+	//
+	// A FollowsFromRef Span is part of the same logical trace as the new Span:
+	// i.e., the new Span is somehow caused by the work of its FollowsFromRef.
+	//
+	// All of the following could be valid timing diagrams for children that
+	// "FollowFrom" a parent.
+	//
+	//     [-Parent Span-]  [-Child Span-]
+	//
+	//
+	//     [-Parent Span--]
+	//      [-Child Span-]
+	//
+	//
+	//     [-Parent Span-]
+	//                 [-Child Span-]
+	//
+	// See http://opentracing.io/spec/
+	//
+	// See opentracing.FollowsFrom()
+	FollowsFromRef
+)
+
+// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
+// referenced SpanContext. See the SpanReferenceType documentation for
+// supported relationships.  If SpanReference is created with
+// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
+// syntax for starting spans:
+//
+//     sc, _ := tracer.Extract(someFormat, someCarrier)
+//     span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
+//
+// The `ChildOf(sc)` option above will not panic if sc == nil, it will just
+// not add the parent span reference to the options.
+type SpanReference struct {
+	Type              SpanReferenceType
+	ReferencedContext SpanContext
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (r SpanReference) Apply(o *StartSpanOptions) {
+	if r.ReferencedContext != nil {
+		o.References = append(o.References, r)
+	}
+}
+
+// ChildOf returns a StartSpanOption pointing to a dependent parent span.
+// If sc == nil, the option has no effect.
+//
+// See ChildOfRef, SpanReference
+func ChildOf(sc SpanContext) SpanReference {
+	return SpanReference{
+		Type:              ChildOfRef,
+		ReferencedContext: sc,
+	}
+}
+
+// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
+// the child Span but does not directly depend on its result in any way.
+// If sc == nil, the option has no effect.
+//
+// See FollowsFromRef, SpanReference
+func FollowsFrom(sc SpanContext) SpanReference {
+	return SpanReference{
+		Type:              FollowsFromRef,
+		ReferencedContext: sc,
+	}
+}
+
+// StartTime is a StartSpanOption that sets an explicit start timestamp for the
+// new Span.
+type StartTime time.Time
+
+// Apply satisfies the StartSpanOption interface.
+func (t StartTime) Apply(o *StartSpanOptions) {
+	o.StartTime = time.Time(t)
+}
+
+// Tags are a generic map from an arbitrary string key to an opaque value type.
+// The underlying tracing system is responsible for interpreting and
+// serializing the values.
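+//
+// For example (an illustrative sketch):
+//
+//     sp := tracer.StartSpan("GetFeed", opentracing.Tags{"user.id": 1234, "retry": true})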
+type Tags map[string]interface{}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tags) Apply(o *StartSpanOptions) {
+	if o.Tags == nil {
+		o.Tags = make(map[string]interface{})
+	}
+	for k, v := range t {
+		o.Tags[k] = v
+	}
+}
+
+// Tag may be passed as a StartSpanOption to add a tag to new spans,
+// or its Set method may be used to apply the tag to an existing Span,
+// for example:
+//
+// tracer.StartSpan("opName", Tag{"key", value})
+//
+//   or
+//
+// Tag{"key", value}.Set(span)
+type Tag struct {
+	Key   string
+	Value interface{}
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (t Tag) Apply(o *StartSpanOptions) {
+	if o.Tags == nil {
+		o.Tags = make(map[string]interface{})
+	}
+	o.Tags[t.Key] = t.Value
+}
+
+// Set applies the tag to an existing Span.
+func (t Tag) Set(s Span) {
+	s.SetTag(t.Key, t.Value)
+}
diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore
new file mode 100644
index 0000000..daf913b
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.gitignore
@@ -0,0 +1,24 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml
new file mode 100644
index 0000000..9159de0
--- /dev/null
+++ b/vendor/github.com/pkg/errors/.travis.yml
@@ -0,0 +1,10 @@
+language: go
+go_import_path: github.com/pkg/errors
+go:
+  - 1.11.x
+  - 1.12.x
+  - 1.13.x
+  - tip
+
+script:
+  - make check
diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE
new file mode 100644
index 0000000..835ba3e
--- /dev/null
+++ b/vendor/github.com/pkg/errors/LICENSE
@@ -0,0 +1,23 @@
+Copyright (c) 2015, Dave Cheney <dave@cheney.net>
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+  list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice,
+  this list of conditions and the following disclaimer in the documentation
+  and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile
new file mode 100644
index 0000000..ce9d7cd
--- /dev/null
+++ b/vendor/github.com/pkg/errors/Makefile
@@ -0,0 +1,44 @@
+PKGS := github.com/pkg/errors
+SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS))
+GO := go
+
+check: test vet gofmt misspell unconvert staticcheck ineffassign unparam
+
+test: 
+	$(GO) test $(PKGS)
+
+vet: | test
+	$(GO) vet $(PKGS)
+
+staticcheck:
+	$(GO) get honnef.co/go/tools/cmd/staticcheck
+	staticcheck -checks all $(PKGS)
+
+misspell:
+	$(GO) get github.com/client9/misspell/cmd/misspell
+	misspell \
+		-locale GB \
+		-error \
+		*.md *.go
+
+unconvert:
+	$(GO) get github.com/mdempsky/unconvert
+	unconvert -v $(PKGS)
+
+ineffassign:
+	$(GO) get github.com/gordonklaus/ineffassign
+	find $(SRCDIRS) -name '*.go' | xargs ineffassign
+
+pedantic: check errcheck
+
+unparam:
+	$(GO) get mvdan.cc/unparam
+	unparam ./...
+
+errcheck:
+	$(GO) get github.com/kisielk/errcheck
+	errcheck $(PKGS)
+
+gofmt:  
+	@echo Checking code is gofmted
+	@test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)"
diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md
new file mode 100644
index 0000000..54dfdcb
--- /dev/null
+++ b/vendor/github.com/pkg/errors/README.md
@@ -0,0 +1,59 @@
+# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge)
+
+Package errors provides simple error handling primitives.
+
+`go get github.com/pkg/errors`
+
+The traditional error handling idiom in Go is roughly akin to
+```go
+if err != nil {
+        return err
+}
+```
+which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error.
+
+## Adding context to an error
+
+The errors.Wrap function returns a new error that adds context to the original error. For example
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+        return errors.Wrap(err, "read failed")
+}
+```
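+
+`errors.Wrap` also records a stack trace at the point it is called. As a minimal sketch (assuming `fmt` and `ioutil` are imported), printing the wrapped error with the `%+v` verb includes that stack trace:
+```go
+_, err := ioutil.ReadAll(r)
+if err != nil {
+        err = errors.Wrap(err, "read failed")
+        fmt.Printf("%+v\n", err) // message followed by each stack frame
+}
+```
+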
+## Retrieving the cause of an error
+
+Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`.
+```go
+type causer interface {
+        Cause() error
+}
+```
+`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example:
+```go
+switch err := errors.Cause(err).(type) {
+case *MyError:
+        // handle specifically
+default:
+        // unknown error
+}
+```
+
+[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors).
+
+## Roadmap
+
+With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows:
+
+- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible)
+- 1.0. Final release.
+
+## Contributing
+
+Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. 
+
+Before sending a PR, please discuss your change by raising an issue.
+
+## License
+
+BSD-2-Clause
diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml
new file mode 100644
index 0000000..a932ead
--- /dev/null
+++ b/vendor/github.com/pkg/errors/appveyor.yml
@@ -0,0 +1,32 @@
+version: build-{build}.{branch}
+
+clone_folder: C:\gopath\src\github.com\pkg\errors
+shallow_clone: true # for startup speed
+
+environment:
+  GOPATH: C:\gopath
+
+platform:
+  - x64
+
+# http://www.appveyor.com/docs/installed-software
+install:
+  # some helpful output for debugging builds
+  - go version
+  - go env
+  # pre-installed MinGW at C:\MinGW is 32bit only
+  # but MSYS2 at C:\msys64 has mingw64
+  - set PATH=C:\msys64\mingw64\bin;%PATH%
+  - gcc --version
+  - g++ --version
+
+build_script:
+  - go install -v ./...
+
+test_script:
+  - set PATH=C:\gopath\bin;%PATH%
+  - go test -v ./...
+
+#artifacts:
+#  - path: '%GOPATH%\bin\*.exe'
+deploy: off
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 0000000..161aea2
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,288 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+//     if err != nil {
+//             return err
+//     }
+//
+// which when applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// together with the supplied message. For example
+//
+//     _, err := ioutil.ReadAll(r)
+//     if err != nil {
+//             return errors.Wrap(err, "read failed")
+//     }
+//
+// If additional control is required, the errors.WithStack and
+// errors.WithMessage functions destructure errors.Wrap into its component
+// operations: annotating an error with a stack trace and with a message,
+// respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+//     type causer interface {
+//             Cause() error
+//     }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error that does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+//     switch err := errors.Cause(err).(type) {
+//     case *MyError:
+//             // handle specifically
+//     default:
+//             // unknown error
+//     }
+//
+// Although the causer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported:
+//
+//     %s    print the error. If the error has a Cause it will be
+//           printed recursively.
+//     %v    see %s
+//     %+v   extended format. Each Frame of the error's StackTrace will
+//           be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface:
+//
+//     type stackTracer interface {
+//             StackTrace() errors.StackTrace
+//     }
+//
+// The returned errors.StackTrace type is defined as
+//
+//     type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+//     if err, ok := err.(stackTracer); ok {
+//             for _, f := range err.StackTrace() {
+//                     fmt.Printf("%+s:%d\n", f, f)
+//             }
+//     }
+//
+// Although the stackTracer interface is not exported by this package, it is
+// considered a part of its stable public interface.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+	"fmt"
+	"io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+	return &fundamental{
+		msg:   message,
+		stack: callers(),
+	}
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+	return &fundamental{
+		msg:   fmt.Sprintf(format, args...),
+		stack: callers(),
+	}
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+	msg string
+	*stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			io.WriteString(s, f.msg)
+			f.stack.Format(s, verb)
+			return
+		}
+		fallthrough
+	case 's':
+		io.WriteString(s, f.msg)
+	case 'q':
+		fmt.Fprintf(s, "%q", f.msg)
+	}
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+	if err == nil {
+		return nil
+	}
+	return &withStack{
+		err,
+		callers(),
+	}
+}
+
+type withStack struct {
+	error
+	*stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withStack) Unwrap() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			fmt.Fprintf(s, "%+v", w.Cause())
+			w.stack.Format(s, verb)
+			return
+		}
+		fallthrough
+	case 's':
+		io.WriteString(s, w.Error())
+	case 'q':
+		fmt.Fprintf(s, "%q", w.Error())
+	}
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
+func Wrap(err error, message string) error {
+	if err == nil {
+		return nil
+	}
+	err = &withMessage{
+		cause: err,
+		msg:   message,
+	}
+	return &withStack{
+		err,
+		callers(),
+	}
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+	if err == nil {
+		return nil
+	}
+	err = &withMessage{
+		cause: err,
+		msg:   fmt.Sprintf(format, args...),
+	}
+	return &withStack{
+		err,
+		callers(),
+	}
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+	if err == nil {
+		return nil
+	}
+	return &withMessage{
+		cause: err,
+		msg:   message,
+	}
+}
+
+// WithMessagef annotates err with the format specifier.
+// If err is nil, WithMessagef returns nil.
+func WithMessagef(err error, format string, args ...interface{}) error {
+	if err == nil {
+		return nil
+	}
+	return &withMessage{
+		cause: err,
+		msg:   fmt.Sprintf(format, args...),
+	}
+}
+
+type withMessage struct {
+	cause error
+	msg   string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error  { return w.cause }
+
+// Unwrap provides compatibility for Go 1.13 error chains.
+func (w *withMessage) Unwrap() error { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			fmt.Fprintf(s, "%+v\n", w.Cause())
+			io.WriteString(s, w.msg)
+			return
+		}
+		fallthrough
+	case 's', 'q':
+		io.WriteString(s, w.Error())
+	}
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+//     type causer interface {
+//            Cause() error
+//     }
+//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+	type causer interface {
+		Cause() error
+	}
+
+	for err != nil {
+		cause, ok := err.(causer)
+		if !ok {
+			break
+		}
+		err = cause.Cause()
+	}
+	return err
+}
diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go
new file mode 100644
index 0000000..be0d10d
--- /dev/null
+++ b/vendor/github.com/pkg/errors/go113.go
@@ -0,0 +1,38 @@
+// +build go1.13
+
+package errors
+
+import (
+	stderrors "errors"
+)
+
+// Is reports whether any error in err's chain matches target.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error is considered to match a target if it is equal to that target or if
+// it implements a method Is(error) bool such that Is(target) returns true.
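+//
+// For example (an illustrative sketch):
+//
+//     if errors.Is(err, io.ErrUnexpectedEOF) {
+//             // handle the unexpected-EOF case
+//     }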
+func Is(err, target error) bool { return stderrors.Is(err, target) }
+
+// As finds the first error in err's chain that matches target, and if so, sets
+// target to that error value and returns true.
+//
+// The chain consists of err itself followed by the sequence of errors obtained by
+// repeatedly calling Unwrap.
+//
+// An error matches target if the error's concrete value is assignable to the value
+// pointed to by target, or if the error has a method As(interface{}) bool such that
+// As(target) returns true. In the latter case, the As method is responsible for
+// setting target.
+//
+// As will panic if target is not a non-nil pointer to either a type that implements
+// error, or to any interface type. As returns false if err is nil.
+func As(err error, target interface{}) bool { return stderrors.As(err, target) }
+
+// Unwrap returns the result of calling the Unwrap method on err, if err's
+// type contains an Unwrap method returning error.
+// Otherwise, Unwrap returns nil.
+func Unwrap(err error) error {
+	return stderrors.Unwrap(err)
+}
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 0000000..779a834
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,177 @@
+package errors
+
+import (
+	"fmt"
+	"io"
+	"path"
+	"runtime"
+	"strconv"
+	"strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+// For historical reasons if Frame is interpreted as a uintptr
+// its value represents the program counter + 1.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return "unknown"
+	}
+	file, _ := fn.FileLine(f.pc())
+	return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return 0
+	}
+	_, line := fn.FileLine(f.pc())
+	return line
+}
+
+// name returns the name of this function, if known.
+func (f Frame) name() string {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return "unknown"
+	}
+	return fn.Name()
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+//    %s    source file
+//    %d    source line
+//    %n    function name
+//    %v    equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+//    %+s   function name and path of source file relative to the compile time
+//          GOPATH separated by \n\t (<funcname>\n\t<path>)
+//    %+v   equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 's':
+		switch {
+		case s.Flag('+'):
+			io.WriteString(s, f.name())
+			io.WriteString(s, "\n\t")
+			io.WriteString(s, f.file())
+		default:
+			io.WriteString(s, path.Base(f.file()))
+		}
+	case 'd':
+		io.WriteString(s, strconv.Itoa(f.line()))
+	case 'n':
+		io.WriteString(s, funcname(f.name()))
+	case 'v':
+		f.Format(s, 's')
+		io.WriteString(s, ":")
+		f.Format(s, 'd')
+	}
+}
+
+// MarshalText formats a stacktrace Frame as a text string. The output is the
+// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs.
+func (f Frame) MarshalText() ([]byte, error) {
+	name := f.name()
+	if name == "unknown" {
+		return []byte(name), nil
+	}
+	return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil
+}
+
+// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+//    %s	lists source files for each Frame in the stack
+//    %v	lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+//    %+v   Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		switch {
+		case s.Flag('+'):
+			for _, f := range st {
+				io.WriteString(s, "\n")
+				f.Format(s, verb)
+			}
+		case s.Flag('#'):
+			fmt.Fprintf(s, "%#v", []Frame(st))
+		default:
+			st.formatSlice(s, verb)
+		}
+	case 's':
+		st.formatSlice(s, verb)
+	}
+}
+
+// formatSlice will format this StackTrace into the given buffer as a slice of
+// Frame, only valid when called with '%s' or '%v'.
+func (st StackTrace) formatSlice(s fmt.State, verb rune) {
+	io.WriteString(s, "[")
+	for i, f := range st {
+		if i > 0 {
+			io.WriteString(s, " ")
+		}
+		f.Format(s, verb)
+	}
+	io.WriteString(s, "]")
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		switch {
+		case st.Flag('+'):
+			for _, pc := range *s {
+				f := Frame(pc)
+				fmt.Fprintf(st, "\n%+v", f)
+			}
+		}
+	}
+}
+
+func (s *stack) StackTrace() StackTrace {
+	f := make([]Frame, len(*s))
+	for i := 0; i < len(f); i++ {
+		f[i] = Frame((*s)[i])
+	}
+	return f
+}
+
+func callers() *stack {
+	const depth = 32
+	var pcs [depth]uintptr
+	n := runtime.Callers(3, pcs[:])
+	var st stack = pcs[0:n]
+	return &st
+}
+
+// funcname removes the path prefix component of a function's name as reported by runtime.Func.Name().
+func funcname(name string) string {
+	i := strings.LastIndex(name, "/")
+	name = name[i+1:]
+	i = strings.Index(name, ".")
+	return name[i+1:]
+}
diff --git a/vendor/github.com/rivo/uniseg/LICENSE.txt b/vendor/github.com/rivo/uniseg/LICENSE.txt
new file mode 100644
index 0000000..5040f1e
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/LICENSE.txt
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2019 Oliver Kuederle
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/rivo/uniseg/README.md b/vendor/github.com/rivo/uniseg/README.md
new file mode 100644
index 0000000..f8da293
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/README.md
@@ -0,0 +1,62 @@
+# Unicode Text Segmentation for Go
+
+[![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg)](https://godoc.org/github.com/rivo/uniseg)
+[![Go Report](https://img.shields.io/badge/go%20report-A%2B-brightgreen.svg)](https://goreportcard.com/report/github.com/rivo/uniseg)
+
+This Go package implements Unicode Text Segmentation according to [Unicode Standard Annex #29](http://unicode.org/reports/tr29/) (Unicode version 12.0.0).
+
+At this point, only the determination of grapheme cluster boundaries is implemented.
+
+## Background
+
+In Go, [strings are read-only slices of bytes](https://blog.golang.org/strings). They can be turned into Unicode code points using the `for` loop or by casting: `[]rune(str)`. However, multiple code points may be combined into one user-perceived character or what the Unicode specification calls "grapheme cluster". Here are some examples:
+
+|String|Bytes (UTF-8)|Code points (runes)|Grapheme clusters|
+|-|-|-|-|
+|Käse|6 bytes: `4b 61 cc 88 73 65`|5 code points: `4b 61 308 73 65`|4 clusters: `[4b],[61 308],[73],[65]`|
+|🏳️‍🌈|14 bytes: `f0 9f 8f b3 ef b8 8f e2 80 8d f0 9f 8c 88`|4 code points: `1f3f3 fe0f 200d 1f308`|1 cluster: `[1f3f3 fe0f 200d 1f308]`|
+|🇩🇪|8 bytes: `f0 9f 87 a9 f0 9f 87 aa`|2 code points: `1f1e9 1f1ea`|1 cluster: `[1f1e9 1f1ea]`|
+
+This package provides a tool to iterate over these grapheme clusters. This may be used to determine the number of user-perceived characters, to split strings in their intended places, or to extract individual characters which form a unit.
+
+## Installation
+
+```bash
+go get github.com/rivo/uniseg
+```
+
+## Basic Example
+
+```go
+package main
+
+import (
+	"fmt"
+
+	"github.com/rivo/uniseg"
+)
+
+func main() {
+	gr := uniseg.NewGraphemes("👍🏼!")
+	for gr.Next() {
+		fmt.Printf("%x ", gr.Runes())
+	}
+	// Output: [1f44d 1f3fc] [21]
+}
+```
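+
+As a minimal sketch building on the iterator above, the same loop can be used to count user-perceived characters:
+
+```go
+gr := uniseg.NewGraphemes("👍🏼!")
+count := 0
+for gr.Next() {
+	count++
+}
+fmt.Println(count) // 2 grapheme clusters: [1f44d 1f3fc] and [21]
+```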
+
+## Documentation
+
+Refer to https://godoc.org/github.com/rivo/uniseg for the package's documentation.
+
+## Dependencies
+
+This package does not depend on any packages outside the standard library.
+
+## Your Feedback
+
+Add your issue here on GitHub. Feel free to get in touch if you have any questions.
+
+## Version
+
+Version tags will be introduced once Golang modules are official. Consider this version 0.1.
diff --git a/vendor/github.com/rivo/uniseg/doc.go b/vendor/github.com/rivo/uniseg/doc.go
new file mode 100644
index 0000000..60c737d
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/doc.go
@@ -0,0 +1,8 @@
+/*
+Package uniseg implements Unicode Text Segmentation according to Unicode
+Standard Annex #29 (http://unicode.org/reports/tr29/).
+
+At this point, only the determination of grapheme cluster boundaries is
+implemented.
+*/
+package uniseg
diff --git a/vendor/github.com/rivo/uniseg/go.mod b/vendor/github.com/rivo/uniseg/go.mod
new file mode 100644
index 0000000..a54280b
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/go.mod
@@ -0,0 +1,3 @@
+module github.com/rivo/uniseg
+
+go 1.12
diff --git a/vendor/github.com/rivo/uniseg/grapheme.go b/vendor/github.com/rivo/uniseg/grapheme.go
new file mode 100644
index 0000000..fbb846b
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/grapheme.go
@@ -0,0 +1,258 @@
+package uniseg
+
+// The states of the grapheme cluster parser.
+const (
+	grAny = iota
+	grCR
+	grControlLF
+	grL
+	grLVV
+	grLVTT
+	grPrepend
+	grExtendedPictographic
+	grExtendedPictographicZWJ
+	grRIOdd
+	grRIEven
+)
+
+// The grapheme cluster parser's breaking instructions.
+const (
+	grNoBoundary = iota
+	grBoundary
+)
+
+// The grapheme cluster parser's state transitions. Maps (state, property) to
+// (new state, breaking instruction, rule number). The breaking instruction
+// always refers to the boundary between the last and next code point.
+//
+// This map is queried as follows:
+//
+//   1. Find specific state + specific property. Stop if found.
+//   2. Find specific state + any property.
+//   3. Find any state + specific property.
+//   4. If only (2) or (3) (but not both) was found, use it and stop.
+//   5. If both (2) and (3) were found, use state and breaking instruction from
+//      the transition with the lower rule number, prefer (3) if rule numbers
+//      are equal. Stop.
+//   6. Assume grAny and grBoundary.
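+//
+// As a concrete illustration of step (5): for state grCR and property
+// prExtend there is no specific entry, but both (grCR, prAny) (GB4, rule 40)
+// and (grAny, prExtend) (GB9, rule 90) exist. GB4 has the lower rule number,
+// so its new state and breaking instruction (a boundary) are used.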
+var grTransitions = map[[2]int][3]int{
+	// GB5
+	{grAny, prCR}:      {grCR, grBoundary, 50},
+	{grAny, prLF}:      {grControlLF, grBoundary, 50},
+	{grAny, prControl}: {grControlLF, grBoundary, 50},
+
+	// GB4
+	{grCR, prAny}:        {grAny, grBoundary, 40},
+	{grControlLF, prAny}: {grAny, grBoundary, 40},
+
+	// GB3.
+	{grCR, prLF}: {grAny, grNoBoundary, 30},
+
+	// GB6.
+	{grAny, prL}: {grL, grBoundary, 9990},
+	{grL, prL}:   {grL, grNoBoundary, 60},
+	{grL, prV}:   {grLVV, grNoBoundary, 60},
+	{grL, prLV}:  {grLVV, grNoBoundary, 60},
+	{grL, prLVT}: {grLVTT, grNoBoundary, 60},
+
+	// GB7.
+	{grAny, prLV}: {grLVV, grBoundary, 9990},
+	{grAny, prV}:  {grLVV, grBoundary, 9990},
+	{grLVV, prV}:  {grLVV, grNoBoundary, 70},
+	{grLVV, prT}:  {grLVTT, grNoBoundary, 70},
+
+	// GB8.
+	{grAny, prLVT}: {grLVTT, grBoundary, 9990},
+	{grAny, prT}:   {grLVTT, grBoundary, 9990},
+	{grLVTT, prT}:  {grLVTT, grNoBoundary, 80},
+
+	// GB9.
+	{grAny, prExtend}: {grAny, grNoBoundary, 90},
+	{grAny, prZWJ}:    {grAny, grNoBoundary, 90},
+
+	// GB9a.
+	{grAny, prSpacingMark}: {grAny, grNoBoundary, 91},
+
+	// GB9b.
+	{grAny, prPreprend}: {grPrepend, grBoundary, 9990},
+	{grPrepend, prAny}:  {grAny, grNoBoundary, 92},
+
+	// GB11.
+	{grAny, prExtendedPictographic}:                     {grExtendedPictographic, grBoundary, 9990},
+	{grExtendedPictographic, prExtend}:                  {grExtendedPictographic, grNoBoundary, 110},
+	{grExtendedPictographic, prZWJ}:                     {grExtendedPictographicZWJ, grNoBoundary, 110},
+	{grExtendedPictographicZWJ, prExtendedPictographic}: {grExtendedPictographic, grNoBoundary, 110},
+
+	// GB12 / GB13.
+	{grAny, prRegionalIndicator}:    {grRIOdd, grBoundary, 9990},
+	{grRIOdd, prRegionalIndicator}:  {grRIEven, grNoBoundary, 120},
+	{grRIEven, prRegionalIndicator}: {grRIOdd, grBoundary, 120},
+}
+
+// Graphemes implements an iterator over Unicode extended grapheme clusters,
+// specified in the Unicode Standard Annex #29. Grapheme clusters correspond to
+// "user-perceived characters". These characters often consist of multiple
+// code points (e.g. the "woman kissing woman" emoji consists of 8 code points:
+// woman + ZWJ + heavy black heart (2 code points) + ZWJ + kiss mark + ZWJ +
+// woman) and the rules described in Annex #29 must be applied to group those
+// code points into clusters perceived by the user as one character.
+type Graphemes struct {
+	// The code points over which this class iterates.
+	codePoints []rune
+
+	// The (byte-based) indices of the code points into the original string plus
+	// len(original string). Thus, len(indices) = len(codePoints) + 1.
+	indices []int
+
+	// The current grapheme cluster to be returned. These are indices into
+	// codePoints/indices. If start == end, we either haven't started iterating
+	// yet (0) or the iteration has already completed (1).
+	start, end int
+
+	// The index of the next code point to be parsed.
+	pos int
+
+	// The current state of the code point parser.
+	state int
+}
+
+// NewGraphemes returns a new grapheme cluster iterator.
+func NewGraphemes(s string) *Graphemes {
+	g := &Graphemes{}
+	for index, codePoint := range s {
+		g.codePoints = append(g.codePoints, codePoint)
+		g.indices = append(g.indices, index)
+	}
+	g.indices = append(g.indices, len(s))
+	g.Next() // Parse ahead.
+	return g
+}
+
+// Next advances the iterator by one grapheme cluster and returns false if no
+// clusters are left. This function must be called before the first cluster is
+// accessed.
+func (g *Graphemes) Next() bool {
+	g.start = g.end
+
+	// The state transition gives us a boundary instruction BEFORE the next code
+	// point so we always need to stay ahead by one code point.
+
+	// Parse the next code point.
+	for g.pos <= len(g.codePoints) {
+		// GB2.
+		if g.pos == len(g.codePoints) {
+			g.end = g.pos
+			g.pos++
+			break
+		}
+
+		// Determine the property of the next character.
+		nextProperty := property(g.codePoints[g.pos])
+		g.pos++
+
+		// Find the applicable transition.
+		var boundary bool
+		transition, ok := grTransitions[[2]int{g.state, nextProperty}]
+		if ok {
+			// We have a specific transition. We'll use it.
+			g.state = transition[0]
+			boundary = transition[1] == grBoundary
+		} else {
+			// No specific transition found. Try the less specific ones.
+			transAnyProp, okAnyProp := grTransitions[[2]int{g.state, prAny}]
+			transAnyState, okAnyState := grTransitions[[2]int{grAny, nextProperty}]
+			if okAnyProp && okAnyState {
+				// Both apply. We'll use a mix (see comments for grTransitions).
+				g.state = transAnyState[0]
+				boundary = transAnyState[1] == grBoundary
+				if transAnyProp[2] < transAnyState[2] {
+					g.state = transAnyProp[0]
+					boundary = transAnyProp[1] == grBoundary
+				}
+			} else if okAnyProp {
+				// We only have a specific state.
+				g.state = transAnyProp[0]
+				boundary = transAnyProp[1] == grBoundary
+				// This branch will probably never be reached because okAnyState will
+				// always be true given the current transition map. But we keep it here
+				// for future modifications to the transition map where this may not be
+				// true anymore.
+			} else if okAnyState {
+				// We only have a specific property.
+				g.state = transAnyState[0]
+				boundary = transAnyState[1] == grBoundary
+			} else {
+				// No known transition. GB999: Any x Any.
+				g.state = grAny
+				boundary = true
+			}
+		}
+
+		// If we found a cluster boundary, let's stop here. The current cluster will
+		// be the one that just ended.
+		if g.pos-1 == 0 /* GB1 */ || boundary {
+			g.end = g.pos - 1
+			break
+		}
+	}
+
+	return g.start != g.end
+}
+
+// Runes returns a slice of runes (code points) which corresponds to the current
+// grapheme cluster. If the iterator is already past the end or Next() has not
+// yet been called, nil is returned.
+func (g *Graphemes) Runes() []rune {
+	if g.start == g.end {
+		return nil
+	}
+	return g.codePoints[g.start:g.end]
+}
+
+// Str returns a substring of the original string which corresponds to the
+// current grapheme cluster. If the iterator is already past the end or Next()
+// has not yet been called, an empty string is returned.
+func (g *Graphemes) Str() string {
+	if g.start == g.end {
+		return ""
+	}
+	return string(g.codePoints[g.start:g.end])
+}
+
+// Bytes returns a byte slice which corresponds to the current grapheme cluster.
+// If the iterator is already past the end or Next() has not yet been called,
+// nil is returned.
+func (g *Graphemes) Bytes() []byte {
+	if g.start == g.end {
+		return nil
+	}
+	return []byte(string(g.codePoints[g.start:g.end]))
+}
+
+// Positions returns the interval of the current grapheme cluster as byte
+// positions into the original string. The first returned value "from" indexes
+// the first byte and the second returned value "to" indexes the first byte that
+// is not included anymore, i.e. str[from:to] is the current grapheme cluster of
+// the original string "str". If Next() has not yet been called, both values are
+// 0. If the iterator is already past the end, both values are 1.
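+// For example, for the second cluster of "Ka\u0308se" ("a" followed by a
+// combining diaeresis), Positions returns (1, 4).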
+func (g *Graphemes) Positions() (int, int) {
+	return g.indices[g.start], g.indices[g.end]
+}
+
+// Reset puts the iterator into its initial state such that the next call to
+// Next() sets it to the first grapheme cluster again.
+func (g *Graphemes) Reset() {
+	g.start, g.end, g.pos, g.state = 0, 0, 0, grAny
+	g.Next() // Parse ahead again.
+}
+
+// GraphemeClusterCount returns the number of user-perceived characters
+// (grapheme clusters) for the given string. To calculate this number, it
+// iterates through the string using the Graphemes iterator.
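+// For example, GraphemeClusterCount("🇩🇪") returns 1: the flag consists of two
+// regional indicator code points but forms a single grapheme cluster.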
+func GraphemeClusterCount(s string) (n int) {
+	g := NewGraphemes(s)
+	for g.Next() {
+		n++
+	}
+	return
+}
diff --git a/vendor/github.com/rivo/uniseg/properties.go b/vendor/github.com/rivo/uniseg/properties.go
new file mode 100644
index 0000000..a75ab58
--- /dev/null
+++ b/vendor/github.com/rivo/uniseg/properties.go
@@ -0,0 +1,1658 @@
+package uniseg
+
+// The Unicode properties. Only the ones needed in the context of this package
+// are included.
+const (
+	prAny = iota
+	prPreprend
+	prCR
+	prLF
+	prControl
+	prExtend
+	prRegionalIndicator
+	prSpacingMark
+	prL
+	prV
+	prT
+	prLV
+	prLVT
+	prZWJ
+	prExtendedPictographic
+)
+
+// Maps code point ranges to their properties. In the context of this package,
+// any code point that is not contained may map to "prAny". The code point
+// ranges in this slice are numerically sorted.
+//
+// These ranges were taken from
+// http://www.unicode.org/Public/UCD/latest/ucd/auxiliary/GraphemeBreakProperty.txt
+// as well as
+// https://unicode.org/Public/emoji/latest/emoji-data.txt
+// ("Extended_Pictographic" only) on March 11, 2019. See
+// https://www.unicode.org/license.html for the Unicode license agreement.
+var codePoints = [][3]int{
+	{0x0000, 0x0009, prControl},                // Cc  [10] <control-0000>..<control-0009>
+	{0x000A, 0x000A, prLF},                     // Cc       <control-000A>
+	{0x000B, 0x000C, prControl},                // Cc   [2] <control-000B>..<control-000C>
+	{0x000D, 0x000D, prCR},                     // Cc       <control-000D>
+	{0x000E, 0x001F, prControl},                // Cc  [18] <control-000E>..<control-001F>
+	{0x007F, 0x009F, prControl},                // Cc  [33] <control-007F>..<control-009F>
+	{0x00A9, 0x00A9, prExtendedPictographic},   //  1.1  [1] (©️)       copyright
+	{0x00AD, 0x00AD, prControl},                // Cf       SOFT HYPHEN
+	{0x00AE, 0x00AE, prExtendedPictographic},   //  1.1  [1] (®️)       registered
+	{0x0300, 0x036F, prExtend},                 // Mn [112] COMBINING GRAVE ACCENT..COMBINING LATIN SMALL LETTER X
+	{0x0483, 0x0487, prExtend},                 // Mn   [5] COMBINING CYRILLIC TITLO..COMBINING CYRILLIC POKRYTIE
+	{0x0488, 0x0489, prExtend},                 // Me   [2] COMBINING CYRILLIC HUNDRED THOUSANDS SIGN..COMBINING CYRILLIC MILLIONS SIGN
+	{0x0591, 0x05BD, prExtend},                 // Mn  [45] HEBREW ACCENT ETNAHTA..HEBREW POINT METEG
+	{0x05BF, 0x05BF, prExtend},                 // Mn       HEBREW POINT RAFE
+	{0x05C1, 0x05C2, prExtend},                 // Mn   [2] HEBREW POINT SHIN DOT..HEBREW POINT SIN DOT
+	{0x05C4, 0x05C5, prExtend},                 // Mn   [2] HEBREW MARK UPPER DOT..HEBREW MARK LOWER DOT
+	{0x05C7, 0x05C7, prExtend},                 // Mn       HEBREW POINT QAMATS QATAN
+	{0x0600, 0x0605, prPreprend},               // Cf   [6] ARABIC NUMBER SIGN..ARABIC NUMBER MARK ABOVE
+	{0x0610, 0x061A, prExtend},                 // Mn  [11] ARABIC SIGN SALLALLAHOU ALAYHE WASSALLAM..ARABIC SMALL KASRA
+	{0x061C, 0x061C, prControl},                // Cf       ARABIC LETTER MARK
+	{0x064B, 0x065F, prExtend},                 // Mn  [21] ARABIC FATHATAN..ARABIC WAVY HAMZA BELOW
+	{0x0670, 0x0670, prExtend},                 // Mn       ARABIC LETTER SUPERSCRIPT ALEF
+	{0x06D6, 0x06DC, prExtend},                 // Mn   [7] ARABIC SMALL HIGH LIGATURE SAD WITH LAM WITH ALEF MAKSURA..ARABIC SMALL HIGH SEEN
+	{0x06DD, 0x06DD, prPreprend},               // Cf       ARABIC END OF AYAH
+	{0x06DF, 0x06E4, prExtend},                 // Mn   [6] ARABIC SMALL HIGH ROUNDED ZERO..ARABIC SMALL HIGH MADDA
+	{0x06E7, 0x06E8, prExtend},                 // Mn   [2] ARABIC SMALL HIGH YEH..ARABIC SMALL HIGH NOON
+	{0x06EA, 0x06ED, prExtend},                 // Mn   [4] ARABIC EMPTY CENTRE LOW STOP..ARABIC SMALL LOW MEEM
+	{0x070F, 0x070F, prPreprend},               // Cf       SYRIAC ABBREVIATION MARK
+	{0x0711, 0x0711, prExtend},                 // Mn       SYRIAC LETTER SUPERSCRIPT ALAPH
+	{0x0730, 0x074A, prExtend},                 // Mn  [27] SYRIAC PTHAHA ABOVE..SYRIAC BARREKH
+	{0x07A6, 0x07B0, prExtend},                 // Mn  [11] THAANA ABAFILI..THAANA SUKUN
+	{0x07EB, 0x07F3, prExtend},                 // Mn   [9] NKO COMBINING SHORT HIGH TONE..NKO COMBINING DOUBLE DOT ABOVE
+	{0x07FD, 0x07FD, prExtend},                 // Mn       NKO DANTAYALAN
+	{0x0816, 0x0819, prExtend},                 // Mn   [4] SAMARITAN MARK IN..SAMARITAN MARK DAGESH
+	{0x081B, 0x0823, prExtend},                 // Mn   [9] SAMARITAN MARK EPENTHETIC YUT..SAMARITAN VOWEL SIGN A
+	{0x0825, 0x0827, prExtend},                 // Mn   [3] SAMARITAN VOWEL SIGN SHORT A..SAMARITAN VOWEL SIGN U
+	{0x0829, 0x082D, prExtend},                 // Mn   [5] SAMARITAN VOWEL SIGN LONG I..SAMARITAN MARK NEQUDAA
+	{0x0859, 0x085B, prExtend},                 // Mn   [3] MANDAIC AFFRICATION MARK..MANDAIC GEMINATION MARK
+	{0x08D3, 0x08E1, prExtend},                 // Mn  [15] ARABIC SMALL LOW WAW..ARABIC SMALL HIGH SIGN SAFHA
+	{0x08E2, 0x08E2, prPreprend},               // Cf       ARABIC DISPUTED END OF AYAH
+	{0x08E3, 0x0902, prExtend},                 // Mn  [32] ARABIC TURNED DAMMA BELOW..DEVANAGARI SIGN ANUSVARA
+	{0x0903, 0x0903, prSpacingMark},            // Mc       DEVANAGARI SIGN VISARGA
+	{0x093A, 0x093A, prExtend},                 // Mn       DEVANAGARI VOWEL SIGN OE
+	{0x093B, 0x093B, prSpacingMark},            // Mc       DEVANAGARI VOWEL SIGN OOE
+	{0x093C, 0x093C, prExtend},                 // Mn       DEVANAGARI SIGN NUKTA
+	{0x093E, 0x0940, prSpacingMark},            // Mc   [3] DEVANAGARI VOWEL SIGN AA..DEVANAGARI VOWEL SIGN II
+	{0x0941, 0x0948, prExtend},                 // Mn   [8] DEVANAGARI VOWEL SIGN U..DEVANAGARI VOWEL SIGN AI
+	{0x0949, 0x094C, prSpacingMark},            // Mc   [4] DEVANAGARI VOWEL SIGN CANDRA O..DEVANAGARI VOWEL SIGN AU
+	{0x094D, 0x094D, prExtend},                 // Mn       DEVANAGARI SIGN VIRAMA
+	{0x094E, 0x094F, prSpacingMark},            // Mc   [2] DEVANAGARI VOWEL SIGN PRISHTHAMATRA E..DEVANAGARI VOWEL SIGN AW
+	{0x0951, 0x0957, prExtend},                 // Mn   [7] DEVANAGARI STRESS SIGN UDATTA..DEVANAGARI VOWEL SIGN UUE
+	{0x0962, 0x0963, prExtend},                 // Mn   [2] DEVANAGARI VOWEL SIGN VOCALIC L..DEVANAGARI VOWEL SIGN VOCALIC LL
+	{0x0981, 0x0981, prExtend},                 // Mn       BENGALI SIGN CANDRABINDU
+	{0x0982, 0x0983, prSpacingMark},            // Mc   [2] BENGALI SIGN ANUSVARA..BENGALI SIGN VISARGA
+	{0x09BC, 0x09BC, prExtend},                 // Mn       BENGALI SIGN NUKTA
+	{0x09BE, 0x09BE, prExtend},                 // Mc       BENGALI VOWEL SIGN AA
+	{0x09BF, 0x09C0, prSpacingMark},            // Mc   [2] BENGALI VOWEL SIGN I..BENGALI VOWEL SIGN II
+	{0x09C1, 0x09C4, prExtend},                 // Mn   [4] BENGALI VOWEL SIGN U..BENGALI VOWEL SIGN VOCALIC RR
+	{0x09C7, 0x09C8, prSpacingMark},            // Mc   [2] BENGALI VOWEL SIGN E..BENGALI VOWEL SIGN AI
+	{0x09CB, 0x09CC, prSpacingMark},            // Mc   [2] BENGALI VOWEL SIGN O..BENGALI VOWEL SIGN AU
+	{0x09CD, 0x09CD, prExtend},                 // Mn       BENGALI SIGN VIRAMA
+	{0x09D7, 0x09D7, prExtend},                 // Mc       BENGALI AU LENGTH MARK
+	{0x09E2, 0x09E3, prExtend},                 // Mn   [2] BENGALI VOWEL SIGN VOCALIC L..BENGALI VOWEL SIGN VOCALIC LL
+	{0x09FE, 0x09FE, prExtend},                 // Mn       BENGALI SANDHI MARK
+	{0x0A01, 0x0A02, prExtend},                 // Mn   [2] GURMUKHI SIGN ADAK BINDI..GURMUKHI SIGN BINDI
+	{0x0A03, 0x0A03, prSpacingMark},            // Mc       GURMUKHI SIGN VISARGA
+	{0x0A3C, 0x0A3C, prExtend},                 // Mn       GURMUKHI SIGN NUKTA
+	{0x0A3E, 0x0A40, prSpacingMark},            // Mc   [3] GURMUKHI VOWEL SIGN AA..GURMUKHI VOWEL SIGN II
+	{0x0A41, 0x0A42, prExtend},                 // Mn   [2] GURMUKHI VOWEL SIGN U..GURMUKHI VOWEL SIGN UU
+	{0x0A47, 0x0A48, prExtend},                 // Mn   [2] GURMUKHI VOWEL SIGN EE..GURMUKHI VOWEL SIGN AI
+	{0x0A4B, 0x0A4D, prExtend},                 // Mn   [3] GURMUKHI VOWEL SIGN OO..GURMUKHI SIGN VIRAMA
+	{0x0A51, 0x0A51, prExtend},                 // Mn       GURMUKHI SIGN UDAAT
+	{0x0A70, 0x0A71, prExtend},                 // Mn   [2] GURMUKHI TIPPI..GURMUKHI ADDAK
+	{0x0A75, 0x0A75, prExtend},                 // Mn       GURMUKHI SIGN YAKASH
+	{0x0A81, 0x0A82, prExtend},                 // Mn   [2] GUJARATI SIGN CANDRABINDU..GUJARATI SIGN ANUSVARA
+	{0x0A83, 0x0A83, prSpacingMark},            // Mc       GUJARATI SIGN VISARGA
+	{0x0ABC, 0x0ABC, prExtend},                 // Mn       GUJARATI SIGN NUKTA
+	{0x0ABE, 0x0AC0, prSpacingMark},            // Mc   [3] GUJARATI VOWEL SIGN AA..GUJARATI VOWEL SIGN II
+	{0x0AC1, 0x0AC5, prExtend},                 // Mn   [5] GUJARATI VOWEL SIGN U..GUJARATI VOWEL SIGN CANDRA E
+	{0x0AC7, 0x0AC8, prExtend},                 // Mn   [2] GUJARATI VOWEL SIGN E..GUJARATI VOWEL SIGN AI
+	{0x0AC9, 0x0AC9, prSpacingMark},            // Mc       GUJARATI VOWEL SIGN CANDRA O
+	{0x0ACB, 0x0ACC, prSpacingMark},            // Mc   [2] GUJARATI VOWEL SIGN O..GUJARATI VOWEL SIGN AU
+	{0x0ACD, 0x0ACD, prExtend},                 // Mn       GUJARATI SIGN VIRAMA
+	{0x0AE2, 0x0AE3, prExtend},                 // Mn   [2] GUJARATI VOWEL SIGN VOCALIC L..GUJARATI VOWEL SIGN VOCALIC LL
+	{0x0AFA, 0x0AFF, prExtend},                 // Mn   [6] GUJARATI SIGN SUKUN..GUJARATI SIGN TWO-CIRCLE NUKTA ABOVE
+	{0x0B01, 0x0B01, prExtend},                 // Mn       ORIYA SIGN CANDRABINDU
+	{0x0B02, 0x0B03, prSpacingMark},            // Mc   [2] ORIYA SIGN ANUSVARA..ORIYA SIGN VISARGA
+	{0x0B3C, 0x0B3C, prExtend},                 // Mn       ORIYA SIGN NUKTA
+	{0x0B3E, 0x0B3E, prExtend},                 // Mc       ORIYA VOWEL SIGN AA
+	{0x0B3F, 0x0B3F, prExtend},                 // Mn       ORIYA VOWEL SIGN I
+	{0x0B40, 0x0B40, prSpacingMark},            // Mc       ORIYA VOWEL SIGN II
+	{0x0B41, 0x0B44, prExtend},                 // Mn   [4] ORIYA VOWEL SIGN U..ORIYA VOWEL SIGN VOCALIC RR
+	{0x0B47, 0x0B48, prSpacingMark},            // Mc   [2] ORIYA VOWEL SIGN E..ORIYA VOWEL SIGN AI
+	{0x0B4B, 0x0B4C, prSpacingMark},            // Mc   [2] ORIYA VOWEL SIGN O..ORIYA VOWEL SIGN AU
+	{0x0B4D, 0x0B4D, prExtend},                 // Mn       ORIYA SIGN VIRAMA
+	{0x0B56, 0x0B56, prExtend},                 // Mn       ORIYA AI LENGTH MARK
+	{0x0B57, 0x0B57, prExtend},                 // Mc       ORIYA AU LENGTH MARK
+	{0x0B62, 0x0B63, prExtend},                 // Mn   [2] ORIYA VOWEL SIGN VOCALIC L..ORIYA VOWEL SIGN VOCALIC LL
+	{0x0B82, 0x0B82, prExtend},                 // Mn       TAMIL SIGN ANUSVARA
+	{0x0BBE, 0x0BBE, prExtend},                 // Mc       TAMIL VOWEL SIGN AA
+	{0x0BBF, 0x0BBF, prSpacingMark},            // Mc       TAMIL VOWEL SIGN I
+	{0x0BC0, 0x0BC0, prExtend},                 // Mn       TAMIL VOWEL SIGN II
+	{0x0BC1, 0x0BC2, prSpacingMark},            // Mc   [2] TAMIL VOWEL SIGN U..TAMIL VOWEL SIGN UU
+	{0x0BC6, 0x0BC8, prSpacingMark},            // Mc   [3] TAMIL VOWEL SIGN E..TAMIL VOWEL SIGN AI
+	{0x0BCA, 0x0BCC, prSpacingMark},            // Mc   [3] TAMIL VOWEL SIGN O..TAMIL VOWEL SIGN AU
+	{0x0BCD, 0x0BCD, prExtend},                 // Mn       TAMIL SIGN VIRAMA
+	{0x0BD7, 0x0BD7, prExtend},                 // Mc       TAMIL AU LENGTH MARK
+	{0x0C00, 0x0C00, prExtend},                 // Mn       TELUGU SIGN COMBINING CANDRABINDU ABOVE
+	{0x0C01, 0x0C03, prSpacingMark},            // Mc   [3] TELUGU SIGN CANDRABINDU..TELUGU SIGN VISARGA
+	{0x0C04, 0x0C04, prExtend},                 // Mn       TELUGU SIGN COMBINING ANUSVARA ABOVE
+	{0x0C3E, 0x0C40, prExtend},                 // Mn   [3] TELUGU VOWEL SIGN AA..TELUGU VOWEL SIGN II
+	{0x0C41, 0x0C44, prSpacingMark},            // Mc   [4] TELUGU VOWEL SIGN U..TELUGU VOWEL SIGN VOCALIC RR
+	{0x0C46, 0x0C48, prExtend},                 // Mn   [3] TELUGU VOWEL SIGN E..TELUGU VOWEL SIGN AI
+	{0x0C4A, 0x0C4D, prExtend},                 // Mn   [4] TELUGU VOWEL SIGN O..TELUGU SIGN VIRAMA
+	{0x0C55, 0x0C56, prExtend},                 // Mn   [2] TELUGU LENGTH MARK..TELUGU AI LENGTH MARK
+	{0x0C62, 0x0C63, prExtend},                 // Mn   [2] TELUGU VOWEL SIGN VOCALIC L..TELUGU VOWEL SIGN VOCALIC LL
+	{0x0C81, 0x0C81, prExtend},                 // Mn       KANNADA SIGN CANDRABINDU
+	{0x0C82, 0x0C83, prSpacingMark},            // Mc   [2] KANNADA SIGN ANUSVARA..KANNADA SIGN VISARGA
+	{0x0CBC, 0x0CBC, prExtend},                 // Mn       KANNADA SIGN NUKTA
+	{0x0CBE, 0x0CBE, prSpacingMark},            // Mc       KANNADA VOWEL SIGN AA
+	{0x0CBF, 0x0CBF, prExtend},                 // Mn       KANNADA VOWEL SIGN I
+	{0x0CC0, 0x0CC1, prSpacingMark},            // Mc   [2] KANNADA VOWEL SIGN II..KANNADA VOWEL SIGN U
+	{0x0CC2, 0x0CC2, prExtend},                 // Mc       KANNADA VOWEL SIGN UU
+	{0x0CC3, 0x0CC4, prSpacingMark},            // Mc   [2] KANNADA VOWEL SIGN VOCALIC R..KANNADA VOWEL SIGN VOCALIC RR
+	{0x0CC6, 0x0CC6, prExtend},                 // Mn       KANNADA VOWEL SIGN E
+	{0x0CC7, 0x0CC8, prSpacingMark},            // Mc   [2] KANNADA VOWEL SIGN EE..KANNADA VOWEL SIGN AI
+	{0x0CCA, 0x0CCB, prSpacingMark},            // Mc   [2] KANNADA VOWEL SIGN O..KANNADA VOWEL SIGN OO
+	{0x0CCC, 0x0CCD, prExtend},                 // Mn   [2] KANNADA VOWEL SIGN AU..KANNADA SIGN VIRAMA
+	{0x0CD5, 0x0CD6, prExtend},                 // Mc   [2] KANNADA LENGTH MARK..KANNADA AI LENGTH MARK
+	{0x0CE2, 0x0CE3, prExtend},                 // Mn   [2] KANNADA VOWEL SIGN VOCALIC L..KANNADA VOWEL SIGN VOCALIC LL
+	{0x0D00, 0x0D01, prExtend},                 // Mn   [2] MALAYALAM SIGN COMBINING ANUSVARA ABOVE..MALAYALAM SIGN CANDRABINDU
+	{0x0D02, 0x0D03, prSpacingMark},            // Mc   [2] MALAYALAM SIGN ANUSVARA..MALAYALAM SIGN VISARGA
+	{0x0D3B, 0x0D3C, prExtend},                 // Mn   [2] MALAYALAM SIGN VERTICAL BAR VIRAMA..MALAYALAM SIGN CIRCULAR VIRAMA
+	{0x0D3E, 0x0D3E, prExtend},                 // Mc       MALAYALAM VOWEL SIGN AA
+	{0x0D3F, 0x0D40, prSpacingMark},            // Mc   [2] MALAYALAM VOWEL SIGN I..MALAYALAM VOWEL SIGN II
+	{0x0D41, 0x0D44, prExtend},                 // Mn   [4] MALAYALAM VOWEL SIGN U..MALAYALAM VOWEL SIGN VOCALIC RR
+	{0x0D46, 0x0D48, prSpacingMark},            // Mc   [3] MALAYALAM VOWEL SIGN E..MALAYALAM VOWEL SIGN AI
+	{0x0D4A, 0x0D4C, prSpacingMark},            // Mc   [3] MALAYALAM VOWEL SIGN O..MALAYALAM VOWEL SIGN AU
+	{0x0D4D, 0x0D4D, prExtend},                 // Mn       MALAYALAM SIGN VIRAMA
+	{0x0D4E, 0x0D4E, prPreprend},               // Lo       MALAYALAM LETTER DOT REPH
+	{0x0D57, 0x0D57, prExtend},                 // Mc       MALAYALAM AU LENGTH MARK
+	{0x0D62, 0x0D63, prExtend},                 // Mn   [2] MALAYALAM VOWEL SIGN VOCALIC L..MALAYALAM VOWEL SIGN VOCALIC LL
+	{0x0D82, 0x0D83, prSpacingMark},            // Mc   [2] SINHALA SIGN ANUSVARAYA..SINHALA SIGN VISARGAYA
+	{0x0DCA, 0x0DCA, prExtend},                 // Mn       SINHALA SIGN AL-LAKUNA
+	{0x0DCF, 0x0DCF, prExtend},                 // Mc       SINHALA VOWEL SIGN AELA-PILLA
+	{0x0DD0, 0x0DD1, prSpacingMark},            // Mc   [2] SINHALA VOWEL SIGN KETTI AEDA-PILLA..SINHALA VOWEL SIGN DIGA AEDA-PILLA
+	{0x0DD2, 0x0DD4, prExtend},                 // Mn   [3] SINHALA VOWEL SIGN KETTI IS-PILLA..SINHALA VOWEL SIGN KETTI PAA-PILLA
+	{0x0DD6, 0x0DD6, prExtend},                 // Mn       SINHALA VOWEL SIGN DIGA PAA-PILLA
+	{0x0DD8, 0x0DDE, prSpacingMark},            // Mc   [7] SINHALA VOWEL SIGN GAETTA-PILLA..SINHALA VOWEL SIGN KOMBUVA HAA GAYANUKITTA
+	{0x0DDF, 0x0DDF, prExtend},                 // Mc       SINHALA VOWEL SIGN GAYANUKITTA
+	{0x0DF2, 0x0DF3, prSpacingMark},            // Mc   [2] SINHALA VOWEL SIGN DIGA GAETTA-PILLA..SINHALA VOWEL SIGN DIGA GAYANUKITTA
+	{0x0E31, 0x0E31, prExtend},                 // Mn       THAI CHARACTER MAI HAN-AKAT
+	{0x0E33, 0x0E33, prSpacingMark},            // Lo       THAI CHARACTER SARA AM
+	{0x0E34, 0x0E3A, prExtend},                 // Mn   [7] THAI CHARACTER SARA I..THAI CHARACTER PHINTHU
+	{0x0E47, 0x0E4E, prExtend},                 // Mn   [8] THAI CHARACTER MAITAIKHU..THAI CHARACTER YAMAKKAN
+	{0x0EB1, 0x0EB1, prExtend},                 // Mn       LAO VOWEL SIGN MAI KAN
+	{0x0EB3, 0x0EB3, prSpacingMark},            // Lo       LAO VOWEL SIGN AM
+	{0x0EB4, 0x0EBC, prExtend},                 // Mn   [9] LAO VOWEL SIGN I..LAO SEMIVOWEL SIGN LO
+	{0x0EC8, 0x0ECD, prExtend},                 // Mn   [6] LAO TONE MAI EK..LAO NIGGAHITA
+	{0x0F18, 0x0F19, prExtend},                 // Mn   [2] TIBETAN ASTROLOGICAL SIGN -KHYUD PA..TIBETAN ASTROLOGICAL SIGN SDONG TSHUGS
+	{0x0F35, 0x0F35, prExtend},                 // Mn       TIBETAN MARK NGAS BZUNG NYI ZLA
+	{0x0F37, 0x0F37, prExtend},                 // Mn       TIBETAN MARK NGAS BZUNG SGOR RTAGS
+	{0x0F39, 0x0F39, prExtend},                 // Mn       TIBETAN MARK TSA -PHRU
+	{0x0F3E, 0x0F3F, prSpacingMark},            // Mc   [2] TIBETAN SIGN YAR TSHES..TIBETAN SIGN MAR TSHES
+	{0x0F71, 0x0F7E, prExtend},                 // Mn  [14] TIBETAN VOWEL SIGN AA..TIBETAN SIGN RJES SU NGA RO
+	{0x0F7F, 0x0F7F, prSpacingMark},            // Mc       TIBETAN SIGN RNAM BCAD
+	{0x0F80, 0x0F84, prExtend},                 // Mn   [5] TIBETAN VOWEL SIGN REVERSED I..TIBETAN MARK HALANTA
+	{0x0F86, 0x0F87, prExtend},                 // Mn   [2] TIBETAN SIGN LCI RTAGS..TIBETAN SIGN YANG RTAGS
+	{0x0F8D, 0x0F97, prExtend},                 // Mn  [11] TIBETAN SUBJOINED SIGN LCE TSA CAN..TIBETAN SUBJOINED LETTER JA
+	{0x0F99, 0x0FBC, prExtend},                 // Mn  [36] TIBETAN SUBJOINED LETTER NYA..TIBETAN SUBJOINED LETTER FIXED-FORM RA
+	{0x0FC6, 0x0FC6, prExtend},                 // Mn       TIBETAN SYMBOL PADMA GDAN
+	{0x102D, 0x1030, prExtend},                 // Mn   [4] MYANMAR VOWEL SIGN I..MYANMAR VOWEL SIGN UU
+	{0x1031, 0x1031, prSpacingMark},            // Mc       MYANMAR VOWEL SIGN E
+	{0x1032, 0x1037, prExtend},                 // Mn   [6] MYANMAR VOWEL SIGN AI..MYANMAR SIGN DOT BELOW
+	{0x1039, 0x103A, prExtend},                 // Mn   [2] MYANMAR SIGN VIRAMA..MYANMAR SIGN ASAT
+	{0x103B, 0x103C, prSpacingMark},            // Mc   [2] MYANMAR CONSONANT SIGN MEDIAL YA..MYANMAR CONSONANT SIGN MEDIAL RA
+	{0x103D, 0x103E, prExtend},                 // Mn   [2] MYANMAR CONSONANT SIGN MEDIAL WA..MYANMAR CONSONANT SIGN MEDIAL HA
+	{0x1056, 0x1057, prSpacingMark},            // Mc   [2] MYANMAR VOWEL SIGN VOCALIC R..MYANMAR VOWEL SIGN VOCALIC RR
+	{0x1058, 0x1059, prExtend},                 // Mn   [2] MYANMAR VOWEL SIGN VOCALIC L..MYANMAR VOWEL SIGN VOCALIC LL
+	{0x105E, 0x1060, prExtend},                 // Mn   [3] MYANMAR CONSONANT SIGN MON MEDIAL NA..MYANMAR CONSONANT SIGN MON MEDIAL LA
+	{0x1071, 0x1074, prExtend},                 // Mn   [4] MYANMAR VOWEL SIGN GEBA KAREN I..MYANMAR VOWEL SIGN KAYAH EE
+	{0x1082, 0x1082, prExtend},                 // Mn       MYANMAR CONSONANT SIGN SHAN MEDIAL WA
+	{0x1084, 0x1084, prSpacingMark},            // Mc       MYANMAR VOWEL SIGN SHAN E
+	{0x1085, 0x1086, prExtend},                 // Mn   [2] MYANMAR VOWEL SIGN SHAN E ABOVE..MYANMAR VOWEL SIGN SHAN FINAL Y
+	{0x108D, 0x108D, prExtend},                 // Mn       MYANMAR SIGN SHAN COUNCIL EMPHATIC TONE
+	{0x109D, 0x109D, prExtend},                 // Mn       MYANMAR VOWEL SIGN AITON AI
+	{0x1100, 0x115F, prL},                      // Lo  [96] HANGUL CHOSEONG KIYEOK..HANGUL CHOSEONG FILLER
+	{0x1160, 0x11A7, prV},                      // Lo  [72] HANGUL JUNGSEONG FILLER..HANGUL JUNGSEONG O-YAE
+	{0x11A8, 0x11FF, prT},                      // Lo  [88] HANGUL JONGSEONG KIYEOK..HANGUL JONGSEONG SSANGNIEUN
+	{0x135D, 0x135F, prExtend},                 // Mn   [3] ETHIOPIC COMBINING GEMINATION AND VOWEL LENGTH MARK..ETHIOPIC COMBINING GEMINATION MARK
+	{0x1712, 0x1714, prExtend},                 // Mn   [3] TAGALOG VOWEL SIGN I..TAGALOG SIGN VIRAMA
+	{0x1732, 0x1734, prExtend},                 // Mn   [3] HANUNOO VOWEL SIGN I..HANUNOO SIGN PAMUDPOD
+	{0x1752, 0x1753, prExtend},                 // Mn   [2] BUHID VOWEL SIGN I..BUHID VOWEL SIGN U
+	{0x1772, 0x1773, prExtend},                 // Mn   [2] TAGBANWA VOWEL SIGN I..TAGBANWA VOWEL SIGN U
+	{0x17B4, 0x17B5, prExtend},                 // Mn   [2] KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
+	{0x17B6, 0x17B6, prSpacingMark},            // Mc       KHMER VOWEL SIGN AA
+	{0x17B7, 0x17BD, prExtend},                 // Mn   [7] KHMER VOWEL SIGN I..KHMER VOWEL SIGN UA
+	{0x17BE, 0x17C5, prSpacingMark},            // Mc   [8] KHMER VOWEL SIGN OE..KHMER VOWEL SIGN AU
+	{0x17C6, 0x17C6, prExtend},                 // Mn       KHMER SIGN NIKAHIT
+	{0x17C7, 0x17C8, prSpacingMark},            // Mc   [2] KHMER SIGN REAHMUK..KHMER SIGN YUUKALEAPINTU
+	{0x17C9, 0x17D3, prExtend},                 // Mn  [11] KHMER SIGN MUUSIKATOAN..KHMER SIGN BATHAMASAT
+	{0x17DD, 0x17DD, prExtend},                 // Mn       KHMER SIGN ATTHACAN
+	{0x180B, 0x180D, prExtend},                 // Mn   [3] MONGOLIAN FREE VARIATION SELECTOR ONE..MONGOLIAN FREE VARIATION SELECTOR THREE
+	{0x180E, 0x180E, prControl},                // Cf       MONGOLIAN VOWEL SEPARATOR
+	{0x1885, 0x1886, prExtend},                 // Mn   [2] MONGOLIAN LETTER ALI GALI BALUDA..MONGOLIAN LETTER ALI GALI THREE BALUDA
+	{0x18A9, 0x18A9, prExtend},                 // Mn       MONGOLIAN LETTER ALI GALI DAGALGA
+	{0x1920, 0x1922, prExtend},                 // Mn   [3] LIMBU VOWEL SIGN A..LIMBU VOWEL SIGN U
+	{0x1923, 0x1926, prSpacingMark},            // Mc   [4] LIMBU VOWEL SIGN EE..LIMBU VOWEL SIGN AU
+	{0x1927, 0x1928, prExtend},                 // Mn   [2] LIMBU VOWEL SIGN E..LIMBU VOWEL SIGN O
+	{0x1929, 0x192B, prSpacingMark},            // Mc   [3] LIMBU SUBJOINED LETTER YA..LIMBU SUBJOINED LETTER WA
+	{0x1930, 0x1931, prSpacingMark},            // Mc   [2] LIMBU SMALL LETTER KA..LIMBU SMALL LETTER NGA
+	{0x1932, 0x1932, prExtend},                 // Mn       LIMBU SMALL LETTER ANUSVARA
+	{0x1933, 0x1938, prSpacingMark},            // Mc   [6] LIMBU SMALL LETTER TA..LIMBU SMALL LETTER LA
+	{0x1939, 0x193B, prExtend},                 // Mn   [3] LIMBU SIGN MUKPHRENG..LIMBU SIGN SA-I
+	{0x1A17, 0x1A18, prExtend},                 // Mn   [2] BUGINESE VOWEL SIGN I..BUGINESE VOWEL SIGN U
+	{0x1A19, 0x1A1A, prSpacingMark},            // Mc   [2] BUGINESE VOWEL SIGN E..BUGINESE VOWEL SIGN O
+	{0x1A1B, 0x1A1B, prExtend},                 // Mn       BUGINESE VOWEL SIGN AE
+	{0x1A55, 0x1A55, prSpacingMark},            // Mc       TAI THAM CONSONANT SIGN MEDIAL RA
+	{0x1A56, 0x1A56, prExtend},                 // Mn       TAI THAM CONSONANT SIGN MEDIAL LA
+	{0x1A57, 0x1A57, prSpacingMark},            // Mc       TAI THAM CONSONANT SIGN LA TANG LAI
+	{0x1A58, 0x1A5E, prExtend},                 // Mn   [7] TAI THAM SIGN MAI KANG LAI..TAI THAM CONSONANT SIGN SA
+	{0x1A60, 0x1A60, prExtend},                 // Mn       TAI THAM SIGN SAKOT
+	{0x1A62, 0x1A62, prExtend},                 // Mn       TAI THAM VOWEL SIGN MAI SAT
+	{0x1A65, 0x1A6C, prExtend},                 // Mn   [8] TAI THAM VOWEL SIGN I..TAI THAM VOWEL SIGN OA BELOW
+	{0x1A6D, 0x1A72, prSpacingMark},            // Mc   [6] TAI THAM VOWEL SIGN OY..TAI THAM VOWEL SIGN THAM AI
+	{0x1A73, 0x1A7C, prExtend},                 // Mn  [10] TAI THAM VOWEL SIGN OA ABOVE..TAI THAM SIGN KHUEN-LUE KARAN
+	{0x1A7F, 0x1A7F, prExtend},                 // Mn       TAI THAM COMBINING CRYPTOGRAMMIC DOT
+	{0x1AB0, 0x1ABD, prExtend},                 // Mn  [14] COMBINING DOUBLED CIRCUMFLEX ACCENT..COMBINING PARENTHESES BELOW
+	{0x1ABE, 0x1ABE, prExtend},                 // Me       COMBINING PARENTHESES OVERLAY
+	{0x1B00, 0x1B03, prExtend},                 // Mn   [4] BALINESE SIGN ULU RICEM..BALINESE SIGN SURANG
+	{0x1B04, 0x1B04, prSpacingMark},            // Mc       BALINESE SIGN BISAH
+	{0x1B34, 0x1B34, prExtend},                 // Mn       BALINESE SIGN REREKAN
+	{0x1B35, 0x1B35, prExtend},                 // Mc       BALINESE VOWEL SIGN TEDUNG
+	{0x1B36, 0x1B3A, prExtend},                 // Mn   [5] BALINESE VOWEL SIGN ULU..BALINESE VOWEL SIGN RA REPA
+	{0x1B3B, 0x1B3B, prSpacingMark},            // Mc       BALINESE VOWEL SIGN RA REPA TEDUNG
+	{0x1B3C, 0x1B3C, prExtend},                 // Mn       BALINESE VOWEL SIGN LA LENGA
+	{0x1B3D, 0x1B41, prSpacingMark},            // Mc   [5] BALINESE VOWEL SIGN LA LENGA TEDUNG..BALINESE VOWEL SIGN TALING REPA TEDUNG
+	{0x1B42, 0x1B42, prExtend},                 // Mn       BALINESE VOWEL SIGN PEPET
+	{0x1B43, 0x1B44, prSpacingMark},            // Mc   [2] BALINESE VOWEL SIGN PEPET TEDUNG..BALINESE ADEG ADEG
+	{0x1B6B, 0x1B73, prExtend},                 // Mn   [9] BALINESE MUSICAL SYMBOL COMBINING TEGEH..BALINESE MUSICAL SYMBOL COMBINING GONG
+	{0x1B80, 0x1B81, prExtend},                 // Mn   [2] SUNDANESE SIGN PANYECEK..SUNDANESE SIGN PANGLAYAR
+	{0x1B82, 0x1B82, prSpacingMark},            // Mc       SUNDANESE SIGN PANGWISAD
+	{0x1BA1, 0x1BA1, prSpacingMark},            // Mc       SUNDANESE CONSONANT SIGN PAMINGKAL
+	{0x1BA2, 0x1BA5, prExtend},                 // Mn   [4] SUNDANESE CONSONANT SIGN PANYAKRA..SUNDANESE VOWEL SIGN PANYUKU
+	{0x1BA6, 0x1BA7, prSpacingMark},            // Mc   [2] SUNDANESE VOWEL SIGN PANAELAENG..SUNDANESE VOWEL SIGN PANOLONG
+	{0x1BA8, 0x1BA9, prExtend},                 // Mn   [2] SUNDANESE VOWEL SIGN PAMEPET..SUNDANESE VOWEL SIGN PANEULEUNG
+	{0x1BAA, 0x1BAA, prSpacingMark},            // Mc       SUNDANESE SIGN PAMAAEH
+	{0x1BAB, 0x1BAD, prExtend},                 // Mn   [3] SUNDANESE SIGN VIRAMA..SUNDANESE CONSONANT SIGN PASANGAN WA
+	{0x1BE6, 0x1BE6, prExtend},                 // Mn       BATAK SIGN TOMPI
+	{0x1BE7, 0x1BE7, prSpacingMark},            // Mc       BATAK VOWEL SIGN E
+	{0x1BE8, 0x1BE9, prExtend},                 // Mn   [2] BATAK VOWEL SIGN PAKPAK E..BATAK VOWEL SIGN EE
+	{0x1BEA, 0x1BEC, prSpacingMark},            // Mc   [3] BATAK VOWEL SIGN I..BATAK VOWEL SIGN O
+	{0x1BED, 0x1BED, prExtend},                 // Mn       BATAK VOWEL SIGN KARO O
+	{0x1BEE, 0x1BEE, prSpacingMark},            // Mc       BATAK VOWEL SIGN U
+	{0x1BEF, 0x1BF1, prExtend},                 // Mn   [3] BATAK VOWEL SIGN U FOR SIMALUNGUN SA..BATAK CONSONANT SIGN H
+	{0x1BF2, 0x1BF3, prSpacingMark},            // Mc   [2] BATAK PANGOLAT..BATAK PANONGONAN
+	{0x1C24, 0x1C2B, prSpacingMark},            // Mc   [8] LEPCHA SUBJOINED LETTER YA..LEPCHA VOWEL SIGN UU
+	{0x1C2C, 0x1C33, prExtend},                 // Mn   [8] LEPCHA VOWEL SIGN E..LEPCHA CONSONANT SIGN T
+	{0x1C34, 0x1C35, prSpacingMark},            // Mc   [2] LEPCHA CONSONANT SIGN NYIN-DO..LEPCHA CONSONANT SIGN KANG
+	{0x1C36, 0x1C37, prExtend},                 // Mn   [2] LEPCHA SIGN RAN..LEPCHA SIGN NUKTA
+	{0x1CD0, 0x1CD2, prExtend},                 // Mn   [3] VEDIC TONE KARSHANA..VEDIC TONE PRENKHA
+	{0x1CD4, 0x1CE0, prExtend},                 // Mn  [13] VEDIC SIGN YAJURVEDIC MIDLINE SVARITA..VEDIC TONE RIGVEDIC KASHMIRI INDEPENDENT SVARITA
+	{0x1CE1, 0x1CE1, prSpacingMark},            // Mc       VEDIC TONE ATHARVAVEDIC INDEPENDENT SVARITA
+	{0x1CE2, 0x1CE8, prExtend},                 // Mn   [7] VEDIC SIGN VISARGA SVARITA..VEDIC SIGN VISARGA ANUDATTA WITH TAIL
+	{0x1CED, 0x1CED, prExtend},                 // Mn       VEDIC SIGN TIRYAK
+	{0x1CF4, 0x1CF4, prExtend},                 // Mn       VEDIC TONE CANDRA ABOVE
+	{0x1CF7, 0x1CF7, prSpacingMark},            // Mc       VEDIC SIGN ATIKRAMA
+	{0x1CF8, 0x1CF9, prExtend},                 // Mn   [2] VEDIC TONE RING ABOVE..VEDIC TONE DOUBLE RING ABOVE
+	{0x1DC0, 0x1DF9, prExtend},                 // Mn  [58] COMBINING DOTTED GRAVE ACCENT..COMBINING WIDE INVERTED BRIDGE BELOW
+	{0x1DFB, 0x1DFF, prExtend},                 // Mn   [5] COMBINING DELETION MARK..COMBINING RIGHT ARROWHEAD AND DOWN ARROWHEAD BELOW
+	{0x200B, 0x200B, prControl},                // Cf       ZERO WIDTH SPACE
+	{0x200C, 0x200C, prExtend},                 // Cf       ZERO WIDTH NON-JOINER
+	{0x200D, 0x200D, prZWJ},                    // Cf       ZERO WIDTH JOINER
+	{0x200E, 0x200F, prControl},                // Cf   [2] LEFT-TO-RIGHT MARK..RIGHT-TO-LEFT MARK
+	{0x2028, 0x2028, prControl},                // Zl       LINE SEPARATOR
+	{0x2029, 0x2029, prControl},                // Zp       PARAGRAPH SEPARATOR
+	{0x202A, 0x202E, prControl},                // Cf   [5] LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
+	{0x203C, 0x203C, prExtendedPictographic},   //  1.1  [1] (‼️)       double exclamation mark
+	{0x2049, 0x2049, prExtendedPictographic},   //  3.0  [1] (⁉️)       exclamation question mark
+	{0x2060, 0x2064, prControl},                // Cf   [5] WORD JOINER..INVISIBLE PLUS
+	{0x2065, 0x2065, prControl},                // Cn       <reserved-2065>
+	{0x2066, 0x206F, prControl},                // Cf  [10] LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
+	{0x20D0, 0x20DC, prExtend},                 // Mn  [13] COMBINING LEFT HARPOON ABOVE..COMBINING FOUR DOTS ABOVE
+	{0x20DD, 0x20E0, prExtend},                 // Me   [4] COMBINING ENCLOSING CIRCLE..COMBINING ENCLOSING CIRCLE BACKSLASH
+	{0x20E1, 0x20E1, prExtend},                 // Mn       COMBINING LEFT RIGHT ARROW ABOVE
+	{0x20E2, 0x20E4, prExtend},                 // Me   [3] COMBINING ENCLOSING SCREEN..COMBINING ENCLOSING UPWARD POINTING TRIANGLE
+	{0x20E5, 0x20F0, prExtend},                 // Mn  [12] COMBINING REVERSE SOLIDUS OVERLAY..COMBINING ASTERISK ABOVE
+	{0x2122, 0x2122, prExtendedPictographic},   //  1.1  [1] (™️)       trade mark
+	{0x2139, 0x2139, prExtendedPictographic},   //  3.0  [1] (ℹ️)       information
+	{0x2194, 0x2199, prExtendedPictographic},   //  1.1  [6] (↔️..↙️)    left-right arrow..down-left arrow
+	{0x21A9, 0x21AA, prExtendedPictographic},   //  1.1  [2] (↩️..↪️)    right arrow curving left..left arrow curving right
+	{0x231A, 0x231B, prExtendedPictographic},   //  1.1  [2] (⌚..⌛)    watch..hourglass done
+	{0x2328, 0x2328, prExtendedPictographic},   //  1.1  [1] (⌨️)       keyboard
+	{0x2388, 0x2388, prExtendedPictographic},   //  3.0  [1] (⎈)       HELM SYMBOL
+	{0x23CF, 0x23CF, prExtendedPictographic},   //  4.0  [1] (⏏️)       eject button
+	{0x23E9, 0x23F3, prExtendedPictographic},   //  6.0 [11] (⏩..⏳)    fast-forward button..hourglass not done
+	{0x23F8, 0x23FA, prExtendedPictographic},   //  7.0  [3] (⏸️..⏺️)    pause button..record button
+	{0x24C2, 0x24C2, prExtendedPictographic},   //  1.1  [1] (Ⓜ️)       circled M
+	{0x25AA, 0x25AB, prExtendedPictographic},   //  1.1  [2] (▪️..▫️)    black small square..white small square
+	{0x25B6, 0x25B6, prExtendedPictographic},   //  1.1  [1] (▶️)       play button
+	{0x25C0, 0x25C0, prExtendedPictographic},   //  1.1  [1] (◀️)       reverse button
+	{0x25FB, 0x25FE, prExtendedPictographic},   //  3.2  [4] (◻️..◾)    white medium square..black medium-small square
+	{0x2600, 0x2605, prExtendedPictographic},   //  1.1  [6] (☀️..★)    sun..BLACK STAR
+	{0x2607, 0x2612, prExtendedPictographic},   //  1.1 [12] (☇..☒)    LIGHTNING..BALLOT BOX WITH X
+	{0x2614, 0x2615, prExtendedPictographic},   //  4.0  [2] (☔..☕)    umbrella with rain drops..hot beverage
+	{0x2616, 0x2617, prExtendedPictographic},   //  3.2  [2] (☖..☗)    WHITE SHOGI PIECE..BLACK SHOGI PIECE
+	{0x2618, 0x2618, prExtendedPictographic},   //  4.1  [1] (☘️)       shamrock
+	{0x2619, 0x2619, prExtendedPictographic},   //  3.0  [1] (☙)       REVERSED ROTATED FLORAL HEART BULLET
+	{0x261A, 0x266F, prExtendedPictographic},   //  1.1 [86] (☚..♯)    BLACK LEFT POINTING INDEX..MUSIC SHARP SIGN
+	{0x2670, 0x2671, prExtendedPictographic},   //  3.0  [2] (♰..♱)    WEST SYRIAC CROSS..EAST SYRIAC CROSS
+	{0x2672, 0x267D, prExtendedPictographic},   //  3.2 [12] (♲..♽)    UNIVERSAL RECYCLING SYMBOL..PARTIALLY-RECYCLED PAPER SYMBOL
+	{0x267E, 0x267F, prExtendedPictographic},   //  4.1  [2] (♾️..♿)    infinity..wheelchair symbol
+	{0x2680, 0x2685, prExtendedPictographic},   //  3.2  [6] (⚀..⚅)    DIE FACE-1..DIE FACE-6
+	{0x2690, 0x2691, prExtendedPictographic},   //  4.0  [2] (⚐..⚑)    WHITE FLAG..BLACK FLAG
+	{0x2692, 0x269C, prExtendedPictographic},   //  4.1 [11] (⚒️..⚜️)    hammer and pick..fleur-de-lis
+	{0x269D, 0x269D, prExtendedPictographic},   //  5.1  [1] (⚝)       OUTLINED WHITE STAR
+	{0x269E, 0x269F, prExtendedPictographic},   //  5.2  [2] (⚞..⚟)    THREE LINES CONVERGING RIGHT..THREE LINES CONVERGING LEFT
+	{0x26A0, 0x26A1, prExtendedPictographic},   //  4.0  [2] (⚠️..⚡)    warning..high voltage
+	{0x26A2, 0x26B1, prExtendedPictographic},   //  4.1 [16] (⚢..⚱️)    DOUBLED FEMALE SIGN..funeral urn
+	{0x26B2, 0x26B2, prExtendedPictographic},   //  5.0  [1] (⚲)       NEUTER
+	{0x26B3, 0x26BC, prExtendedPictographic},   //  5.1 [10] (⚳..⚼)    CERES..SESQUIQUADRATE
+	{0x26BD, 0x26BF, prExtendedPictographic},   //  5.2  [3] (⚽..⚿)    soccer ball..SQUARED KEY
+	{0x26C0, 0x26C3, prExtendedPictographic},   //  5.1  [4] (⛀..⛃)    WHITE DRAUGHTS MAN..BLACK DRAUGHTS KING
+	{0x26C4, 0x26CD, prExtendedPictographic},   //  5.2 [10] (⛄..⛍)    snowman without snow..DISABLED CAR
+	{0x26CE, 0x26CE, prExtendedPictographic},   //  6.0  [1] (⛎)       Ophiuchus
+	{0x26CF, 0x26E1, prExtendedPictographic},   //  5.2 [19] (⛏️..⛡)    pick..RESTRICTED LEFT ENTRY-2
+	{0x26E2, 0x26E2, prExtendedPictographic},   //  6.0  [1] (⛢)       ASTRONOMICAL SYMBOL FOR URANUS
+	{0x26E3, 0x26E3, prExtendedPictographic},   //  5.2  [1] (⛣)       HEAVY CIRCLE WITH STROKE AND TWO DOTS ABOVE
+	{0x26E4, 0x26E7, prExtendedPictographic},   //  6.0  [4] (⛤..⛧)    PENTAGRAM..INVERTED PENTAGRAM
+	{0x26E8, 0x26FF, prExtendedPictographic},   //  5.2 [24] (⛨..⛿)    BLACK CROSS ON SHIELD..WHITE FLAG WITH HORIZONTAL MIDDLE BLACK STRIPE
+	{0x2700, 0x2700, prExtendedPictographic},   //  7.0  [1] (✀)       BLACK SAFETY SCISSORS
+	{0x2701, 0x2704, prExtendedPictographic},   //  1.1  [4] (✁..✄)    UPPER BLADE SCISSORS..WHITE SCISSORS
+	{0x2705, 0x2705, prExtendedPictographic},   //  6.0  [1] (✅)       check mark button
+	{0x2708, 0x2709, prExtendedPictographic},   //  1.1  [2] (✈️..✉️)    airplane..envelope
+	{0x270A, 0x270B, prExtendedPictographic},   //  6.0  [2] (✊..✋)    raised fist..raised hand
+	{0x270C, 0x2712, prExtendedPictographic},   //  1.1  [7] (✌️..✒️)    victory hand..black nib
+	{0x2714, 0x2714, prExtendedPictographic},   //  1.1  [1] (✔️)       check mark
+	{0x2716, 0x2716, prExtendedPictographic},   //  1.1  [1] (✖️)       multiplication sign
+	{0x271D, 0x271D, prExtendedPictographic},   //  1.1  [1] (✝️)       latin cross
+	{0x2721, 0x2721, prExtendedPictographic},   //  1.1  [1] (✡️)       star of David
+	{0x2728, 0x2728, prExtendedPictographic},   //  6.0  [1] (✨)       sparkles
+	{0x2733, 0x2734, prExtendedPictographic},   //  1.1  [2] (✳️..✴️)    eight-spoked asterisk..eight-pointed star
+	{0x2744, 0x2744, prExtendedPictographic},   //  1.1  [1] (❄️)       snowflake
+	{0x2747, 0x2747, prExtendedPictographic},   //  1.1  [1] (❇️)       sparkle
+	{0x274C, 0x274C, prExtendedPictographic},   //  6.0  [1] (❌)       cross mark
+	{0x274E, 0x274E, prExtendedPictographic},   //  6.0  [1] (❎)       cross mark button
+	{0x2753, 0x2755, prExtendedPictographic},   //  6.0  [3] (❓..❕)    question mark..white exclamation mark
+	{0x2757, 0x2757, prExtendedPictographic},   //  5.2  [1] (❗)       exclamation mark
+	{0x2763, 0x2767, prExtendedPictographic},   //  1.1  [5] (❣️..❧)    heart exclamation..ROTATED FLORAL HEART BULLET
+	{0x2795, 0x2797, prExtendedPictographic},   //  6.0  [3] (➕..➗)    plus sign..division sign
+	{0x27A1, 0x27A1, prExtendedPictographic},   //  1.1  [1] (➡️)       right arrow
+	{0x27B0, 0x27B0, prExtendedPictographic},   //  6.0  [1] (➰)       curly loop
+	{0x27BF, 0x27BF, prExtendedPictographic},   //  6.0  [1] (➿)       double curly loop
+	{0x2934, 0x2935, prExtendedPictographic},   //  3.2  [2] (⤴️..⤵️)    right arrow curving up..right arrow curving down
+	{0x2B05, 0x2B07, prExtendedPictographic},   //  4.0  [3] (⬅️..⬇️)    left arrow..down arrow
+	{0x2B1B, 0x2B1C, prExtendedPictographic},   //  5.1  [2] (⬛..⬜)    black large square..white large square
+	{0x2B50, 0x2B50, prExtendedPictographic},   //  5.1  [1] (⭐)       star
+	{0x2B55, 0x2B55, prExtendedPictographic},   //  5.2  [1] (⭕)       hollow red circle
+	{0x2CEF, 0x2CF1, prExtend},                 // Mn   [3] COPTIC COMBINING NI ABOVE..COPTIC COMBINING SPIRITUS LENIS
+	{0x2D7F, 0x2D7F, prExtend},                 // Mn       TIFINAGH CONSONANT JOINER
+	{0x2DE0, 0x2DFF, prExtend},                 // Mn  [32] COMBINING CYRILLIC LETTER BE..COMBINING CYRILLIC LETTER IOTIFIED BIG YUS
+	{0x302A, 0x302D, prExtend},                 // Mn   [4] IDEOGRAPHIC LEVEL TONE MARK..IDEOGRAPHIC ENTERING TONE MARK
+	{0x302E, 0x302F, prExtend},                 // Mc   [2] HANGUL SINGLE DOT TONE MARK..HANGUL DOUBLE DOT TONE MARK
+	{0x3030, 0x3030, prExtendedPictographic},   //  1.1  [1] (〰️)       wavy dash
+	{0x303D, 0x303D, prExtendedPictographic},   //  3.2  [1] (〽️)       part alternation mark
+	{0x3099, 0x309A, prExtend},                 // Mn   [2] COMBINING KATAKANA-HIRAGANA VOICED SOUND MARK..COMBINING KATAKANA-HIRAGANA SEMI-VOICED SOUND MARK
+	{0x3297, 0x3297, prExtendedPictographic},   //  1.1  [1] (㊗️)       Japanese “congratulations” button
+	{0x3299, 0x3299, prExtendedPictographic},   //  1.1  [1] (㊙️)       Japanese “secret” button
+	{0xA66F, 0xA66F, prExtend},                 // Mn       COMBINING CYRILLIC VZMET
+	{0xA670, 0xA672, prExtend},                 // Me   [3] COMBINING CYRILLIC TEN MILLIONS SIGN..COMBINING CYRILLIC THOUSAND MILLIONS SIGN
+	{0xA674, 0xA67D, prExtend},                 // Mn  [10] COMBINING CYRILLIC LETTER UKRAINIAN IE..COMBINING CYRILLIC PAYEROK
+	{0xA69E, 0xA69F, prExtend},                 // Mn   [2] COMBINING CYRILLIC LETTER EF..COMBINING CYRILLIC LETTER IOTIFIED E
+	{0xA6F0, 0xA6F1, prExtend},                 // Mn   [2] BAMUM COMBINING MARK KOQNDON..BAMUM COMBINING MARK TUKWENTIS
+	{0xA802, 0xA802, prExtend},                 // Mn       SYLOTI NAGRI SIGN DVISVARA
+	{0xA806, 0xA806, prExtend},                 // Mn       SYLOTI NAGRI SIGN HASANTA
+	{0xA80B, 0xA80B, prExtend},                 // Mn       SYLOTI NAGRI SIGN ANUSVARA
+	{0xA823, 0xA824, prSpacingMark},            // Mc   [2] SYLOTI NAGRI VOWEL SIGN A..SYLOTI NAGRI VOWEL SIGN I
+	{0xA825, 0xA826, prExtend},                 // Mn   [2] SYLOTI NAGRI VOWEL SIGN U..SYLOTI NAGRI VOWEL SIGN E
+	{0xA827, 0xA827, prSpacingMark},            // Mc       SYLOTI NAGRI VOWEL SIGN OO
+	{0xA880, 0xA881, prSpacingMark},            // Mc   [2] SAURASHTRA SIGN ANUSVARA..SAURASHTRA SIGN VISARGA
+	{0xA8B4, 0xA8C3, prSpacingMark},            // Mc  [16] SAURASHTRA CONSONANT SIGN HAARU..SAURASHTRA VOWEL SIGN AU
+	{0xA8C4, 0xA8C5, prExtend},                 // Mn   [2] SAURASHTRA SIGN VIRAMA..SAURASHTRA SIGN CANDRABINDU
+	{0xA8E0, 0xA8F1, prExtend},                 // Mn  [18] COMBINING DEVANAGARI DIGIT ZERO..COMBINING DEVANAGARI SIGN AVAGRAHA
+	{0xA8FF, 0xA8FF, prExtend},                 // Mn       DEVANAGARI VOWEL SIGN AY
+	{0xA926, 0xA92D, prExtend},                 // Mn   [8] KAYAH LI VOWEL UE..KAYAH LI TONE CALYA PLOPHU
+	{0xA947, 0xA951, prExtend},                 // Mn  [11] REJANG VOWEL SIGN I..REJANG CONSONANT SIGN R
+	{0xA952, 0xA953, prSpacingMark},            // Mc   [2] REJANG CONSONANT SIGN H..REJANG VIRAMA
+	{0xA960, 0xA97C, prL},                      // Lo  [29] HANGUL CHOSEONG TIKEUT-MIEUM..HANGUL CHOSEONG SSANGYEORINHIEUH
+	{0xA980, 0xA982, prExtend},                 // Mn   [3] JAVANESE SIGN PANYANGGA..JAVANESE SIGN LAYAR
+	{0xA983, 0xA983, prSpacingMark},            // Mc       JAVANESE SIGN WIGNYAN
+	{0xA9B3, 0xA9B3, prExtend},                 // Mn       JAVANESE SIGN CECAK TELU
+	{0xA9B4, 0xA9B5, prSpacingMark},            // Mc   [2] JAVANESE VOWEL SIGN TARUNG..JAVANESE VOWEL SIGN TOLONG
+	{0xA9B6, 0xA9B9, prExtend},                 // Mn   [4] JAVANESE VOWEL SIGN WULU..JAVANESE VOWEL SIGN SUKU MENDUT
+	{0xA9BA, 0xA9BB, prSpacingMark},            // Mc   [2] JAVANESE VOWEL SIGN TALING..JAVANESE VOWEL SIGN DIRGA MURE
+	{0xA9BC, 0xA9BD, prExtend},                 // Mn   [2] JAVANESE VOWEL SIGN PEPET..JAVANESE CONSONANT SIGN KERET
+	{0xA9BE, 0xA9C0, prSpacingMark},            // Mc   [3] JAVANESE CONSONANT SIGN PENGKAL..JAVANESE PANGKON
+	{0xA9E5, 0xA9E5, prExtend},                 // Mn       MYANMAR SIGN SHAN SAW
+	{0xAA29, 0xAA2E, prExtend},                 // Mn   [6] CHAM VOWEL SIGN AA..CHAM VOWEL SIGN OE
+	{0xAA2F, 0xAA30, prSpacingMark},            // Mc   [2] CHAM VOWEL SIGN O..CHAM VOWEL SIGN AI
+	{0xAA31, 0xAA32, prExtend},                 // Mn   [2] CHAM VOWEL SIGN AU..CHAM VOWEL SIGN UE
+	{0xAA33, 0xAA34, prSpacingMark},            // Mc   [2] CHAM CONSONANT SIGN YA..CHAM CONSONANT SIGN RA
+	{0xAA35, 0xAA36, prExtend},                 // Mn   [2] CHAM CONSONANT SIGN LA..CHAM CONSONANT SIGN WA
+	{0xAA43, 0xAA43, prExtend},                 // Mn       CHAM CONSONANT SIGN FINAL NG
+	{0xAA4C, 0xAA4C, prExtend},                 // Mn       CHAM CONSONANT SIGN FINAL M
+	{0xAA4D, 0xAA4D, prSpacingMark},            // Mc       CHAM CONSONANT SIGN FINAL H
+	{0xAA7C, 0xAA7C, prExtend},                 // Mn       MYANMAR SIGN TAI LAING TONE-2
+	{0xAAB0, 0xAAB0, prExtend},                 // Mn       TAI VIET MAI KANG
+	{0xAAB2, 0xAAB4, prExtend},                 // Mn   [3] TAI VIET VOWEL I..TAI VIET VOWEL U
+	{0xAAB7, 0xAAB8, prExtend},                 // Mn   [2] TAI VIET MAI KHIT..TAI VIET VOWEL IA
+	{0xAABE, 0xAABF, prExtend},                 // Mn   [2] TAI VIET VOWEL AM..TAI VIET TONE MAI EK
+	{0xAAC1, 0xAAC1, prExtend},                 // Mn       TAI VIET TONE MAI THO
+	{0xAAEB, 0xAAEB, prSpacingMark},            // Mc       MEETEI MAYEK VOWEL SIGN II
+	{0xAAEC, 0xAAED, prExtend},                 // Mn   [2] MEETEI MAYEK VOWEL SIGN UU..MEETEI MAYEK VOWEL SIGN AAI
+	{0xAAEE, 0xAAEF, prSpacingMark},            // Mc   [2] MEETEI MAYEK VOWEL SIGN AU..MEETEI MAYEK VOWEL SIGN AAU
+	{0xAAF5, 0xAAF5, prSpacingMark},            // Mc       MEETEI MAYEK VOWEL SIGN VISARGA
+	{0xAAF6, 0xAAF6, prExtend},                 // Mn       MEETEI MAYEK VIRAMA
+	{0xABE3, 0xABE4, prSpacingMark},            // Mc   [2] MEETEI MAYEK VOWEL SIGN ONAP..MEETEI MAYEK VOWEL SIGN INAP
+	{0xABE5, 0xABE5, prExtend},                 // Mn       MEETEI MAYEK VOWEL SIGN ANAP
+	{0xABE6, 0xABE7, prSpacingMark},            // Mc   [2] MEETEI MAYEK VOWEL SIGN YENAP..MEETEI MAYEK VOWEL SIGN SOUNAP
+	{0xABE8, 0xABE8, prExtend},                 // Mn       MEETEI MAYEK VOWEL SIGN UNAP
+	{0xABE9, 0xABEA, prSpacingMark},            // Mc   [2] MEETEI MAYEK VOWEL SIGN CHEINAP..MEETEI MAYEK VOWEL SIGN NUNG
+	{0xABEC, 0xABEC, prSpacingMark},            // Mc       MEETEI MAYEK LUM IYEK
+	{0xABED, 0xABED, prExtend},                 // Mn       MEETEI MAYEK APUN IYEK
+	{0xAC00, 0xAC00, prLV},                     // Lo       HANGUL SYLLABLE GA
+	{0xAC01, 0xAC1B, prLVT},                    // Lo  [27] HANGUL SYLLABLE GAG..HANGUL SYLLABLE GAH
+	{0xAC1C, 0xAC1C, prLV},                     // Lo       HANGUL SYLLABLE GAE
+	{0xAC1D, 0xAC37, prLVT},                    // Lo  [27] HANGUL SYLLABLE GAEG..HANGUL SYLLABLE GAEH
+	{0xAC38, 0xAC38, prLV},                     // Lo       HANGUL SYLLABLE GYA
+	{0xAC39, 0xAC53, prLVT},                    // Lo  [27] HANGUL SYLLABLE GYAG..HANGUL SYLLABLE GYAH
+	{0xAC54, 0xAC54, prLV},                     // Lo       HANGUL SYLLABLE GYAE
+	{0xAC55, 0xAC6F, prLVT},                    // Lo  [27] HANGUL SYLLABLE GYAEG..HANGUL SYLLABLE GYAEH
+	{0xAC70, 0xAC70, prLV},                     // Lo       HANGUL SYLLABLE GEO
+	{0xAC71, 0xAC8B, prLVT},                    // Lo  [27] HANGUL SYLLABLE GEOG..HANGUL SYLLABLE GEOH
+	{0xAC8C, 0xAC8C, prLV},                     // Lo       HANGUL SYLLABLE GE
+	{0xAC8D, 0xACA7, prLVT},                    // Lo  [27] HANGUL SYLLABLE GEG..HANGUL SYLLABLE GEH
+	{0xACA8, 0xACA8, prLV},                     // Lo       HANGUL SYLLABLE GYEO
+	{0xACA9, 0xACC3, prLVT},                    // Lo  [27] HANGUL SYLLABLE GYEOG..HANGUL SYLLABLE GYEOH
+	{0xACC4, 0xACC4, prLV},                     // Lo       HANGUL SYLLABLE GYE
+	{0xACC5, 0xACDF, prLVT},                    // Lo  [27] HANGUL SYLLABLE GYEG..HANGUL SYLLABLE GYEH
+	{0xACE0, 0xACE0, prLV},                     // Lo       HANGUL SYLLABLE GO
+	{0xACE1, 0xACFB, prLVT},                    // Lo  [27] HANGUL SYLLABLE GOG..HANGUL SYLLABLE GOH
+	{0xACFC, 0xACFC, prLV},                     // Lo       HANGUL SYLLABLE GWA
+	{0xACFD, 0xAD17, prLVT},                    // Lo  [27] HANGUL SYLLABLE GWAG..HANGUL SYLLABLE GWAH
+	{0xAD18, 0xAD18, prLV},                     // Lo       HANGUL SYLLABLE GWAE
+	{0xAD19, 0xAD33, prLVT},                    // Lo  [27] HANGUL SYLLABLE GWAEG..HANGUL SYLLABLE GWAEH
+	{0xAD34, 0xAD34, prLV},                     // Lo       HANGUL SYLLABLE GOE
+	{0xAD35, 0xAD4F, prLVT},                    // Lo  [27] HANGUL SYLLABLE GOEG..HANGUL SYLLABLE GOEH
+	{0xAD50, 0xAD50, prLV},                     // Lo       HANGUL SYLLABLE GYO
+	{0xAD51, 0xAD6B, prLVT},                    // Lo  [27] HANGUL SYLLABLE GYOG..HANGUL SYLLABLE GYOH
+	{0xAD6C, 0xAD6C, prLV},                     // Lo       HANGUL SYLLABLE GU
+	{0xAD6D, 0xAD87, prLVT},                    // Lo  [27] HANGUL SYLLABLE GUG..HANGUL SYLLABLE GUH
+	{0xAD88, 0xAD88, prLV},                     // Lo       HANGUL SYLLABLE GWEO
+	{0xAD89, 0xADA3, prLVT},                    // Lo  [27] HANGUL SYLLABLE GWEOG..HANGUL SYLLABLE GWEOH
+	{0xADA4, 0xADA4, prLV},                     // Lo       HANGUL SYLLABLE GWE
+	{0xADA5, 0xADBF, prLVT},                    // Lo  [27] HANGUL SYLLABLE GWEG..HANGUL SYLLABLE GWEH
+	{0xADC0, 0xADC0, prLV},                     // Lo       HANGUL SYLLABLE GWI
+	{0xADC1, 0xADDB, prLVT},                    // Lo  [27] HANGUL SYLLABLE GWIG..HANGUL SYLLABLE GWIH
+	{0xADDC, 0xADDC, prLV},                     // Lo       HANGUL SYLLABLE GYU
+	{0xADDD, 0xADF7, prLVT},                    // Lo  [27] HANGUL SYLLABLE GYUG..HANGUL SYLLABLE GYUH
+	{0xADF8, 0xADF8, prLV},                     // Lo       HANGUL SYLLABLE GEU
+	{0xADF9, 0xAE13, prLVT},                    // Lo  [27] HANGUL SYLLABLE GEUG..HANGUL SYLLABLE GEUH
+	{0xAE14, 0xAE14, prLV},                     // Lo       HANGUL SYLLABLE GYI
+	{0xAE15, 0xAE2F, prLVT},                    // Lo  [27] HANGUL SYLLABLE GYIG..HANGUL SYLLABLE GYIH
+	{0xAE30, 0xAE30, prLV},                     // Lo       HANGUL SYLLABLE GI
+	{0xAE31, 0xAE4B, prLVT},                    // Lo  [27] HANGUL SYLLABLE GIG..HANGUL SYLLABLE GIH
+	{0xAE4C, 0xAE4C, prLV},                     // Lo       HANGUL SYLLABLE GGA
+	{0xAE4D, 0xAE67, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGAG..HANGUL SYLLABLE GGAH
+	{0xAE68, 0xAE68, prLV},                     // Lo       HANGUL SYLLABLE GGAE
+	{0xAE69, 0xAE83, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGAEG..HANGUL SYLLABLE GGAEH
+	{0xAE84, 0xAE84, prLV},                     // Lo       HANGUL SYLLABLE GGYA
+	{0xAE85, 0xAE9F, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGYAG..HANGUL SYLLABLE GGYAH
+	{0xAEA0, 0xAEA0, prLV},                     // Lo       HANGUL SYLLABLE GGYAE
+	{0xAEA1, 0xAEBB, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGYAEG..HANGUL SYLLABLE GGYAEH
+	{0xAEBC, 0xAEBC, prLV},                     // Lo       HANGUL SYLLABLE GGEO
+	{0xAEBD, 0xAED7, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGEOG..HANGUL SYLLABLE GGEOH
+	{0xAED8, 0xAED8, prLV},                     // Lo       HANGUL SYLLABLE GGE
+	{0xAED9, 0xAEF3, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGEG..HANGUL SYLLABLE GGEH
+	{0xAEF4, 0xAEF4, prLV},                     // Lo       HANGUL SYLLABLE GGYEO
+	{0xAEF5, 0xAF0F, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGYEOG..HANGUL SYLLABLE GGYEOH
+	{0xAF10, 0xAF10, prLV},                     // Lo       HANGUL SYLLABLE GGYE
+	{0xAF11, 0xAF2B, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGYEG..HANGUL SYLLABLE GGYEH
+	{0xAF2C, 0xAF2C, prLV},                     // Lo       HANGUL SYLLABLE GGO
+	{0xAF2D, 0xAF47, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGOG..HANGUL SYLLABLE GGOH
+	{0xAF48, 0xAF48, prLV},                     // Lo       HANGUL SYLLABLE GGWA
+	{0xAF49, 0xAF63, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGWAG..HANGUL SYLLABLE GGWAH
+	{0xAF64, 0xAF64, prLV},                     // Lo       HANGUL SYLLABLE GGWAE
+	{0xAF65, 0xAF7F, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGWAEG..HANGUL SYLLABLE GGWAEH
+	{0xAF80, 0xAF80, prLV},                     // Lo       HANGUL SYLLABLE GGOE
+	{0xAF81, 0xAF9B, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGOEG..HANGUL SYLLABLE GGOEH
+	{0xAF9C, 0xAF9C, prLV},                     // Lo       HANGUL SYLLABLE GGYO
+	{0xAF9D, 0xAFB7, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGYOG..HANGUL SYLLABLE GGYOH
+	{0xAFB8, 0xAFB8, prLV},                     // Lo       HANGUL SYLLABLE GGU
+	{0xAFB9, 0xAFD3, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGUG..HANGUL SYLLABLE GGUH
+	{0xAFD4, 0xAFD4, prLV},                     // Lo       HANGUL SYLLABLE GGWEO
+	{0xAFD5, 0xAFEF, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGWEOG..HANGUL SYLLABLE GGWEOH
+	{0xAFF0, 0xAFF0, prLV},                     // Lo       HANGUL SYLLABLE GGWE
+	{0xAFF1, 0xB00B, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGWEG..HANGUL SYLLABLE GGWEH
+	{0xB00C, 0xB00C, prLV},                     // Lo       HANGUL SYLLABLE GGWI
+	{0xB00D, 0xB027, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGWIG..HANGUL SYLLABLE GGWIH
+	{0xB028, 0xB028, prLV},                     // Lo       HANGUL SYLLABLE GGYU
+	{0xB029, 0xB043, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGYUG..HANGUL SYLLABLE GGYUH
+	{0xB044, 0xB044, prLV},                     // Lo       HANGUL SYLLABLE GGEU
+	{0xB045, 0xB05F, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGEUG..HANGUL SYLLABLE GGEUH
+	{0xB060, 0xB060, prLV},                     // Lo       HANGUL SYLLABLE GGYI
+	{0xB061, 0xB07B, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGYIG..HANGUL SYLLABLE GGYIH
+	{0xB07C, 0xB07C, prLV},                     // Lo       HANGUL SYLLABLE GGI
+	{0xB07D, 0xB097, prLVT},                    // Lo  [27] HANGUL SYLLABLE GGIG..HANGUL SYLLABLE GGIH
+	{0xB098, 0xB098, prLV},                     // Lo       HANGUL SYLLABLE NA
+	{0xB099, 0xB0B3, prLVT},                    // Lo  [27] HANGUL SYLLABLE NAG..HANGUL SYLLABLE NAH
+	{0xB0B4, 0xB0B4, prLV},                     // Lo       HANGUL SYLLABLE NAE
+	{0xB0B5, 0xB0CF, prLVT},                    // Lo  [27] HANGUL SYLLABLE NAEG..HANGUL SYLLABLE NAEH
+	{0xB0D0, 0xB0D0, prLV},                     // Lo       HANGUL SYLLABLE NYA
+	{0xB0D1, 0xB0EB, prLVT},                    // Lo  [27] HANGUL SYLLABLE NYAG..HANGUL SYLLABLE NYAH
+	{0xB0EC, 0xB0EC, prLV},                     // Lo       HANGUL SYLLABLE NYAE
+	{0xB0ED, 0xB107, prLVT},                    // Lo  [27] HANGUL SYLLABLE NYAEG..HANGUL SYLLABLE NYAEH
+	{0xB108, 0xB108, prLV},                     // Lo       HANGUL SYLLABLE NEO
+	{0xB109, 0xB123, prLVT},                    // Lo  [27] HANGUL SYLLABLE NEOG..HANGUL SYLLABLE NEOH
+	{0xB124, 0xB124, prLV},                     // Lo       HANGUL SYLLABLE NE
+	{0xB125, 0xB13F, prLVT},                    // Lo  [27] HANGUL SYLLABLE NEG..HANGUL SYLLABLE NEH
+	{0xB140, 0xB140, prLV},                     // Lo       HANGUL SYLLABLE NYEO
+	{0xB141, 0xB15B, prLVT},                    // Lo  [27] HANGUL SYLLABLE NYEOG..HANGUL SYLLABLE NYEOH
+	{0xB15C, 0xB15C, prLV},                     // Lo       HANGUL SYLLABLE NYE
+	{0xB15D, 0xB177, prLVT},                    // Lo  [27] HANGUL SYLLABLE NYEG..HANGUL SYLLABLE NYEH
+	{0xB178, 0xB178, prLV},                     // Lo       HANGUL SYLLABLE NO
+	{0xB179, 0xB193, prLVT},                    // Lo  [27] HANGUL SYLLABLE NOG..HANGUL SYLLABLE NOH
+	{0xB194, 0xB194, prLV},                     // Lo       HANGUL SYLLABLE NWA
+	{0xB195, 0xB1AF, prLVT},                    // Lo  [27] HANGUL SYLLABLE NWAG..HANGUL SYLLABLE NWAH
+	{0xB1B0, 0xB1B0, prLV},                     // Lo       HANGUL SYLLABLE NWAE
+	{0xB1B1, 0xB1CB, prLVT},                    // Lo  [27] HANGUL SYLLABLE NWAEG..HANGUL SYLLABLE NWAEH
+	{0xB1CC, 0xB1CC, prLV},                     // Lo       HANGUL SYLLABLE NOE
+	{0xB1CD, 0xB1E7, prLVT},                    // Lo  [27] HANGUL SYLLABLE NOEG..HANGUL SYLLABLE NOEH
+	{0xB1E8, 0xB1E8, prLV},                     // Lo       HANGUL SYLLABLE NYO
+	{0xB1E9, 0xB203, prLVT},                    // Lo  [27] HANGUL SYLLABLE NYOG..HANGUL SYLLABLE NYOH
+	{0xB204, 0xB204, prLV},                     // Lo       HANGUL SYLLABLE NU
+	{0xB205, 0xB21F, prLVT},                    // Lo  [27] HANGUL SYLLABLE NUG..HANGUL SYLLABLE NUH
+	{0xB220, 0xB220, prLV},                     // Lo       HANGUL SYLLABLE NWEO
+	{0xB221, 0xB23B, prLVT},                    // Lo  [27] HANGUL SYLLABLE NWEOG..HANGUL SYLLABLE NWEOH
+	{0xB23C, 0xB23C, prLV},                     // Lo       HANGUL SYLLABLE NWE
+	{0xB23D, 0xB257, prLVT},                    // Lo  [27] HANGUL SYLLABLE NWEG..HANGUL SYLLABLE NWEH
+	{0xB258, 0xB258, prLV},                     // Lo       HANGUL SYLLABLE NWI
+	{0xB259, 0xB273, prLVT},                    // Lo  [27] HANGUL SYLLABLE NWIG..HANGUL SYLLABLE NWIH
+	{0xB274, 0xB274, prLV},                     // Lo       HANGUL SYLLABLE NYU
+	{0xB275, 0xB28F, prLVT},                    // Lo  [27] HANGUL SYLLABLE NYUG..HANGUL SYLLABLE NYUH
+	{0xB290, 0xB290, prLV},                     // Lo       HANGUL SYLLABLE NEU
+	{0xB291, 0xB2AB, prLVT},                    // Lo  [27] HANGUL SYLLABLE NEUG..HANGUL SYLLABLE NEUH
+	{0xB2AC, 0xB2AC, prLV},                     // Lo       HANGUL SYLLABLE NYI
+	{0xB2AD, 0xB2C7, prLVT},                    // Lo  [27] HANGUL SYLLABLE NYIG..HANGUL SYLLABLE NYIH
+	{0xB2C8, 0xB2C8, prLV},                     // Lo       HANGUL SYLLABLE NI
+	{0xB2C9, 0xB2E3, prLVT},                    // Lo  [27] HANGUL SYLLABLE NIG..HANGUL SYLLABLE NIH
+	{0xB2E4, 0xB2E4, prLV},                     // Lo       HANGUL SYLLABLE DA
+	{0xB2E5, 0xB2FF, prLVT},                    // Lo  [27] HANGUL SYLLABLE DAG..HANGUL SYLLABLE DAH
+	{0xB300, 0xB300, prLV},                     // Lo       HANGUL SYLLABLE DAE
+	{0xB301, 0xB31B, prLVT},                    // Lo  [27] HANGUL SYLLABLE DAEG..HANGUL SYLLABLE DAEH
+	{0xB31C, 0xB31C, prLV},                     // Lo       HANGUL SYLLABLE DYA
+	{0xB31D, 0xB337, prLVT},                    // Lo  [27] HANGUL SYLLABLE DYAG..HANGUL SYLLABLE DYAH
+	{0xB338, 0xB338, prLV},                     // Lo       HANGUL SYLLABLE DYAE
+	{0xB339, 0xB353, prLVT},                    // Lo  [27] HANGUL SYLLABLE DYAEG..HANGUL SYLLABLE DYAEH
+	{0xB354, 0xB354, prLV},                     // Lo       HANGUL SYLLABLE DEO
+	{0xB355, 0xB36F, prLVT},                    // Lo  [27] HANGUL SYLLABLE DEOG..HANGUL SYLLABLE DEOH
+	{0xB370, 0xB370, prLV},                     // Lo       HANGUL SYLLABLE DE
+	{0xB371, 0xB38B, prLVT},                    // Lo  [27] HANGUL SYLLABLE DEG..HANGUL SYLLABLE DEH
+	{0xB38C, 0xB38C, prLV},                     // Lo       HANGUL SYLLABLE DYEO
+	{0xB38D, 0xB3A7, prLVT},                    // Lo  [27] HANGUL SYLLABLE DYEOG..HANGUL SYLLABLE DYEOH
+	{0xB3A8, 0xB3A8, prLV},                     // Lo       HANGUL SYLLABLE DYE
+	{0xB3A9, 0xB3C3, prLVT},                    // Lo  [27] HANGUL SYLLABLE DYEG..HANGUL SYLLABLE DYEH
+	{0xB3C4, 0xB3C4, prLV},                     // Lo       HANGUL SYLLABLE DO
+	{0xB3C5, 0xB3DF, prLVT},                    // Lo  [27] HANGUL SYLLABLE DOG..HANGUL SYLLABLE DOH
+	{0xB3E0, 0xB3E0, prLV},                     // Lo       HANGUL SYLLABLE DWA
+	{0xB3E1, 0xB3FB, prLVT},                    // Lo  [27] HANGUL SYLLABLE DWAG..HANGUL SYLLABLE DWAH
+	{0xB3FC, 0xB3FC, prLV},                     // Lo       HANGUL SYLLABLE DWAE
+	{0xB3FD, 0xB417, prLVT},                    // Lo  [27] HANGUL SYLLABLE DWAEG..HANGUL SYLLABLE DWAEH
+	{0xB418, 0xB418, prLV},                     // Lo       HANGUL SYLLABLE DOE
+	{0xB419, 0xB433, prLVT},                    // Lo  [27] HANGUL SYLLABLE DOEG..HANGUL SYLLABLE DOEH
+	{0xB434, 0xB434, prLV},                     // Lo       HANGUL SYLLABLE DYO
+	{0xB435, 0xB44F, prLVT},                    // Lo  [27] HANGUL SYLLABLE DYOG..HANGUL SYLLABLE DYOH
+	{0xB450, 0xB450, prLV},                     // Lo       HANGUL SYLLABLE DU
+	{0xB451, 0xB46B, prLVT},                    // Lo  [27] HANGUL SYLLABLE DUG..HANGUL SYLLABLE DUH
+	{0xB46C, 0xB46C, prLV},                     // Lo       HANGUL SYLLABLE DWEO
+	{0xB46D, 0xB487, prLVT},                    // Lo  [27] HANGUL SYLLABLE DWEOG..HANGUL SYLLABLE DWEOH
+	{0xB488, 0xB488, prLV},                     // Lo       HANGUL SYLLABLE DWE
+	{0xB489, 0xB4A3, prLVT},                    // Lo  [27] HANGUL SYLLABLE DWEG..HANGUL SYLLABLE DWEH
+	{0xB4A4, 0xB4A4, prLV},                     // Lo       HANGUL SYLLABLE DWI
+	{0xB4A5, 0xB4BF, prLVT},                    // Lo  [27] HANGUL SYLLABLE DWIG..HANGUL SYLLABLE DWIH
+	{0xB4C0, 0xB4C0, prLV},                     // Lo       HANGUL SYLLABLE DYU
+	{0xB4C1, 0xB4DB, prLVT},                    // Lo  [27] HANGUL SYLLABLE DYUG..HANGUL SYLLABLE DYUH
+	{0xB4DC, 0xB4DC, prLV},                     // Lo       HANGUL SYLLABLE DEU
+	{0xB4DD, 0xB4F7, prLVT},                    // Lo  [27] HANGUL SYLLABLE DEUG..HANGUL SYLLABLE DEUH
+	{0xB4F8, 0xB4F8, prLV},                     // Lo       HANGUL SYLLABLE DYI
+	{0xB4F9, 0xB513, prLVT},                    // Lo  [27] HANGUL SYLLABLE DYIG..HANGUL SYLLABLE DYIH
+	{0xB514, 0xB514, prLV},                     // Lo       HANGUL SYLLABLE DI
+	{0xB515, 0xB52F, prLVT},                    // Lo  [27] HANGUL SYLLABLE DIG..HANGUL SYLLABLE DIH
+	{0xB530, 0xB530, prLV},                     // Lo       HANGUL SYLLABLE DDA
+	{0xB531, 0xB54B, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDAG..HANGUL SYLLABLE DDAH
+	{0xB54C, 0xB54C, prLV},                     // Lo       HANGUL SYLLABLE DDAE
+	{0xB54D, 0xB567, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDAEG..HANGUL SYLLABLE DDAEH
+	{0xB568, 0xB568, prLV},                     // Lo       HANGUL SYLLABLE DDYA
+	{0xB569, 0xB583, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDYAG..HANGUL SYLLABLE DDYAH
+	{0xB584, 0xB584, prLV},                     // Lo       HANGUL SYLLABLE DDYAE
+	{0xB585, 0xB59F, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDYAEG..HANGUL SYLLABLE DDYAEH
+	{0xB5A0, 0xB5A0, prLV},                     // Lo       HANGUL SYLLABLE DDEO
+	{0xB5A1, 0xB5BB, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDEOG..HANGUL SYLLABLE DDEOH
+	{0xB5BC, 0xB5BC, prLV},                     // Lo       HANGUL SYLLABLE DDE
+	{0xB5BD, 0xB5D7, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDEG..HANGUL SYLLABLE DDEH
+	{0xB5D8, 0xB5D8, prLV},                     // Lo       HANGUL SYLLABLE DDYEO
+	{0xB5D9, 0xB5F3, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDYEOG..HANGUL SYLLABLE DDYEOH
+	{0xB5F4, 0xB5F4, prLV},                     // Lo       HANGUL SYLLABLE DDYE
+	{0xB5F5, 0xB60F, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDYEG..HANGUL SYLLABLE DDYEH
+	{0xB610, 0xB610, prLV},                     // Lo       HANGUL SYLLABLE DDO
+	{0xB611, 0xB62B, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDOG..HANGUL SYLLABLE DDOH
+	{0xB62C, 0xB62C, prLV},                     // Lo       HANGUL SYLLABLE DDWA
+	{0xB62D, 0xB647, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDWAG..HANGUL SYLLABLE DDWAH
+	{0xB648, 0xB648, prLV},                     // Lo       HANGUL SYLLABLE DDWAE
+	{0xB649, 0xB663, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDWAEG..HANGUL SYLLABLE DDWAEH
+	{0xB664, 0xB664, prLV},                     // Lo       HANGUL SYLLABLE DDOE
+	{0xB665, 0xB67F, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDOEG..HANGUL SYLLABLE DDOEH
+	{0xB680, 0xB680, prLV},                     // Lo       HANGUL SYLLABLE DDYO
+	{0xB681, 0xB69B, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDYOG..HANGUL SYLLABLE DDYOH
+	{0xB69C, 0xB69C, prLV},                     // Lo       HANGUL SYLLABLE DDU
+	{0xB69D, 0xB6B7, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDUG..HANGUL SYLLABLE DDUH
+	{0xB6B8, 0xB6B8, prLV},                     // Lo       HANGUL SYLLABLE DDWEO
+	{0xB6B9, 0xB6D3, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDWEOG..HANGUL SYLLABLE DDWEOH
+	{0xB6D4, 0xB6D4, prLV},                     // Lo       HANGUL SYLLABLE DDWE
+	{0xB6D5, 0xB6EF, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDWEG..HANGUL SYLLABLE DDWEH
+	{0xB6F0, 0xB6F0, prLV},                     // Lo       HANGUL SYLLABLE DDWI
+	{0xB6F1, 0xB70B, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDWIG..HANGUL SYLLABLE DDWIH
+	{0xB70C, 0xB70C, prLV},                     // Lo       HANGUL SYLLABLE DDYU
+	{0xB70D, 0xB727, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDYUG..HANGUL SYLLABLE DDYUH
+	{0xB728, 0xB728, prLV},                     // Lo       HANGUL SYLLABLE DDEU
+	{0xB729, 0xB743, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDEUG..HANGUL SYLLABLE DDEUH
+	{0xB744, 0xB744, prLV},                     // Lo       HANGUL SYLLABLE DDYI
+	{0xB745, 0xB75F, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDYIG..HANGUL SYLLABLE DDYIH
+	{0xB760, 0xB760, prLV},                     // Lo       HANGUL SYLLABLE DDI
+	{0xB761, 0xB77B, prLVT},                    // Lo  [27] HANGUL SYLLABLE DDIG..HANGUL SYLLABLE DDIH
+	{0xB77C, 0xB77C, prLV},                     // Lo       HANGUL SYLLABLE RA
+	{0xB77D, 0xB797, prLVT},                    // Lo  [27] HANGUL SYLLABLE RAG..HANGUL SYLLABLE RAH
+	{0xB798, 0xB798, prLV},                     // Lo       HANGUL SYLLABLE RAE
+	{0xB799, 0xB7B3, prLVT},                    // Lo  [27] HANGUL SYLLABLE RAEG..HANGUL SYLLABLE RAEH
+	{0xB7B4, 0xB7B4, prLV},                     // Lo       HANGUL SYLLABLE RYA
+	{0xB7B5, 0xB7CF, prLVT},                    // Lo  [27] HANGUL SYLLABLE RYAG..HANGUL SYLLABLE RYAH
+	{0xB7D0, 0xB7D0, prLV},                     // Lo       HANGUL SYLLABLE RYAE
+	{0xB7D1, 0xB7EB, prLVT},                    // Lo  [27] HANGUL SYLLABLE RYAEG..HANGUL SYLLABLE RYAEH
+	{0xB7EC, 0xB7EC, prLV},                     // Lo       HANGUL SYLLABLE REO
+	{0xB7ED, 0xB807, prLVT},                    // Lo  [27] HANGUL SYLLABLE REOG..HANGUL SYLLABLE REOH
+	{0xB808, 0xB808, prLV},                     // Lo       HANGUL SYLLABLE RE
+	{0xB809, 0xB823, prLVT},                    // Lo  [27] HANGUL SYLLABLE REG..HANGUL SYLLABLE REH
+	{0xB824, 0xB824, prLV},                     // Lo       HANGUL SYLLABLE RYEO
+	{0xB825, 0xB83F, prLVT},                    // Lo  [27] HANGUL SYLLABLE RYEOG..HANGUL SYLLABLE RYEOH
+	{0xB840, 0xB840, prLV},                     // Lo       HANGUL SYLLABLE RYE
+	{0xB841, 0xB85B, prLVT},                    // Lo  [27] HANGUL SYLLABLE RYEG..HANGUL SYLLABLE RYEH
+	{0xB85C, 0xB85C, prLV},                     // Lo       HANGUL SYLLABLE RO
+	{0xB85D, 0xB877, prLVT},                    // Lo  [27] HANGUL SYLLABLE ROG..HANGUL SYLLABLE ROH
+	{0xB878, 0xB878, prLV},                     // Lo       HANGUL SYLLABLE RWA
+	{0xB879, 0xB893, prLVT},                    // Lo  [27] HANGUL SYLLABLE RWAG..HANGUL SYLLABLE RWAH
+	{0xB894, 0xB894, prLV},                     // Lo       HANGUL SYLLABLE RWAE
+	{0xB895, 0xB8AF, prLVT},                    // Lo  [27] HANGUL SYLLABLE RWAEG..HANGUL SYLLABLE RWAEH
+	{0xB8B0, 0xB8B0, prLV},                     // Lo       HANGUL SYLLABLE ROE
+	{0xB8B1, 0xB8CB, prLVT},                    // Lo  [27] HANGUL SYLLABLE ROEG..HANGUL SYLLABLE ROEH
+	{0xB8CC, 0xB8CC, prLV},                     // Lo       HANGUL SYLLABLE RYO
+	{0xB8CD, 0xB8E7, prLVT},                    // Lo  [27] HANGUL SYLLABLE RYOG..HANGUL SYLLABLE RYOH
+	{0xB8E8, 0xB8E8, prLV},                     // Lo       HANGUL SYLLABLE RU
+	{0xB8E9, 0xB903, prLVT},                    // Lo  [27] HANGUL SYLLABLE RUG..HANGUL SYLLABLE RUH
+	{0xB904, 0xB904, prLV},                     // Lo       HANGUL SYLLABLE RWEO
+	{0xB905, 0xB91F, prLVT},                    // Lo  [27] HANGUL SYLLABLE RWEOG..HANGUL SYLLABLE RWEOH
+	{0xB920, 0xB920, prLV},                     // Lo       HANGUL SYLLABLE RWE
+	{0xB921, 0xB93B, prLVT},                    // Lo  [27] HANGUL SYLLABLE RWEG..HANGUL SYLLABLE RWEH
+	{0xB93C, 0xB93C, prLV},                     // Lo       HANGUL SYLLABLE RWI
+	{0xB93D, 0xB957, prLVT},                    // Lo  [27] HANGUL SYLLABLE RWIG..HANGUL SYLLABLE RWIH
+	{0xB958, 0xB958, prLV},                     // Lo       HANGUL SYLLABLE RYU
+	{0xB959, 0xB973, prLVT},                    // Lo  [27] HANGUL SYLLABLE RYUG..HANGUL SYLLABLE RYUH
+	{0xB974, 0xB974, prLV},                     // Lo       HANGUL SYLLABLE REU
+	{0xB975, 0xB98F, prLVT},                    // Lo  [27] HANGUL SYLLABLE REUG..HANGUL SYLLABLE REUH
+	{0xB990, 0xB990, prLV},                     // Lo       HANGUL SYLLABLE RYI
+	{0xB991, 0xB9AB, prLVT},                    // Lo  [27] HANGUL SYLLABLE RYIG..HANGUL SYLLABLE RYIH
+	{0xB9AC, 0xB9AC, prLV},                     // Lo       HANGUL SYLLABLE RI
+	{0xB9AD, 0xB9C7, prLVT},                    // Lo  [27] HANGUL SYLLABLE RIG..HANGUL SYLLABLE RIH
+	{0xB9C8, 0xB9C8, prLV},                     // Lo       HANGUL SYLLABLE MA
+	{0xB9C9, 0xB9E3, prLVT},                    // Lo  [27] HANGUL SYLLABLE MAG..HANGUL SYLLABLE MAH
+	{0xB9E4, 0xB9E4, prLV},                     // Lo       HANGUL SYLLABLE MAE
+	{0xB9E5, 0xB9FF, prLVT},                    // Lo  [27] HANGUL SYLLABLE MAEG..HANGUL SYLLABLE MAEH
+	{0xBA00, 0xBA00, prLV},                     // Lo       HANGUL SYLLABLE MYA
+	{0xBA01, 0xBA1B, prLVT},                    // Lo  [27] HANGUL SYLLABLE MYAG..HANGUL SYLLABLE MYAH
+	{0xBA1C, 0xBA1C, prLV},                     // Lo       HANGUL SYLLABLE MYAE
+	{0xBA1D, 0xBA37, prLVT},                    // Lo  [27] HANGUL SYLLABLE MYAEG..HANGUL SYLLABLE MYAEH
+	{0xBA38, 0xBA38, prLV},                     // Lo       HANGUL SYLLABLE MEO
+	{0xBA39, 0xBA53, prLVT},                    // Lo  [27] HANGUL SYLLABLE MEOG..HANGUL SYLLABLE MEOH
+	{0xBA54, 0xBA54, prLV},                     // Lo       HANGUL SYLLABLE ME
+	{0xBA55, 0xBA6F, prLVT},                    // Lo  [27] HANGUL SYLLABLE MEG..HANGUL SYLLABLE MEH
+	{0xBA70, 0xBA70, prLV},                     // Lo       HANGUL SYLLABLE MYEO
+	{0xBA71, 0xBA8B, prLVT},                    // Lo  [27] HANGUL SYLLABLE MYEOG..HANGUL SYLLABLE MYEOH
+	{0xBA8C, 0xBA8C, prLV},                     // Lo       HANGUL SYLLABLE MYE
+	{0xBA8D, 0xBAA7, prLVT},                    // Lo  [27] HANGUL SYLLABLE MYEG..HANGUL SYLLABLE MYEH
+	{0xBAA8, 0xBAA8, prLV},                     // Lo       HANGUL SYLLABLE MO
+	{0xBAA9, 0xBAC3, prLVT},                    // Lo  [27] HANGUL SYLLABLE MOG..HANGUL SYLLABLE MOH
+	{0xBAC4, 0xBAC4, prLV},                     // Lo       HANGUL SYLLABLE MWA
+	{0xBAC5, 0xBADF, prLVT},                    // Lo  [27] HANGUL SYLLABLE MWAG..HANGUL SYLLABLE MWAH
+	{0xBAE0, 0xBAE0, prLV},                     // Lo       HANGUL SYLLABLE MWAE
+	{0xBAE1, 0xBAFB, prLVT},                    // Lo  [27] HANGUL SYLLABLE MWAEG..HANGUL SYLLABLE MWAEH
+	{0xBAFC, 0xBAFC, prLV},                     // Lo       HANGUL SYLLABLE MOE
+	{0xBAFD, 0xBB17, prLVT},                    // Lo  [27] HANGUL SYLLABLE MOEG..HANGUL SYLLABLE MOEH
+	{0xBB18, 0xBB18, prLV},                     // Lo       HANGUL SYLLABLE MYO
+	{0xBB19, 0xBB33, prLVT},                    // Lo  [27] HANGUL SYLLABLE MYOG..HANGUL SYLLABLE MYOH
+	{0xBB34, 0xBB34, prLV},                     // Lo       HANGUL SYLLABLE MU
+	{0xBB35, 0xBB4F, prLVT},                    // Lo  [27] HANGUL SYLLABLE MUG..HANGUL SYLLABLE MUH
+	{0xBB50, 0xBB50, prLV},                     // Lo       HANGUL SYLLABLE MWEO
+	{0xBB51, 0xBB6B, prLVT},                    // Lo  [27] HANGUL SYLLABLE MWEOG..HANGUL SYLLABLE MWEOH
+	{0xBB6C, 0xBB6C, prLV},                     // Lo       HANGUL SYLLABLE MWE
+	{0xBB6D, 0xBB87, prLVT},                    // Lo  [27] HANGUL SYLLABLE MWEG..HANGUL SYLLABLE MWEH
+	{0xBB88, 0xBB88, prLV},                     // Lo       HANGUL SYLLABLE MWI
+	{0xBB89, 0xBBA3, prLVT},                    // Lo  [27] HANGUL SYLLABLE MWIG..HANGUL SYLLABLE MWIH
+	{0xBBA4, 0xBBA4, prLV},                     // Lo       HANGUL SYLLABLE MYU
+	{0xBBA5, 0xBBBF, prLVT},                    // Lo  [27] HANGUL SYLLABLE MYUG..HANGUL SYLLABLE MYUH
+	{0xBBC0, 0xBBC0, prLV},                     // Lo       HANGUL SYLLABLE MEU
+	{0xBBC1, 0xBBDB, prLVT},                    // Lo  [27] HANGUL SYLLABLE MEUG..HANGUL SYLLABLE MEUH
+	{0xBBDC, 0xBBDC, prLV},                     // Lo       HANGUL SYLLABLE MYI
+	{0xBBDD, 0xBBF7, prLVT},                    // Lo  [27] HANGUL SYLLABLE MYIG..HANGUL SYLLABLE MYIH
+	{0xBBF8, 0xBBF8, prLV},                     // Lo       HANGUL SYLLABLE MI
+	{0xBBF9, 0xBC13, prLVT},                    // Lo  [27] HANGUL SYLLABLE MIG..HANGUL SYLLABLE MIH
+	{0xBC14, 0xBC14, prLV},                     // Lo       HANGUL SYLLABLE BA
+	{0xBC15, 0xBC2F, prLVT},                    // Lo  [27] HANGUL SYLLABLE BAG..HANGUL SYLLABLE BAH
+	{0xBC30, 0xBC30, prLV},                     // Lo       HANGUL SYLLABLE BAE
+	{0xBC31, 0xBC4B, prLVT},                    // Lo  [27] HANGUL SYLLABLE BAEG..HANGUL SYLLABLE BAEH
+	{0xBC4C, 0xBC4C, prLV},                     // Lo       HANGUL SYLLABLE BYA
+	{0xBC4D, 0xBC67, prLVT},                    // Lo  [27] HANGUL SYLLABLE BYAG..HANGUL SYLLABLE BYAH
+	{0xBC68, 0xBC68, prLV},                     // Lo       HANGUL SYLLABLE BYAE
+	{0xBC69, 0xBC83, prLVT},                    // Lo  [27] HANGUL SYLLABLE BYAEG..HANGUL SYLLABLE BYAEH
+	{0xBC84, 0xBC84, prLV},                     // Lo       HANGUL SYLLABLE BEO
+	{0xBC85, 0xBC9F, prLVT},                    // Lo  [27] HANGUL SYLLABLE BEOG..HANGUL SYLLABLE BEOH
+	{0xBCA0, 0xBCA0, prLV},                     // Lo       HANGUL SYLLABLE BE
+	{0xBCA1, 0xBCBB, prLVT},                    // Lo  [27] HANGUL SYLLABLE BEG..HANGUL SYLLABLE BEH
+	{0xBCBC, 0xBCBC, prLV},                     // Lo       HANGUL SYLLABLE BYEO
+	{0xBCBD, 0xBCD7, prLVT},                    // Lo  [27] HANGUL SYLLABLE BYEOG..HANGUL SYLLABLE BYEOH
+	{0xBCD8, 0xBCD8, prLV},                     // Lo       HANGUL SYLLABLE BYE
+	{0xBCD9, 0xBCF3, prLVT},                    // Lo  [27] HANGUL SYLLABLE BYEG..HANGUL SYLLABLE BYEH
+	{0xBCF4, 0xBCF4, prLV},                     // Lo       HANGUL SYLLABLE BO
+	{0xBCF5, 0xBD0F, prLVT},                    // Lo  [27] HANGUL SYLLABLE BOG..HANGUL SYLLABLE BOH
+	{0xBD10, 0xBD10, prLV},                     // Lo       HANGUL SYLLABLE BWA
+	{0xBD11, 0xBD2B, prLVT},                    // Lo  [27] HANGUL SYLLABLE BWAG..HANGUL SYLLABLE BWAH
+	{0xBD2C, 0xBD2C, prLV},                     // Lo       HANGUL SYLLABLE BWAE
+	{0xBD2D, 0xBD47, prLVT},                    // Lo  [27] HANGUL SYLLABLE BWAEG..HANGUL SYLLABLE BWAEH
+	{0xBD48, 0xBD48, prLV},                     // Lo       HANGUL SYLLABLE BOE
+	{0xBD49, 0xBD63, prLVT},                    // Lo  [27] HANGUL SYLLABLE BOEG..HANGUL SYLLABLE BOEH
+	{0xBD64, 0xBD64, prLV},                     // Lo       HANGUL SYLLABLE BYO
+	{0xBD65, 0xBD7F, prLVT},                    // Lo  [27] HANGUL SYLLABLE BYOG..HANGUL SYLLABLE BYOH
+	{0xBD80, 0xBD80, prLV},                     // Lo       HANGUL SYLLABLE BU
+	{0xBD81, 0xBD9B, prLVT},                    // Lo  [27] HANGUL SYLLABLE BUG..HANGUL SYLLABLE BUH
+	{0xBD9C, 0xBD9C, prLV},                     // Lo       HANGUL SYLLABLE BWEO
+	{0xBD9D, 0xBDB7, prLVT},                    // Lo  [27] HANGUL SYLLABLE BWEOG..HANGUL SYLLABLE BWEOH
+	{0xBDB8, 0xBDB8, prLV},                     // Lo       HANGUL SYLLABLE BWE
+	{0xBDB9, 0xBDD3, prLVT},                    // Lo  [27] HANGUL SYLLABLE BWEG..HANGUL SYLLABLE BWEH
+	{0xBDD4, 0xBDD4, prLV},                     // Lo       HANGUL SYLLABLE BWI
+	{0xBDD5, 0xBDEF, prLVT},                    // Lo  [27] HANGUL SYLLABLE BWIG..HANGUL SYLLABLE BWIH
+	{0xBDF0, 0xBDF0, prLV},                     // Lo       HANGUL SYLLABLE BYU
+	{0xBDF1, 0xBE0B, prLVT},                    // Lo  [27] HANGUL SYLLABLE BYUG..HANGUL SYLLABLE BYUH
+	{0xBE0C, 0xBE0C, prLV},                     // Lo       HANGUL SYLLABLE BEU
+	{0xBE0D, 0xBE27, prLVT},                    // Lo  [27] HANGUL SYLLABLE BEUG..HANGUL SYLLABLE BEUH
+	{0xBE28, 0xBE28, prLV},                     // Lo       HANGUL SYLLABLE BYI
+	{0xBE29, 0xBE43, prLVT},                    // Lo  [27] HANGUL SYLLABLE BYIG..HANGUL SYLLABLE BYIH
+	{0xBE44, 0xBE44, prLV},                     // Lo       HANGUL SYLLABLE BI
+	{0xBE45, 0xBE5F, prLVT},                    // Lo  [27] HANGUL SYLLABLE BIG..HANGUL SYLLABLE BIH
+	{0xBE60, 0xBE60, prLV},                     // Lo       HANGUL SYLLABLE BBA
+	{0xBE61, 0xBE7B, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBAG..HANGUL SYLLABLE BBAH
+	{0xBE7C, 0xBE7C, prLV},                     // Lo       HANGUL SYLLABLE BBAE
+	{0xBE7D, 0xBE97, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBAEG..HANGUL SYLLABLE BBAEH
+	{0xBE98, 0xBE98, prLV},                     // Lo       HANGUL SYLLABLE BBYA
+	{0xBE99, 0xBEB3, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBYAG..HANGUL SYLLABLE BBYAH
+	{0xBEB4, 0xBEB4, prLV},                     // Lo       HANGUL SYLLABLE BBYAE
+	{0xBEB5, 0xBECF, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBYAEG..HANGUL SYLLABLE BBYAEH
+	{0xBED0, 0xBED0, prLV},                     // Lo       HANGUL SYLLABLE BBEO
+	{0xBED1, 0xBEEB, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBEOG..HANGUL SYLLABLE BBEOH
+	{0xBEEC, 0xBEEC, prLV},                     // Lo       HANGUL SYLLABLE BBE
+	{0xBEED, 0xBF07, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBEG..HANGUL SYLLABLE BBEH
+	{0xBF08, 0xBF08, prLV},                     // Lo       HANGUL SYLLABLE BBYEO
+	{0xBF09, 0xBF23, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBYEOG..HANGUL SYLLABLE BBYEOH
+	{0xBF24, 0xBF24, prLV},                     // Lo       HANGUL SYLLABLE BBYE
+	{0xBF25, 0xBF3F, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBYEG..HANGUL SYLLABLE BBYEH
+	{0xBF40, 0xBF40, prLV},                     // Lo       HANGUL SYLLABLE BBO
+	{0xBF41, 0xBF5B, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBOG..HANGUL SYLLABLE BBOH
+	{0xBF5C, 0xBF5C, prLV},                     // Lo       HANGUL SYLLABLE BBWA
+	{0xBF5D, 0xBF77, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBWAG..HANGUL SYLLABLE BBWAH
+	{0xBF78, 0xBF78, prLV},                     // Lo       HANGUL SYLLABLE BBWAE
+	{0xBF79, 0xBF93, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBWAEG..HANGUL SYLLABLE BBWAEH
+	{0xBF94, 0xBF94, prLV},                     // Lo       HANGUL SYLLABLE BBOE
+	{0xBF95, 0xBFAF, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBOEG..HANGUL SYLLABLE BBOEH
+	{0xBFB0, 0xBFB0, prLV},                     // Lo       HANGUL SYLLABLE BBYO
+	{0xBFB1, 0xBFCB, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBYOG..HANGUL SYLLABLE BBYOH
+	{0xBFCC, 0xBFCC, prLV},                     // Lo       HANGUL SYLLABLE BBU
+	{0xBFCD, 0xBFE7, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBUG..HANGUL SYLLABLE BBUH
+	{0xBFE8, 0xBFE8, prLV},                     // Lo       HANGUL SYLLABLE BBWEO
+	{0xBFE9, 0xC003, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBWEOG..HANGUL SYLLABLE BBWEOH
+	{0xC004, 0xC004, prLV},                     // Lo       HANGUL SYLLABLE BBWE
+	{0xC005, 0xC01F, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBWEG..HANGUL SYLLABLE BBWEH
+	{0xC020, 0xC020, prLV},                     // Lo       HANGUL SYLLABLE BBWI
+	{0xC021, 0xC03B, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBWIG..HANGUL SYLLABLE BBWIH
+	{0xC03C, 0xC03C, prLV},                     // Lo       HANGUL SYLLABLE BBYU
+	{0xC03D, 0xC057, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBYUG..HANGUL SYLLABLE BBYUH
+	{0xC058, 0xC058, prLV},                     // Lo       HANGUL SYLLABLE BBEU
+	{0xC059, 0xC073, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBEUG..HANGUL SYLLABLE BBEUH
+	{0xC074, 0xC074, prLV},                     // Lo       HANGUL SYLLABLE BBYI
+	{0xC075, 0xC08F, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBYIG..HANGUL SYLLABLE BBYIH
+	{0xC090, 0xC090, prLV},                     // Lo       HANGUL SYLLABLE BBI
+	{0xC091, 0xC0AB, prLVT},                    // Lo  [27] HANGUL SYLLABLE BBIG..HANGUL SYLLABLE BBIH
+	{0xC0AC, 0xC0AC, prLV},                     // Lo       HANGUL SYLLABLE SA
+	{0xC0AD, 0xC0C7, prLVT},                    // Lo  [27] HANGUL SYLLABLE SAG..HANGUL SYLLABLE SAH
+	{0xC0C8, 0xC0C8, prLV},                     // Lo       HANGUL SYLLABLE SAE
+	{0xC0C9, 0xC0E3, prLVT},                    // Lo  [27] HANGUL SYLLABLE SAEG..HANGUL SYLLABLE SAEH
+	{0xC0E4, 0xC0E4, prLV},                     // Lo       HANGUL SYLLABLE SYA
+	{0xC0E5, 0xC0FF, prLVT},                    // Lo  [27] HANGUL SYLLABLE SYAG..HANGUL SYLLABLE SYAH
+	{0xC100, 0xC100, prLV},                     // Lo       HANGUL SYLLABLE SYAE
+	{0xC101, 0xC11B, prLVT},                    // Lo  [27] HANGUL SYLLABLE SYAEG..HANGUL SYLLABLE SYAEH
+	{0xC11C, 0xC11C, prLV},                     // Lo       HANGUL SYLLABLE SEO
+	{0xC11D, 0xC137, prLVT},                    // Lo  [27] HANGUL SYLLABLE SEOG..HANGUL SYLLABLE SEOH
+	{0xC138, 0xC138, prLV},                     // Lo       HANGUL SYLLABLE SE
+	{0xC139, 0xC153, prLVT},                    // Lo  [27] HANGUL SYLLABLE SEG..HANGUL SYLLABLE SEH
+	{0xC154, 0xC154, prLV},                     // Lo       HANGUL SYLLABLE SYEO
+	{0xC155, 0xC16F, prLVT},                    // Lo  [27] HANGUL SYLLABLE SYEOG..HANGUL SYLLABLE SYEOH
+	{0xC170, 0xC170, prLV},                     // Lo       HANGUL SYLLABLE SYE
+	{0xC171, 0xC18B, prLVT},                    // Lo  [27] HANGUL SYLLABLE SYEG..HANGUL SYLLABLE SYEH
+	{0xC18C, 0xC18C, prLV},                     // Lo       HANGUL SYLLABLE SO
+	{0xC18D, 0xC1A7, prLVT},                    // Lo  [27] HANGUL SYLLABLE SOG..HANGUL SYLLABLE SOH
+	{0xC1A8, 0xC1A8, prLV},                     // Lo       HANGUL SYLLABLE SWA
+	{0xC1A9, 0xC1C3, prLVT},                    // Lo  [27] HANGUL SYLLABLE SWAG..HANGUL SYLLABLE SWAH
+	{0xC1C4, 0xC1C4, prLV},                     // Lo       HANGUL SYLLABLE SWAE
+	{0xC1C5, 0xC1DF, prLVT},                    // Lo  [27] HANGUL SYLLABLE SWAEG..HANGUL SYLLABLE SWAEH
+	{0xC1E0, 0xC1E0, prLV},                     // Lo       HANGUL SYLLABLE SOE
+	{0xC1E1, 0xC1FB, prLVT},                    // Lo  [27] HANGUL SYLLABLE SOEG..HANGUL SYLLABLE SOEH
+	{0xC1FC, 0xC1FC, prLV},                     // Lo       HANGUL SYLLABLE SYO
+	{0xC1FD, 0xC217, prLVT},                    // Lo  [27] HANGUL SYLLABLE SYOG..HANGUL SYLLABLE SYOH
+	{0xC218, 0xC218, prLV},                     // Lo       HANGUL SYLLABLE SU
+	{0xC219, 0xC233, prLVT},                    // Lo  [27] HANGUL SYLLABLE SUG..HANGUL SYLLABLE SUH
+	{0xC234, 0xC234, prLV},                     // Lo       HANGUL SYLLABLE SWEO
+	{0xC235, 0xC24F, prLVT},                    // Lo  [27] HANGUL SYLLABLE SWEOG..HANGUL SYLLABLE SWEOH
+	{0xC250, 0xC250, prLV},                     // Lo       HANGUL SYLLABLE SWE
+	{0xC251, 0xC26B, prLVT},                    // Lo  [27] HANGUL SYLLABLE SWEG..HANGUL SYLLABLE SWEH
+	{0xC26C, 0xC26C, prLV},                     // Lo       HANGUL SYLLABLE SWI
+	{0xC26D, 0xC287, prLVT},                    // Lo  [27] HANGUL SYLLABLE SWIG..HANGUL SYLLABLE SWIH
+	{0xC288, 0xC288, prLV},                     // Lo       HANGUL SYLLABLE SYU
+	{0xC289, 0xC2A3, prLVT},                    // Lo  [27] HANGUL SYLLABLE SYUG..HANGUL SYLLABLE SYUH
+	{0xC2A4, 0xC2A4, prLV},                     // Lo       HANGUL SYLLABLE SEU
+	{0xC2A5, 0xC2BF, prLVT},                    // Lo  [27] HANGUL SYLLABLE SEUG..HANGUL SYLLABLE SEUH
+	{0xC2C0, 0xC2C0, prLV},                     // Lo       HANGUL SYLLABLE SYI
+	{0xC2C1, 0xC2DB, prLVT},                    // Lo  [27] HANGUL SYLLABLE SYIG..HANGUL SYLLABLE SYIH
+	{0xC2DC, 0xC2DC, prLV},                     // Lo       HANGUL SYLLABLE SI
+	{0xC2DD, 0xC2F7, prLVT},                    // Lo  [27] HANGUL SYLLABLE SIG..HANGUL SYLLABLE SIH
+	{0xC2F8, 0xC2F8, prLV},                     // Lo       HANGUL SYLLABLE SSA
+	{0xC2F9, 0xC313, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSAG..HANGUL SYLLABLE SSAH
+	{0xC314, 0xC314, prLV},                     // Lo       HANGUL SYLLABLE SSAE
+	{0xC315, 0xC32F, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSAEG..HANGUL SYLLABLE SSAEH
+	{0xC330, 0xC330, prLV},                     // Lo       HANGUL SYLLABLE SSYA
+	{0xC331, 0xC34B, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSYAG..HANGUL SYLLABLE SSYAH
+	{0xC34C, 0xC34C, prLV},                     // Lo       HANGUL SYLLABLE SSYAE
+	{0xC34D, 0xC367, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSYAEG..HANGUL SYLLABLE SSYAEH
+	{0xC368, 0xC368, prLV},                     // Lo       HANGUL SYLLABLE SSEO
+	{0xC369, 0xC383, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSEOG..HANGUL SYLLABLE SSEOH
+	{0xC384, 0xC384, prLV},                     // Lo       HANGUL SYLLABLE SSE
+	{0xC385, 0xC39F, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSEG..HANGUL SYLLABLE SSEH
+	{0xC3A0, 0xC3A0, prLV},                     // Lo       HANGUL SYLLABLE SSYEO
+	{0xC3A1, 0xC3BB, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSYEOG..HANGUL SYLLABLE SSYEOH
+	{0xC3BC, 0xC3BC, prLV},                     // Lo       HANGUL SYLLABLE SSYE
+	{0xC3BD, 0xC3D7, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSYEG..HANGUL SYLLABLE SSYEH
+	{0xC3D8, 0xC3D8, prLV},                     // Lo       HANGUL SYLLABLE SSO
+	{0xC3D9, 0xC3F3, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSOG..HANGUL SYLLABLE SSOH
+	{0xC3F4, 0xC3F4, prLV},                     // Lo       HANGUL SYLLABLE SSWA
+	{0xC3F5, 0xC40F, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSWAG..HANGUL SYLLABLE SSWAH
+	{0xC410, 0xC410, prLV},                     // Lo       HANGUL SYLLABLE SSWAE
+	{0xC411, 0xC42B, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSWAEG..HANGUL SYLLABLE SSWAEH
+	{0xC42C, 0xC42C, prLV},                     // Lo       HANGUL SYLLABLE SSOE
+	{0xC42D, 0xC447, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSOEG..HANGUL SYLLABLE SSOEH
+	{0xC448, 0xC448, prLV},                     // Lo       HANGUL SYLLABLE SSYO
+	{0xC449, 0xC463, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSYOG..HANGUL SYLLABLE SSYOH
+	{0xC464, 0xC464, prLV},                     // Lo       HANGUL SYLLABLE SSU
+	{0xC465, 0xC47F, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSUG..HANGUL SYLLABLE SSUH
+	{0xC480, 0xC480, prLV},                     // Lo       HANGUL SYLLABLE SSWEO
+	{0xC481, 0xC49B, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSWEOG..HANGUL SYLLABLE SSWEOH
+	{0xC49C, 0xC49C, prLV},                     // Lo       HANGUL SYLLABLE SSWE
+	{0xC49D, 0xC4B7, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSWEG..HANGUL SYLLABLE SSWEH
+	{0xC4B8, 0xC4B8, prLV},                     // Lo       HANGUL SYLLABLE SSWI
+	{0xC4B9, 0xC4D3, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSWIG..HANGUL SYLLABLE SSWIH
+	{0xC4D4, 0xC4D4, prLV},                     // Lo       HANGUL SYLLABLE SSYU
+	{0xC4D5, 0xC4EF, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSYUG..HANGUL SYLLABLE SSYUH
+	{0xC4F0, 0xC4F0, prLV},                     // Lo       HANGUL SYLLABLE SSEU
+	{0xC4F1, 0xC50B, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSEUG..HANGUL SYLLABLE SSEUH
+	{0xC50C, 0xC50C, prLV},                     // Lo       HANGUL SYLLABLE SSYI
+	{0xC50D, 0xC527, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSYIG..HANGUL SYLLABLE SSYIH
+	{0xC528, 0xC528, prLV},                     // Lo       HANGUL SYLLABLE SSI
+	{0xC529, 0xC543, prLVT},                    // Lo  [27] HANGUL SYLLABLE SSIG..HANGUL SYLLABLE SSIH
+	{0xC544, 0xC544, prLV},                     // Lo       HANGUL SYLLABLE A
+	{0xC545, 0xC55F, prLVT},                    // Lo  [27] HANGUL SYLLABLE AG..HANGUL SYLLABLE AH
+	{0xC560, 0xC560, prLV},                     // Lo       HANGUL SYLLABLE AE
+	{0xC561, 0xC57B, prLVT},                    // Lo  [27] HANGUL SYLLABLE AEG..HANGUL SYLLABLE AEH
+	{0xC57C, 0xC57C, prLV},                     // Lo       HANGUL SYLLABLE YA
+	{0xC57D, 0xC597, prLVT},                    // Lo  [27] HANGUL SYLLABLE YAG..HANGUL SYLLABLE YAH
+	{0xC598, 0xC598, prLV},                     // Lo       HANGUL SYLLABLE YAE
+	{0xC599, 0xC5B3, prLVT},                    // Lo  [27] HANGUL SYLLABLE YAEG..HANGUL SYLLABLE YAEH
+	{0xC5B4, 0xC5B4, prLV},                     // Lo       HANGUL SYLLABLE EO
+	{0xC5B5, 0xC5CF, prLVT},                    // Lo  [27] HANGUL SYLLABLE EOG..HANGUL SYLLABLE EOH
+	{0xC5D0, 0xC5D0, prLV},                     // Lo       HANGUL SYLLABLE E
+	{0xC5D1, 0xC5EB, prLVT},                    // Lo  [27] HANGUL SYLLABLE EG..HANGUL SYLLABLE EH
+	{0xC5EC, 0xC5EC, prLV},                     // Lo       HANGUL SYLLABLE YEO
+	{0xC5ED, 0xC607, prLVT},                    // Lo  [27] HANGUL SYLLABLE YEOG..HANGUL SYLLABLE YEOH
+	{0xC608, 0xC608, prLV},                     // Lo       HANGUL SYLLABLE YE
+	{0xC609, 0xC623, prLVT},                    // Lo  [27] HANGUL SYLLABLE YEG..HANGUL SYLLABLE YEH
+	{0xC624, 0xC624, prLV},                     // Lo       HANGUL SYLLABLE O
+	{0xC625, 0xC63F, prLVT},                    // Lo  [27] HANGUL SYLLABLE OG..HANGUL SYLLABLE OH
+	{0xC640, 0xC640, prLV},                     // Lo       HANGUL SYLLABLE WA
+	{0xC641, 0xC65B, prLVT},                    // Lo  [27] HANGUL SYLLABLE WAG..HANGUL SYLLABLE WAH
+	{0xC65C, 0xC65C, prLV},                     // Lo       HANGUL SYLLABLE WAE
+	{0xC65D, 0xC677, prLVT},                    // Lo  [27] HANGUL SYLLABLE WAEG..HANGUL SYLLABLE WAEH
+	{0xC678, 0xC678, prLV},                     // Lo       HANGUL SYLLABLE OE
+	{0xC679, 0xC693, prLVT},                    // Lo  [27] HANGUL SYLLABLE OEG..HANGUL SYLLABLE OEH
+	{0xC694, 0xC694, prLV},                     // Lo       HANGUL SYLLABLE YO
+	{0xC695, 0xC6AF, prLVT},                    // Lo  [27] HANGUL SYLLABLE YOG..HANGUL SYLLABLE YOH
+	{0xC6B0, 0xC6B0, prLV},                     // Lo       HANGUL SYLLABLE U
+	{0xC6B1, 0xC6CB, prLVT},                    // Lo  [27] HANGUL SYLLABLE UG..HANGUL SYLLABLE UH
+	{0xC6CC, 0xC6CC, prLV},                     // Lo       HANGUL SYLLABLE WEO
+	{0xC6CD, 0xC6E7, prLVT},                    // Lo  [27] HANGUL SYLLABLE WEOG..HANGUL SYLLABLE WEOH
+	{0xC6E8, 0xC6E8, prLV},                     // Lo       HANGUL SYLLABLE WE
+	{0xC6E9, 0xC703, prLVT},                    // Lo  [27] HANGUL SYLLABLE WEG..HANGUL SYLLABLE WEH
+	{0xC704, 0xC704, prLV},                     // Lo       HANGUL SYLLABLE WI
+	{0xC705, 0xC71F, prLVT},                    // Lo  [27] HANGUL SYLLABLE WIG..HANGUL SYLLABLE WIH
+	{0xC720, 0xC720, prLV},                     // Lo       HANGUL SYLLABLE YU
+	{0xC721, 0xC73B, prLVT},                    // Lo  [27] HANGUL SYLLABLE YUG..HANGUL SYLLABLE YUH
+	{0xC73C, 0xC73C, prLV},                     // Lo       HANGUL SYLLABLE EU
+	{0xC73D, 0xC757, prLVT},                    // Lo  [27] HANGUL SYLLABLE EUG..HANGUL SYLLABLE EUH
+	{0xC758, 0xC758, prLV},                     // Lo       HANGUL SYLLABLE YI
+	{0xC759, 0xC773, prLVT},                    // Lo  [27] HANGUL SYLLABLE YIG..HANGUL SYLLABLE YIH
+	{0xC774, 0xC774, prLV},                     // Lo       HANGUL SYLLABLE I
+	{0xC775, 0xC78F, prLVT},                    // Lo  [27] HANGUL SYLLABLE IG..HANGUL SYLLABLE IH
+	{0xC790, 0xC790, prLV},                     // Lo       HANGUL SYLLABLE JA
+	{0xC791, 0xC7AB, prLVT},                    // Lo  [27] HANGUL SYLLABLE JAG..HANGUL SYLLABLE JAH
+	{0xC7AC, 0xC7AC, prLV},                     // Lo       HANGUL SYLLABLE JAE
+	{0xC7AD, 0xC7C7, prLVT},                    // Lo  [27] HANGUL SYLLABLE JAEG..HANGUL SYLLABLE JAEH
+	{0xC7C8, 0xC7C8, prLV},                     // Lo       HANGUL SYLLABLE JYA
+	{0xC7C9, 0xC7E3, prLVT},                    // Lo  [27] HANGUL SYLLABLE JYAG..HANGUL SYLLABLE JYAH
+	{0xC7E4, 0xC7E4, prLV},                     // Lo       HANGUL SYLLABLE JYAE
+	{0xC7E5, 0xC7FF, prLVT},                    // Lo  [27] HANGUL SYLLABLE JYAEG..HANGUL SYLLABLE JYAEH
+	{0xC800, 0xC800, prLV},                     // Lo       HANGUL SYLLABLE JEO
+	{0xC801, 0xC81B, prLVT},                    // Lo  [27] HANGUL SYLLABLE JEOG..HANGUL SYLLABLE JEOH
+	{0xC81C, 0xC81C, prLV},                     // Lo       HANGUL SYLLABLE JE
+	{0xC81D, 0xC837, prLVT},                    // Lo  [27] HANGUL SYLLABLE JEG..HANGUL SYLLABLE JEH
+	{0xC838, 0xC838, prLV},                     // Lo       HANGUL SYLLABLE JYEO
+	{0xC839, 0xC853, prLVT},                    // Lo  [27] HANGUL SYLLABLE JYEOG..HANGUL SYLLABLE JYEOH
+	{0xC854, 0xC854, prLV},                     // Lo       HANGUL SYLLABLE JYE
+	{0xC855, 0xC86F, prLVT},                    // Lo  [27] HANGUL SYLLABLE JYEG..HANGUL SYLLABLE JYEH
+	{0xC870, 0xC870, prLV},                     // Lo       HANGUL SYLLABLE JO
+	{0xC871, 0xC88B, prLVT},                    // Lo  [27] HANGUL SYLLABLE JOG..HANGUL SYLLABLE JOH
+	{0xC88C, 0xC88C, prLV},                     // Lo       HANGUL SYLLABLE JWA
+	{0xC88D, 0xC8A7, prLVT},                    // Lo  [27] HANGUL SYLLABLE JWAG..HANGUL SYLLABLE JWAH
+	{0xC8A8, 0xC8A8, prLV},                     // Lo       HANGUL SYLLABLE JWAE
+	{0xC8A9, 0xC8C3, prLVT},                    // Lo  [27] HANGUL SYLLABLE JWAEG..HANGUL SYLLABLE JWAEH
+	{0xC8C4, 0xC8C4, prLV},                     // Lo       HANGUL SYLLABLE JOE
+	{0xC8C5, 0xC8DF, prLVT},                    // Lo  [27] HANGUL SYLLABLE JOEG..HANGUL SYLLABLE JOEH
+	{0xC8E0, 0xC8E0, prLV},                     // Lo       HANGUL SYLLABLE JYO
+	{0xC8E1, 0xC8FB, prLVT},                    // Lo  [27] HANGUL SYLLABLE JYOG..HANGUL SYLLABLE JYOH
+	{0xC8FC, 0xC8FC, prLV},                     // Lo       HANGUL SYLLABLE JU
+	{0xC8FD, 0xC917, prLVT},                    // Lo  [27] HANGUL SYLLABLE JUG..HANGUL SYLLABLE JUH
+	{0xC918, 0xC918, prLV},                     // Lo       HANGUL SYLLABLE JWEO
+	{0xC919, 0xC933, prLVT},                    // Lo  [27] HANGUL SYLLABLE JWEOG..HANGUL SYLLABLE JWEOH
+	{0xC934, 0xC934, prLV},                     // Lo       HANGUL SYLLABLE JWE
+	{0xC935, 0xC94F, prLVT},                    // Lo  [27] HANGUL SYLLABLE JWEG..HANGUL SYLLABLE JWEH
+	{0xC950, 0xC950, prLV},                     // Lo       HANGUL SYLLABLE JWI
+	{0xC951, 0xC96B, prLVT},                    // Lo  [27] HANGUL SYLLABLE JWIG..HANGUL SYLLABLE JWIH
+	{0xC96C, 0xC96C, prLV},                     // Lo       HANGUL SYLLABLE JYU
+	{0xC96D, 0xC987, prLVT},                    // Lo  [27] HANGUL SYLLABLE JYUG..HANGUL SYLLABLE JYUH
+	{0xC988, 0xC988, prLV},                     // Lo       HANGUL SYLLABLE JEU
+	{0xC989, 0xC9A3, prLVT},                    // Lo  [27] HANGUL SYLLABLE JEUG..HANGUL SYLLABLE JEUH
+	{0xC9A4, 0xC9A4, prLV},                     // Lo       HANGUL SYLLABLE JYI
+	{0xC9A5, 0xC9BF, prLVT},                    // Lo  [27] HANGUL SYLLABLE JYIG..HANGUL SYLLABLE JYIH
+	{0xC9C0, 0xC9C0, prLV},                     // Lo       HANGUL SYLLABLE JI
+	{0xC9C1, 0xC9DB, prLVT},                    // Lo  [27] HANGUL SYLLABLE JIG..HANGUL SYLLABLE JIH
+	{0xC9DC, 0xC9DC, prLV},                     // Lo       HANGUL SYLLABLE JJA
+	{0xC9DD, 0xC9F7, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJAG..HANGUL SYLLABLE JJAH
+	{0xC9F8, 0xC9F8, prLV},                     // Lo       HANGUL SYLLABLE JJAE
+	{0xC9F9, 0xCA13, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJAEG..HANGUL SYLLABLE JJAEH
+	{0xCA14, 0xCA14, prLV},                     // Lo       HANGUL SYLLABLE JJYA
+	{0xCA15, 0xCA2F, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJYAG..HANGUL SYLLABLE JJYAH
+	{0xCA30, 0xCA30, prLV},                     // Lo       HANGUL SYLLABLE JJYAE
+	{0xCA31, 0xCA4B, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJYAEG..HANGUL SYLLABLE JJYAEH
+	{0xCA4C, 0xCA4C, prLV},                     // Lo       HANGUL SYLLABLE JJEO
+	{0xCA4D, 0xCA67, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJEOG..HANGUL SYLLABLE JJEOH
+	{0xCA68, 0xCA68, prLV},                     // Lo       HANGUL SYLLABLE JJE
+	{0xCA69, 0xCA83, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJEG..HANGUL SYLLABLE JJEH
+	{0xCA84, 0xCA84, prLV},                     // Lo       HANGUL SYLLABLE JJYEO
+	{0xCA85, 0xCA9F, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJYEOG..HANGUL SYLLABLE JJYEOH
+	{0xCAA0, 0xCAA0, prLV},                     // Lo       HANGUL SYLLABLE JJYE
+	{0xCAA1, 0xCABB, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJYEG..HANGUL SYLLABLE JJYEH
+	{0xCABC, 0xCABC, prLV},                     // Lo       HANGUL SYLLABLE JJO
+	{0xCABD, 0xCAD7, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJOG..HANGUL SYLLABLE JJOH
+	{0xCAD8, 0xCAD8, prLV},                     // Lo       HANGUL SYLLABLE JJWA
+	{0xCAD9, 0xCAF3, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJWAG..HANGUL SYLLABLE JJWAH
+	{0xCAF4, 0xCAF4, prLV},                     // Lo       HANGUL SYLLABLE JJWAE
+	{0xCAF5, 0xCB0F, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJWAEG..HANGUL SYLLABLE JJWAEH
+	{0xCB10, 0xCB10, prLV},                     // Lo       HANGUL SYLLABLE JJOE
+	{0xCB11, 0xCB2B, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJOEG..HANGUL SYLLABLE JJOEH
+	{0xCB2C, 0xCB2C, prLV},                     // Lo       HANGUL SYLLABLE JJYO
+	{0xCB2D, 0xCB47, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJYOG..HANGUL SYLLABLE JJYOH
+	{0xCB48, 0xCB48, prLV},                     // Lo       HANGUL SYLLABLE JJU
+	{0xCB49, 0xCB63, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJUG..HANGUL SYLLABLE JJUH
+	{0xCB64, 0xCB64, prLV},                     // Lo       HANGUL SYLLABLE JJWEO
+	{0xCB65, 0xCB7F, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJWEOG..HANGUL SYLLABLE JJWEOH
+	{0xCB80, 0xCB80, prLV},                     // Lo       HANGUL SYLLABLE JJWE
+	{0xCB81, 0xCB9B, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJWEG..HANGUL SYLLABLE JJWEH
+	{0xCB9C, 0xCB9C, prLV},                     // Lo       HANGUL SYLLABLE JJWI
+	{0xCB9D, 0xCBB7, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJWIG..HANGUL SYLLABLE JJWIH
+	{0xCBB8, 0xCBB8, prLV},                     // Lo       HANGUL SYLLABLE JJYU
+	{0xCBB9, 0xCBD3, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJYUG..HANGUL SYLLABLE JJYUH
+	{0xCBD4, 0xCBD4, prLV},                     // Lo       HANGUL SYLLABLE JJEU
+	{0xCBD5, 0xCBEF, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJEUG..HANGUL SYLLABLE JJEUH
+	{0xCBF0, 0xCBF0, prLV},                     // Lo       HANGUL SYLLABLE JJYI
+	{0xCBF1, 0xCC0B, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJYIG..HANGUL SYLLABLE JJYIH
+	{0xCC0C, 0xCC0C, prLV},                     // Lo       HANGUL SYLLABLE JJI
+	{0xCC0D, 0xCC27, prLVT},                    // Lo  [27] HANGUL SYLLABLE JJIG..HANGUL SYLLABLE JJIH
+	{0xCC28, 0xCC28, prLV},                     // Lo       HANGUL SYLLABLE CA
+	{0xCC29, 0xCC43, prLVT},                    // Lo  [27] HANGUL SYLLABLE CAG..HANGUL SYLLABLE CAH
+	{0xCC44, 0xCC44, prLV},                     // Lo       HANGUL SYLLABLE CAE
+	{0xCC45, 0xCC5F, prLVT},                    // Lo  [27] HANGUL SYLLABLE CAEG..HANGUL SYLLABLE CAEH
+	{0xCC60, 0xCC60, prLV},                     // Lo       HANGUL SYLLABLE CYA
+	{0xCC61, 0xCC7B, prLVT},                    // Lo  [27] HANGUL SYLLABLE CYAG..HANGUL SYLLABLE CYAH
+	{0xCC7C, 0xCC7C, prLV},                     // Lo       HANGUL SYLLABLE CYAE
+	{0xCC7D, 0xCC97, prLVT},                    // Lo  [27] HANGUL SYLLABLE CYAEG..HANGUL SYLLABLE CYAEH
+	{0xCC98, 0xCC98, prLV},                     // Lo       HANGUL SYLLABLE CEO
+	{0xCC99, 0xCCB3, prLVT},                    // Lo  [27] HANGUL SYLLABLE CEOG..HANGUL SYLLABLE CEOH
+	{0xCCB4, 0xCCB4, prLV},                     // Lo       HANGUL SYLLABLE CE
+	{0xCCB5, 0xCCCF, prLVT},                    // Lo  [27] HANGUL SYLLABLE CEG..HANGUL SYLLABLE CEH
+	{0xCCD0, 0xCCD0, prLV},                     // Lo       HANGUL SYLLABLE CYEO
+	{0xCCD1, 0xCCEB, prLVT},                    // Lo  [27] HANGUL SYLLABLE CYEOG..HANGUL SYLLABLE CYEOH
+	{0xCCEC, 0xCCEC, prLV},                     // Lo       HANGUL SYLLABLE CYE
+	{0xCCED, 0xCD07, prLVT},                    // Lo  [27] HANGUL SYLLABLE CYEG..HANGUL SYLLABLE CYEH
+	{0xCD08, 0xCD08, prLV},                     // Lo       HANGUL SYLLABLE CO
+	{0xCD09, 0xCD23, prLVT},                    // Lo  [27] HANGUL SYLLABLE COG..HANGUL SYLLABLE COH
+	{0xCD24, 0xCD24, prLV},                     // Lo       HANGUL SYLLABLE CWA
+	{0xCD25, 0xCD3F, prLVT},                    // Lo  [27] HANGUL SYLLABLE CWAG..HANGUL SYLLABLE CWAH
+	{0xCD40, 0xCD40, prLV},                     // Lo       HANGUL SYLLABLE CWAE
+	{0xCD41, 0xCD5B, prLVT},                    // Lo  [27] HANGUL SYLLABLE CWAEG..HANGUL SYLLABLE CWAEH
+	{0xCD5C, 0xCD5C, prLV},                     // Lo       HANGUL SYLLABLE COE
+	{0xCD5D, 0xCD77, prLVT},                    // Lo  [27] HANGUL SYLLABLE COEG..HANGUL SYLLABLE COEH
+	{0xCD78, 0xCD78, prLV},                     // Lo       HANGUL SYLLABLE CYO
+	{0xCD79, 0xCD93, prLVT},                    // Lo  [27] HANGUL SYLLABLE CYOG..HANGUL SYLLABLE CYOH
+	{0xCD94, 0xCD94, prLV},                     // Lo       HANGUL SYLLABLE CU
+	{0xCD95, 0xCDAF, prLVT},                    // Lo  [27] HANGUL SYLLABLE CUG..HANGUL SYLLABLE CUH
+	{0xCDB0, 0xCDB0, prLV},                     // Lo       HANGUL SYLLABLE CWEO
+	{0xCDB1, 0xCDCB, prLVT},                    // Lo  [27] HANGUL SYLLABLE CWEOG..HANGUL SYLLABLE CWEOH
+	{0xCDCC, 0xCDCC, prLV},                     // Lo       HANGUL SYLLABLE CWE
+	{0xCDCD, 0xCDE7, prLVT},                    // Lo  [27] HANGUL SYLLABLE CWEG..HANGUL SYLLABLE CWEH
+	{0xCDE8, 0xCDE8, prLV},                     // Lo       HANGUL SYLLABLE CWI
+	{0xCDE9, 0xCE03, prLVT},                    // Lo  [27] HANGUL SYLLABLE CWIG..HANGUL SYLLABLE CWIH
+	{0xCE04, 0xCE04, prLV},                     // Lo       HANGUL SYLLABLE CYU
+	{0xCE05, 0xCE1F, prLVT},                    // Lo  [27] HANGUL SYLLABLE CYUG..HANGUL SYLLABLE CYUH
+	{0xCE20, 0xCE20, prLV},                     // Lo       HANGUL SYLLABLE CEU
+	{0xCE21, 0xCE3B, prLVT},                    // Lo  [27] HANGUL SYLLABLE CEUG..HANGUL SYLLABLE CEUH
+	{0xCE3C, 0xCE3C, prLV},                     // Lo       HANGUL SYLLABLE CYI
+	{0xCE3D, 0xCE57, prLVT},                    // Lo  [27] HANGUL SYLLABLE CYIG..HANGUL SYLLABLE CYIH
+	{0xCE58, 0xCE58, prLV},                     // Lo       HANGUL SYLLABLE CI
+	{0xCE59, 0xCE73, prLVT},                    // Lo  [27] HANGUL SYLLABLE CIG..HANGUL SYLLABLE CIH
+	{0xCE74, 0xCE74, prLV},                     // Lo       HANGUL SYLLABLE KA
+	{0xCE75, 0xCE8F, prLVT},                    // Lo  [27] HANGUL SYLLABLE KAG..HANGUL SYLLABLE KAH
+	{0xCE90, 0xCE90, prLV},                     // Lo       HANGUL SYLLABLE KAE
+	{0xCE91, 0xCEAB, prLVT},                    // Lo  [27] HANGUL SYLLABLE KAEG..HANGUL SYLLABLE KAEH
+	{0xCEAC, 0xCEAC, prLV},                     // Lo       HANGUL SYLLABLE KYA
+	{0xCEAD, 0xCEC7, prLVT},                    // Lo  [27] HANGUL SYLLABLE KYAG..HANGUL SYLLABLE KYAH
+	{0xCEC8, 0xCEC8, prLV},                     // Lo       HANGUL SYLLABLE KYAE
+	{0xCEC9, 0xCEE3, prLVT},                    // Lo  [27] HANGUL SYLLABLE KYAEG..HANGUL SYLLABLE KYAEH
+	{0xCEE4, 0xCEE4, prLV},                     // Lo       HANGUL SYLLABLE KEO
+	{0xCEE5, 0xCEFF, prLVT},                    // Lo  [27] HANGUL SYLLABLE KEOG..HANGUL SYLLABLE KEOH
+	{0xCF00, 0xCF00, prLV},                     // Lo       HANGUL SYLLABLE KE
+	{0xCF01, 0xCF1B, prLVT},                    // Lo  [27] HANGUL SYLLABLE KEG..HANGUL SYLLABLE KEH
+	{0xCF1C, 0xCF1C, prLV},                     // Lo       HANGUL SYLLABLE KYEO
+	{0xCF1D, 0xCF37, prLVT},                    // Lo  [27] HANGUL SYLLABLE KYEOG..HANGUL SYLLABLE KYEOH
+	{0xCF38, 0xCF38, prLV},                     // Lo       HANGUL SYLLABLE KYE
+	{0xCF39, 0xCF53, prLVT},                    // Lo  [27] HANGUL SYLLABLE KYEG..HANGUL SYLLABLE KYEH
+	{0xCF54, 0xCF54, prLV},                     // Lo       HANGUL SYLLABLE KO
+	{0xCF55, 0xCF6F, prLVT},                    // Lo  [27] HANGUL SYLLABLE KOG..HANGUL SYLLABLE KOH
+	{0xCF70, 0xCF70, prLV},                     // Lo       HANGUL SYLLABLE KWA
+	{0xCF71, 0xCF8B, prLVT},                    // Lo  [27] HANGUL SYLLABLE KWAG..HANGUL SYLLABLE KWAH
+	{0xCF8C, 0xCF8C, prLV},                     // Lo       HANGUL SYLLABLE KWAE
+	{0xCF8D, 0xCFA7, prLVT},                    // Lo  [27] HANGUL SYLLABLE KWAEG..HANGUL SYLLABLE KWAEH
+	{0xCFA8, 0xCFA8, prLV},                     // Lo       HANGUL SYLLABLE KOE
+	{0xCFA9, 0xCFC3, prLVT},                    // Lo  [27] HANGUL SYLLABLE KOEG..HANGUL SYLLABLE KOEH
+	{0xCFC4, 0xCFC4, prLV},                     // Lo       HANGUL SYLLABLE KYO
+	{0xCFC5, 0xCFDF, prLVT},                    // Lo  [27] HANGUL SYLLABLE KYOG..HANGUL SYLLABLE KYOH
+	{0xCFE0, 0xCFE0, prLV},                     // Lo       HANGUL SYLLABLE KU
+	{0xCFE1, 0xCFFB, prLVT},                    // Lo  [27] HANGUL SYLLABLE KUG..HANGUL SYLLABLE KUH
+	{0xCFFC, 0xCFFC, prLV},                     // Lo       HANGUL SYLLABLE KWEO
+	{0xCFFD, 0xD017, prLVT},                    // Lo  [27] HANGUL SYLLABLE KWEOG..HANGUL SYLLABLE KWEOH
+	{0xD018, 0xD018, prLV},                     // Lo       HANGUL SYLLABLE KWE
+	{0xD019, 0xD033, prLVT},                    // Lo  [27] HANGUL SYLLABLE KWEG..HANGUL SYLLABLE KWEH
+	{0xD034, 0xD034, prLV},                     // Lo       HANGUL SYLLABLE KWI
+	{0xD035, 0xD04F, prLVT},                    // Lo  [27] HANGUL SYLLABLE KWIG..HANGUL SYLLABLE KWIH
+	{0xD050, 0xD050, prLV},                     // Lo       HANGUL SYLLABLE KYU
+	{0xD051, 0xD06B, prLVT},                    // Lo  [27] HANGUL SYLLABLE KYUG..HANGUL SYLLABLE KYUH
+	{0xD06C, 0xD06C, prLV},                     // Lo       HANGUL SYLLABLE KEU
+	{0xD06D, 0xD087, prLVT},                    // Lo  [27] HANGUL SYLLABLE KEUG..HANGUL SYLLABLE KEUH
+	{0xD088, 0xD088, prLV},                     // Lo       HANGUL SYLLABLE KYI
+	{0xD089, 0xD0A3, prLVT},                    // Lo  [27] HANGUL SYLLABLE KYIG..HANGUL SYLLABLE KYIH
+	{0xD0A4, 0xD0A4, prLV},                     // Lo       HANGUL SYLLABLE KI
+	{0xD0A5, 0xD0BF, prLVT},                    // Lo  [27] HANGUL SYLLABLE KIG..HANGUL SYLLABLE KIH
+	{0xD0C0, 0xD0C0, prLV},                     // Lo       HANGUL SYLLABLE TA
+	{0xD0C1, 0xD0DB, prLVT},                    // Lo  [27] HANGUL SYLLABLE TAG..HANGUL SYLLABLE TAH
+	{0xD0DC, 0xD0DC, prLV},                     // Lo       HANGUL SYLLABLE TAE
+	{0xD0DD, 0xD0F7, prLVT},                    // Lo  [27] HANGUL SYLLABLE TAEG..HANGUL SYLLABLE TAEH
+	{0xD0F8, 0xD0F8, prLV},                     // Lo       HANGUL SYLLABLE TYA
+	{0xD0F9, 0xD113, prLVT},                    // Lo  [27] HANGUL SYLLABLE TYAG..HANGUL SYLLABLE TYAH
+	{0xD114, 0xD114, prLV},                     // Lo       HANGUL SYLLABLE TYAE
+	{0xD115, 0xD12F, prLVT},                    // Lo  [27] HANGUL SYLLABLE TYAEG..HANGUL SYLLABLE TYAEH
+	{0xD130, 0xD130, prLV},                     // Lo       HANGUL SYLLABLE TEO
+	{0xD131, 0xD14B, prLVT},                    // Lo  [27] HANGUL SYLLABLE TEOG..HANGUL SYLLABLE TEOH
+	{0xD14C, 0xD14C, prLV},                     // Lo       HANGUL SYLLABLE TE
+	{0xD14D, 0xD167, prLVT},                    // Lo  [27] HANGUL SYLLABLE TEG..HANGUL SYLLABLE TEH
+	{0xD168, 0xD168, prLV},                     // Lo       HANGUL SYLLABLE TYEO
+	{0xD169, 0xD183, prLVT},                    // Lo  [27] HANGUL SYLLABLE TYEOG..HANGUL SYLLABLE TYEOH
+	{0xD184, 0xD184, prLV},                     // Lo       HANGUL SYLLABLE TYE
+	{0xD185, 0xD19F, prLVT},                    // Lo  [27] HANGUL SYLLABLE TYEG..HANGUL SYLLABLE TYEH
+	{0xD1A0, 0xD1A0, prLV},                     // Lo       HANGUL SYLLABLE TO
+	{0xD1A1, 0xD1BB, prLVT},                    // Lo  [27] HANGUL SYLLABLE TOG..HANGUL SYLLABLE TOH
+	{0xD1BC, 0xD1BC, prLV},                     // Lo       HANGUL SYLLABLE TWA
+	{0xD1BD, 0xD1D7, prLVT},                    // Lo  [27] HANGUL SYLLABLE TWAG..HANGUL SYLLABLE TWAH
+	{0xD1D8, 0xD1D8, prLV},                     // Lo       HANGUL SYLLABLE TWAE
+	{0xD1D9, 0xD1F3, prLVT},                    // Lo  [27] HANGUL SYLLABLE TWAEG..HANGUL SYLLABLE TWAEH
+	{0xD1F4, 0xD1F4, prLV},                     // Lo       HANGUL SYLLABLE TOE
+	{0xD1F5, 0xD20F, prLVT},                    // Lo  [27] HANGUL SYLLABLE TOEG..HANGUL SYLLABLE TOEH
+	{0xD210, 0xD210, prLV},                     // Lo       HANGUL SYLLABLE TYO
+	{0xD211, 0xD22B, prLVT},                    // Lo  [27] HANGUL SYLLABLE TYOG..HANGUL SYLLABLE TYOH
+	{0xD22C, 0xD22C, prLV},                     // Lo       HANGUL SYLLABLE TU
+	{0xD22D, 0xD247, prLVT},                    // Lo  [27] HANGUL SYLLABLE TUG..HANGUL SYLLABLE TUH
+	{0xD248, 0xD248, prLV},                     // Lo       HANGUL SYLLABLE TWEO
+	{0xD249, 0xD263, prLVT},                    // Lo  [27] HANGUL SYLLABLE TWEOG..HANGUL SYLLABLE TWEOH
+	{0xD264, 0xD264, prLV},                     // Lo       HANGUL SYLLABLE TWE
+	{0xD265, 0xD27F, prLVT},                    // Lo  [27] HANGUL SYLLABLE TWEG..HANGUL SYLLABLE TWEH
+	{0xD280, 0xD280, prLV},                     // Lo       HANGUL SYLLABLE TWI
+	{0xD281, 0xD29B, prLVT},                    // Lo  [27] HANGUL SYLLABLE TWIG..HANGUL SYLLABLE TWIH
+	{0xD29C, 0xD29C, prLV},                     // Lo       HANGUL SYLLABLE TYU
+	{0xD29D, 0xD2B7, prLVT},                    // Lo  [27] HANGUL SYLLABLE TYUG..HANGUL SYLLABLE TYUH
+	{0xD2B8, 0xD2B8, prLV},                     // Lo       HANGUL SYLLABLE TEU
+	{0xD2B9, 0xD2D3, prLVT},                    // Lo  [27] HANGUL SYLLABLE TEUG..HANGUL SYLLABLE TEUH
+	{0xD2D4, 0xD2D4, prLV},                     // Lo       HANGUL SYLLABLE TYI
+	{0xD2D5, 0xD2EF, prLVT},                    // Lo  [27] HANGUL SYLLABLE TYIG..HANGUL SYLLABLE TYIH
+	{0xD2F0, 0xD2F0, prLV},                     // Lo       HANGUL SYLLABLE TI
+	{0xD2F1, 0xD30B, prLVT},                    // Lo  [27] HANGUL SYLLABLE TIG..HANGUL SYLLABLE TIH
+	{0xD30C, 0xD30C, prLV},                     // Lo       HANGUL SYLLABLE PA
+	{0xD30D, 0xD327, prLVT},                    // Lo  [27] HANGUL SYLLABLE PAG..HANGUL SYLLABLE PAH
+	{0xD328, 0xD328, prLV},                     // Lo       HANGUL SYLLABLE PAE
+	{0xD329, 0xD343, prLVT},                    // Lo  [27] HANGUL SYLLABLE PAEG..HANGUL SYLLABLE PAEH
+	{0xD344, 0xD344, prLV},                     // Lo       HANGUL SYLLABLE PYA
+	{0xD345, 0xD35F, prLVT},                    // Lo  [27] HANGUL SYLLABLE PYAG..HANGUL SYLLABLE PYAH
+	{0xD360, 0xD360, prLV},                     // Lo       HANGUL SYLLABLE PYAE
+	{0xD361, 0xD37B, prLVT},                    // Lo  [27] HANGUL SYLLABLE PYAEG..HANGUL SYLLABLE PYAEH
+	{0xD37C, 0xD37C, prLV},                     // Lo       HANGUL SYLLABLE PEO
+	{0xD37D, 0xD397, prLVT},                    // Lo  [27] HANGUL SYLLABLE PEOG..HANGUL SYLLABLE PEOH
+	{0xD398, 0xD398, prLV},                     // Lo       HANGUL SYLLABLE PE
+	{0xD399, 0xD3B3, prLVT},                    // Lo  [27] HANGUL SYLLABLE PEG..HANGUL SYLLABLE PEH
+	{0xD3B4, 0xD3B4, prLV},                     // Lo       HANGUL SYLLABLE PYEO
+	{0xD3B5, 0xD3CF, prLVT},                    // Lo  [27] HANGUL SYLLABLE PYEOG..HANGUL SYLLABLE PYEOH
+	{0xD3D0, 0xD3D0, prLV},                     // Lo       HANGUL SYLLABLE PYE
+	{0xD3D1, 0xD3EB, prLVT},                    // Lo  [27] HANGUL SYLLABLE PYEG..HANGUL SYLLABLE PYEH
+	{0xD3EC, 0xD3EC, prLV},                     // Lo       HANGUL SYLLABLE PO
+	{0xD3ED, 0xD407, prLVT},                    // Lo  [27] HANGUL SYLLABLE POG..HANGUL SYLLABLE POH
+	{0xD408, 0xD408, prLV},                     // Lo       HANGUL SYLLABLE PWA
+	{0xD409, 0xD423, prLVT},                    // Lo  [27] HANGUL SYLLABLE PWAG..HANGUL SYLLABLE PWAH
+	{0xD424, 0xD424, prLV},                     // Lo       HANGUL SYLLABLE PWAE
+	{0xD425, 0xD43F, prLVT},                    // Lo  [27] HANGUL SYLLABLE PWAEG..HANGUL SYLLABLE PWAEH
+	{0xD440, 0xD440, prLV},                     // Lo       HANGUL SYLLABLE POE
+	{0xD441, 0xD45B, prLVT},                    // Lo  [27] HANGUL SYLLABLE POEG..HANGUL SYLLABLE POEH
+	{0xD45C, 0xD45C, prLV},                     // Lo       HANGUL SYLLABLE PYO
+	{0xD45D, 0xD477, prLVT},                    // Lo  [27] HANGUL SYLLABLE PYOG..HANGUL SYLLABLE PYOH
+	{0xD478, 0xD478, prLV},                     // Lo       HANGUL SYLLABLE PU
+	{0xD479, 0xD493, prLVT},                    // Lo  [27] HANGUL SYLLABLE PUG..HANGUL SYLLABLE PUH
+	{0xD494, 0xD494, prLV},                     // Lo       HANGUL SYLLABLE PWEO
+	{0xD495, 0xD4AF, prLVT},                    // Lo  [27] HANGUL SYLLABLE PWEOG..HANGUL SYLLABLE PWEOH
+	{0xD4B0, 0xD4B0, prLV},                     // Lo       HANGUL SYLLABLE PWE
+	{0xD4B1, 0xD4CB, prLVT},                    // Lo  [27] HANGUL SYLLABLE PWEG..HANGUL SYLLABLE PWEH
+	{0xD4CC, 0xD4CC, prLV},                     // Lo       HANGUL SYLLABLE PWI
+	{0xD4CD, 0xD4E7, prLVT},                    // Lo  [27] HANGUL SYLLABLE PWIG..HANGUL SYLLABLE PWIH
+	{0xD4E8, 0xD4E8, prLV},                     // Lo       HANGUL SYLLABLE PYU
+	{0xD4E9, 0xD503, prLVT},                    // Lo  [27] HANGUL SYLLABLE PYUG..HANGUL SYLLABLE PYUH
+	{0xD504, 0xD504, prLV},                     // Lo       HANGUL SYLLABLE PEU
+	{0xD505, 0xD51F, prLVT},                    // Lo  [27] HANGUL SYLLABLE PEUG..HANGUL SYLLABLE PEUH
+	{0xD520, 0xD520, prLV},                     // Lo       HANGUL SYLLABLE PYI
+	{0xD521, 0xD53B, prLVT},                    // Lo  [27] HANGUL SYLLABLE PYIG..HANGUL SYLLABLE PYIH
+	{0xD53C, 0xD53C, prLV},                     // Lo       HANGUL SYLLABLE PI
+	{0xD53D, 0xD557, prLVT},                    // Lo  [27] HANGUL SYLLABLE PIG..HANGUL SYLLABLE PIH
+	{0xD558, 0xD558, prLV},                     // Lo       HANGUL SYLLABLE HA
+	{0xD559, 0xD573, prLVT},                    // Lo  [27] HANGUL SYLLABLE HAG..HANGUL SYLLABLE HAH
+	{0xD574, 0xD574, prLV},                     // Lo       HANGUL SYLLABLE HAE
+	{0xD575, 0xD58F, prLVT},                    // Lo  [27] HANGUL SYLLABLE HAEG..HANGUL SYLLABLE HAEH
+	{0xD590, 0xD590, prLV},                     // Lo       HANGUL SYLLABLE HYA
+	{0xD591, 0xD5AB, prLVT},                    // Lo  [27] HANGUL SYLLABLE HYAG..HANGUL SYLLABLE HYAH
+	{0xD5AC, 0xD5AC, prLV},                     // Lo       HANGUL SYLLABLE HYAE
+	{0xD5AD, 0xD5C7, prLVT},                    // Lo  [27] HANGUL SYLLABLE HYAEG..HANGUL SYLLABLE HYAEH
+	{0xD5C8, 0xD5C8, prLV},                     // Lo       HANGUL SYLLABLE HEO
+	{0xD5C9, 0xD5E3, prLVT},                    // Lo  [27] HANGUL SYLLABLE HEOG..HANGUL SYLLABLE HEOH
+	{0xD5E4, 0xD5E4, prLV},                     // Lo       HANGUL SYLLABLE HE
+	{0xD5E5, 0xD5FF, prLVT},                    // Lo  [27] HANGUL SYLLABLE HEG..HANGUL SYLLABLE HEH
+	{0xD600, 0xD600, prLV},                     // Lo       HANGUL SYLLABLE HYEO
+	{0xD601, 0xD61B, prLVT},                    // Lo  [27] HANGUL SYLLABLE HYEOG..HANGUL SYLLABLE HYEOH
+	{0xD61C, 0xD61C, prLV},                     // Lo       HANGUL SYLLABLE HYE
+	{0xD61D, 0xD637, prLVT},                    // Lo  [27] HANGUL SYLLABLE HYEG..HANGUL SYLLABLE HYEH
+	{0xD638, 0xD638, prLV},                     // Lo       HANGUL SYLLABLE HO
+	{0xD639, 0xD653, prLVT},                    // Lo  [27] HANGUL SYLLABLE HOG..HANGUL SYLLABLE HOH
+	{0xD654, 0xD654, prLV},                     // Lo       HANGUL SYLLABLE HWA
+	{0xD655, 0xD66F, prLVT},                    // Lo  [27] HANGUL SYLLABLE HWAG..HANGUL SYLLABLE HWAH
+	{0xD670, 0xD670, prLV},                     // Lo       HANGUL SYLLABLE HWAE
+	{0xD671, 0xD68B, prLVT},                    // Lo  [27] HANGUL SYLLABLE HWAEG..HANGUL SYLLABLE HWAEH
+	{0xD68C, 0xD68C, prLV},                     // Lo       HANGUL SYLLABLE HOE
+	{0xD68D, 0xD6A7, prLVT},                    // Lo  [27] HANGUL SYLLABLE HOEG..HANGUL SYLLABLE HOEH
+	{0xD6A8, 0xD6A8, prLV},                     // Lo       HANGUL SYLLABLE HYO
+	{0xD6A9, 0xD6C3, prLVT},                    // Lo  [27] HANGUL SYLLABLE HYOG..HANGUL SYLLABLE HYOH
+	{0xD6C4, 0xD6C4, prLV},                     // Lo       HANGUL SYLLABLE HU
+	{0xD6C5, 0xD6DF, prLVT},                    // Lo  [27] HANGUL SYLLABLE HUG..HANGUL SYLLABLE HUH
+	{0xD6E0, 0xD6E0, prLV},                     // Lo       HANGUL SYLLABLE HWEO
+	{0xD6E1, 0xD6FB, prLVT},                    // Lo  [27] HANGUL SYLLABLE HWEOG..HANGUL SYLLABLE HWEOH
+	{0xD6FC, 0xD6FC, prLV},                     // Lo       HANGUL SYLLABLE HWE
+	{0xD6FD, 0xD717, prLVT},                    // Lo  [27] HANGUL SYLLABLE HWEG..HANGUL SYLLABLE HWEH
+	{0xD718, 0xD718, prLV},                     // Lo       HANGUL SYLLABLE HWI
+	{0xD719, 0xD733, prLVT},                    // Lo  [27] HANGUL SYLLABLE HWIG..HANGUL SYLLABLE HWIH
+	{0xD734, 0xD734, prLV},                     // Lo       HANGUL SYLLABLE HYU
+	{0xD735, 0xD74F, prLVT},                    // Lo  [27] HANGUL SYLLABLE HYUG..HANGUL SYLLABLE HYUH
+	{0xD750, 0xD750, prLV},                     // Lo       HANGUL SYLLABLE HEU
+	{0xD751, 0xD76B, prLVT},                    // Lo  [27] HANGUL SYLLABLE HEUG..HANGUL SYLLABLE HEUH
+	{0xD76C, 0xD76C, prLV},                     // Lo       HANGUL SYLLABLE HYI
+	{0xD76D, 0xD787, prLVT},                    // Lo  [27] HANGUL SYLLABLE HYIG..HANGUL SYLLABLE HYIH
+	{0xD788, 0xD788, prLV},                     // Lo       HANGUL SYLLABLE HI
+	{0xD789, 0xD7A3, prLVT},                    // Lo  [27] HANGUL SYLLABLE HIG..HANGUL SYLLABLE HIH
+	{0xD7B0, 0xD7C6, prV},                      // Lo  [23] HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG ARAEA-E
+	{0xD7CB, 0xD7FB, prT},                      // Lo  [49] HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEONG PHIEUPH-THIEUTH
+	{0xFB1E, 0xFB1E, prExtend},                 // Mn       HEBREW POINT JUDEO-SPANISH VARIKA
+	{0xFE00, 0xFE0F, prExtend},                 // Mn  [16] VARIATION SELECTOR-1..VARIATION SELECTOR-16
+	{0xFE20, 0xFE2F, prExtend},                 // Mn  [16] COMBINING LIGATURE LEFT HALF..COMBINING CYRILLIC TITLO RIGHT HALF
+	{0xFEFF, 0xFEFF, prControl},                // Cf       ZERO WIDTH NO-BREAK SPACE
+	{0xFF9E, 0xFF9F, prExtend},                 // Lm   [2] HALFWIDTH KATAKANA VOICED SOUND MARK..HALFWIDTH KATAKANA SEMI-VOICED SOUND MARK
+	{0xFFF0, 0xFFF8, prControl},                // Cn   [9] <reserved-FFF0>..<reserved-FFF8>
+	{0xFFF9, 0xFFFB, prControl},                // Cf   [3] INTERLINEAR ANNOTATION ANCHOR..INTERLINEAR ANNOTATION TERMINATOR
+	{0x101FD, 0x101FD, prExtend},               // Mn       PHAISTOS DISC SIGN COMBINING OBLIQUE STROKE
+	{0x102E0, 0x102E0, prExtend},               // Mn       COPTIC EPACT THOUSANDS MARK
+	{0x10376, 0x1037A, prExtend},               // Mn   [5] COMBINING OLD PERMIC LETTER AN..COMBINING OLD PERMIC LETTER SII
+	{0x10A01, 0x10A03, prExtend},               // Mn   [3] KHAROSHTHI VOWEL SIGN I..KHAROSHTHI VOWEL SIGN VOCALIC R
+	{0x10A05, 0x10A06, prExtend},               // Mn   [2] KHAROSHTHI VOWEL SIGN E..KHAROSHTHI VOWEL SIGN O
+	{0x10A0C, 0x10A0F, prExtend},               // Mn   [4] KHAROSHTHI VOWEL LENGTH MARK..KHAROSHTHI SIGN VISARGA
+	{0x10A38, 0x10A3A, prExtend},               // Mn   [3] KHAROSHTHI SIGN BAR ABOVE..KHAROSHTHI SIGN DOT BELOW
+	{0x10A3F, 0x10A3F, prExtend},               // Mn       KHAROSHTHI VIRAMA
+	{0x10AE5, 0x10AE6, prExtend},               // Mn   [2] MANICHAEAN ABBREVIATION MARK ABOVE..MANICHAEAN ABBREVIATION MARK BELOW
+	{0x10D24, 0x10D27, prExtend},               // Mn   [4] HANIFI ROHINGYA SIGN HARBAHAY..HANIFI ROHINGYA SIGN TASSI
+	{0x10F46, 0x10F50, prExtend},               // Mn  [11] SOGDIAN COMBINING DOT BELOW..SOGDIAN COMBINING STROKE BELOW
+	{0x11000, 0x11000, prSpacingMark},          // Mc       BRAHMI SIGN CANDRABINDU
+	{0x11001, 0x11001, prExtend},               // Mn       BRAHMI SIGN ANUSVARA
+	{0x11002, 0x11002, prSpacingMark},          // Mc       BRAHMI SIGN VISARGA
+	{0x11038, 0x11046, prExtend},               // Mn  [15] BRAHMI VOWEL SIGN AA..BRAHMI VIRAMA
+	{0x1107F, 0x11081, prExtend},               // Mn   [3] BRAHMI NUMBER JOINER..KAITHI SIGN ANUSVARA
+	{0x11082, 0x11082, prSpacingMark},          // Mc       KAITHI SIGN VISARGA
+	{0x110B0, 0x110B2, prSpacingMark},          // Mc   [3] KAITHI VOWEL SIGN AA..KAITHI VOWEL SIGN II
+	{0x110B3, 0x110B6, prExtend},               // Mn   [4] KAITHI VOWEL SIGN U..KAITHI VOWEL SIGN AI
+	{0x110B7, 0x110B8, prSpacingMark},          // Mc   [2] KAITHI VOWEL SIGN O..KAITHI VOWEL SIGN AU
+	{0x110B9, 0x110BA, prExtend},               // Mn   [2] KAITHI SIGN VIRAMA..KAITHI SIGN NUKTA
+	{0x110BD, 0x110BD, prPreprend},             // Cf       KAITHI NUMBER SIGN
+	{0x110CD, 0x110CD, prPreprend},             // Cf       KAITHI NUMBER SIGN ABOVE
+	{0x11100, 0x11102, prExtend},               // Mn   [3] CHAKMA SIGN CANDRABINDU..CHAKMA SIGN VISARGA
+	{0x11127, 0x1112B, prExtend},               // Mn   [5] CHAKMA VOWEL SIGN A..CHAKMA VOWEL SIGN UU
+	{0x1112C, 0x1112C, prSpacingMark},          // Mc       CHAKMA VOWEL SIGN E
+	{0x1112D, 0x11134, prExtend},               // Mn   [8] CHAKMA VOWEL SIGN AI..CHAKMA MAAYYAA
+	{0x11145, 0x11146, prSpacingMark},          // Mc   [2] CHAKMA VOWEL SIGN AA..CHAKMA VOWEL SIGN EI
+	{0x11173, 0x11173, prExtend},               // Mn       MAHAJANI SIGN NUKTA
+	{0x11180, 0x11181, prExtend},               // Mn   [2] SHARADA SIGN CANDRABINDU..SHARADA SIGN ANUSVARA
+	{0x11182, 0x11182, prSpacingMark},          // Mc       SHARADA SIGN VISARGA
+	{0x111B3, 0x111B5, prSpacingMark},          // Mc   [3] SHARADA VOWEL SIGN AA..SHARADA VOWEL SIGN II
+	{0x111B6, 0x111BE, prExtend},               // Mn   [9] SHARADA VOWEL SIGN U..SHARADA VOWEL SIGN O
+	{0x111BF, 0x111C0, prSpacingMark},          // Mc   [2] SHARADA VOWEL SIGN AU..SHARADA SIGN VIRAMA
+	{0x111C2, 0x111C3, prPreprend},             // Lo   [2] SHARADA SIGN JIHVAMULIYA..SHARADA SIGN UPADHMANIYA
+	{0x111C9, 0x111CC, prExtend},               // Mn   [4] SHARADA SANDHI MARK..SHARADA EXTRA SHORT VOWEL MARK
+	{0x1122C, 0x1122E, prSpacingMark},          // Mc   [3] KHOJKI VOWEL SIGN AA..KHOJKI VOWEL SIGN II
+	{0x1122F, 0x11231, prExtend},               // Mn   [3] KHOJKI VOWEL SIGN U..KHOJKI VOWEL SIGN AI
+	{0x11232, 0x11233, prSpacingMark},          // Mc   [2] KHOJKI VOWEL SIGN O..KHOJKI VOWEL SIGN AU
+	{0x11234, 0x11234, prExtend},               // Mn       KHOJKI SIGN ANUSVARA
+	{0x11235, 0x11235, prSpacingMark},          // Mc       KHOJKI SIGN VIRAMA
+	{0x11236, 0x11237, prExtend},               // Mn   [2] KHOJKI SIGN NUKTA..KHOJKI SIGN SHADDA
+	{0x1123E, 0x1123E, prExtend},               // Mn       KHOJKI SIGN SUKUN
+	{0x112DF, 0x112DF, prExtend},               // Mn       KHUDAWADI SIGN ANUSVARA
+	{0x112E0, 0x112E2, prSpacingMark},          // Mc   [3] KHUDAWADI VOWEL SIGN AA..KHUDAWADI VOWEL SIGN II
+	{0x112E3, 0x112EA, prExtend},               // Mn   [8] KHUDAWADI VOWEL SIGN U..KHUDAWADI SIGN VIRAMA
+	{0x11300, 0x11301, prExtend},               // Mn   [2] GRANTHA SIGN COMBINING ANUSVARA ABOVE..GRANTHA SIGN CANDRABINDU
+	{0x11302, 0x11303, prSpacingMark},          // Mc   [2] GRANTHA SIGN ANUSVARA..GRANTHA SIGN VISARGA
+	{0x1133B, 0x1133C, prExtend},               // Mn   [2] COMBINING BINDU BELOW..GRANTHA SIGN NUKTA
+	{0x1133E, 0x1133E, prExtend},               // Mc       GRANTHA VOWEL SIGN AA
+	{0x1133F, 0x1133F, prSpacingMark},          // Mc       GRANTHA VOWEL SIGN I
+	{0x11340, 0x11340, prExtend},               // Mn       GRANTHA VOWEL SIGN II
+	{0x11341, 0x11344, prSpacingMark},          // Mc   [4] GRANTHA VOWEL SIGN U..GRANTHA VOWEL SIGN VOCALIC RR
+	{0x11347, 0x11348, prSpacingMark},          // Mc   [2] GRANTHA VOWEL SIGN EE..GRANTHA VOWEL SIGN AI
+	{0x1134B, 0x1134D, prSpacingMark},          // Mc   [3] GRANTHA VOWEL SIGN OO..GRANTHA SIGN VIRAMA
+	{0x11357, 0x11357, prExtend},               // Mc       GRANTHA AU LENGTH MARK
+	{0x11362, 0x11363, prSpacingMark},          // Mc   [2] GRANTHA VOWEL SIGN VOCALIC L..GRANTHA VOWEL SIGN VOCALIC LL
+	{0x11366, 0x1136C, prExtend},               // Mn   [7] COMBINING GRANTHA DIGIT ZERO..COMBINING GRANTHA DIGIT SIX
+	{0x11370, 0x11374, prExtend},               // Mn   [5] COMBINING GRANTHA LETTER A..COMBINING GRANTHA LETTER PA
+	{0x11435, 0x11437, prSpacingMark},          // Mc   [3] NEWA VOWEL SIGN AA..NEWA VOWEL SIGN II
+	{0x11438, 0x1143F, prExtend},               // Mn   [8] NEWA VOWEL SIGN U..NEWA VOWEL SIGN AI
+	{0x11440, 0x11441, prSpacingMark},          // Mc   [2] NEWA VOWEL SIGN O..NEWA VOWEL SIGN AU
+	{0x11442, 0x11444, prExtend},               // Mn   [3] NEWA SIGN VIRAMA..NEWA SIGN ANUSVARA
+	{0x11445, 0x11445, prSpacingMark},          // Mc       NEWA SIGN VISARGA
+	{0x11446, 0x11446, prExtend},               // Mn       NEWA SIGN NUKTA
+	{0x1145E, 0x1145E, prExtend},               // Mn       NEWA SANDHI MARK
+	{0x114B0, 0x114B0, prExtend},               // Mc       TIRHUTA VOWEL SIGN AA
+	{0x114B1, 0x114B2, prSpacingMark},          // Mc   [2] TIRHUTA VOWEL SIGN I..TIRHUTA VOWEL SIGN II
+	{0x114B3, 0x114B8, prExtend},               // Mn   [6] TIRHUTA VOWEL SIGN U..TIRHUTA VOWEL SIGN VOCALIC LL
+	{0x114B9, 0x114B9, prSpacingMark},          // Mc       TIRHUTA VOWEL SIGN E
+	{0x114BA, 0x114BA, prExtend},               // Mn       TIRHUTA VOWEL SIGN SHORT E
+	{0x114BB, 0x114BC, prSpacingMark},          // Mc   [2] TIRHUTA VOWEL SIGN AI..TIRHUTA VOWEL SIGN O
+	{0x114BD, 0x114BD, prExtend},               // Mc       TIRHUTA VOWEL SIGN SHORT O
+	{0x114BE, 0x114BE, prSpacingMark},          // Mc       TIRHUTA VOWEL SIGN AU
+	{0x114BF, 0x114C0, prExtend},               // Mn   [2] TIRHUTA SIGN CANDRABINDU..TIRHUTA SIGN ANUSVARA
+	{0x114C1, 0x114C1, prSpacingMark},          // Mc       TIRHUTA SIGN VISARGA
+	{0x114C2, 0x114C3, prExtend},               // Mn   [2] TIRHUTA SIGN VIRAMA..TIRHUTA SIGN NUKTA
+	{0x115AF, 0x115AF, prExtend},               // Mc       SIDDHAM VOWEL SIGN AA
+	{0x115B0, 0x115B1, prSpacingMark},          // Mc   [2] SIDDHAM VOWEL SIGN I..SIDDHAM VOWEL SIGN II
+	{0x115B2, 0x115B5, prExtend},               // Mn   [4] SIDDHAM VOWEL SIGN U..SIDDHAM VOWEL SIGN VOCALIC RR
+	{0x115B8, 0x115BB, prSpacingMark},          // Mc   [4] SIDDHAM VOWEL SIGN E..SIDDHAM VOWEL SIGN AU
+	{0x115BC, 0x115BD, prExtend},               // Mn   [2] SIDDHAM SIGN CANDRABINDU..SIDDHAM SIGN ANUSVARA
+	{0x115BE, 0x115BE, prSpacingMark},          // Mc       SIDDHAM SIGN VISARGA
+	{0x115BF, 0x115C0, prExtend},               // Mn   [2] SIDDHAM SIGN VIRAMA..SIDDHAM SIGN NUKTA
+	{0x115DC, 0x115DD, prExtend},               // Mn   [2] SIDDHAM VOWEL SIGN ALTERNATE U..SIDDHAM VOWEL SIGN ALTERNATE UU
+	{0x11630, 0x11632, prSpacingMark},          // Mc   [3] MODI VOWEL SIGN AA..MODI VOWEL SIGN II
+	{0x11633, 0x1163A, prExtend},               // Mn   [8] MODI VOWEL SIGN U..MODI VOWEL SIGN AI
+	{0x1163B, 0x1163C, prSpacingMark},          // Mc   [2] MODI VOWEL SIGN O..MODI VOWEL SIGN AU
+	{0x1163D, 0x1163D, prExtend},               // Mn       MODI SIGN ANUSVARA
+	{0x1163E, 0x1163E, prSpacingMark},          // Mc       MODI SIGN VISARGA
+	{0x1163F, 0x11640, prExtend},               // Mn   [2] MODI SIGN VIRAMA..MODI SIGN ARDHACANDRA
+	{0x116AB, 0x116AB, prExtend},               // Mn       TAKRI SIGN ANUSVARA
+	{0x116AC, 0x116AC, prSpacingMark},          // Mc       TAKRI SIGN VISARGA
+	{0x116AD, 0x116AD, prExtend},               // Mn       TAKRI VOWEL SIGN AA
+	{0x116AE, 0x116AF, prSpacingMark},          // Mc   [2] TAKRI VOWEL SIGN I..TAKRI VOWEL SIGN II
+	{0x116B0, 0x116B5, prExtend},               // Mn   [6] TAKRI VOWEL SIGN U..TAKRI VOWEL SIGN AU
+	{0x116B6, 0x116B6, prSpacingMark},          // Mc       TAKRI SIGN VIRAMA
+	{0x116B7, 0x116B7, prExtend},               // Mn       TAKRI SIGN NUKTA
+	{0x1171D, 0x1171F, prExtend},               // Mn   [3] AHOM CONSONANT SIGN MEDIAL LA..AHOM CONSONANT SIGN MEDIAL LIGATING RA
+	{0x11720, 0x11721, prSpacingMark},          // Mc   [2] AHOM VOWEL SIGN A..AHOM VOWEL SIGN AA
+	{0x11722, 0x11725, prExtend},               // Mn   [4] AHOM VOWEL SIGN I..AHOM VOWEL SIGN UU
+	{0x11726, 0x11726, prSpacingMark},          // Mc       AHOM VOWEL SIGN E
+	{0x11727, 0x1172B, prExtend},               // Mn   [5] AHOM VOWEL SIGN AW..AHOM SIGN KILLER
+	{0x1182C, 0x1182E, prSpacingMark},          // Mc   [3] DOGRA VOWEL SIGN AA..DOGRA VOWEL SIGN II
+	{0x1182F, 0x11837, prExtend},               // Mn   [9] DOGRA VOWEL SIGN U..DOGRA SIGN ANUSVARA
+	{0x11838, 0x11838, prSpacingMark},          // Mc       DOGRA SIGN VISARGA
+	{0x11839, 0x1183A, prExtend},               // Mn   [2] DOGRA SIGN VIRAMA..DOGRA SIGN NUKTA
+	{0x119D1, 0x119D3, prSpacingMark},          // Mc   [3] NANDINAGARI VOWEL SIGN AA..NANDINAGARI VOWEL SIGN II
+	{0x119D4, 0x119D7, prExtend},               // Mn   [4] NANDINAGARI VOWEL SIGN U..NANDINAGARI VOWEL SIGN VOCALIC RR
+	{0x119DA, 0x119DB, prExtend},               // Mn   [2] NANDINAGARI VOWEL SIGN E..NANDINAGARI VOWEL SIGN AI
+	{0x119DC, 0x119DF, prSpacingMark},          // Mc   [4] NANDINAGARI VOWEL SIGN O..NANDINAGARI SIGN VISARGA
+	{0x119E0, 0x119E0, prExtend},               // Mn       NANDINAGARI SIGN VIRAMA
+	{0x119E4, 0x119E4, prSpacingMark},          // Mc       NANDINAGARI VOWEL SIGN PRISHTHAMATRA E
+	{0x11A01, 0x11A0A, prExtend},               // Mn  [10] ZANABAZAR SQUARE VOWEL SIGN I..ZANABAZAR SQUARE VOWEL LENGTH MARK
+	{0x11A33, 0x11A38, prExtend},               // Mn   [6] ZANABAZAR SQUARE FINAL CONSONANT MARK..ZANABAZAR SQUARE SIGN ANUSVARA
+	{0x11A39, 0x11A39, prSpacingMark},          // Mc       ZANABAZAR SQUARE SIGN VISARGA
+	{0x11A3A, 0x11A3A, prPreprend},             // Lo       ZANABAZAR SQUARE CLUSTER-INITIAL LETTER RA
+	{0x11A3B, 0x11A3E, prExtend},               // Mn   [4] ZANABAZAR SQUARE CLUSTER-FINAL LETTER YA..ZANABAZAR SQUARE CLUSTER-FINAL LETTER VA
+	{0x11A47, 0x11A47, prExtend},               // Mn       ZANABAZAR SQUARE SUBJOINER
+	{0x11A51, 0x11A56, prExtend},               // Mn   [6] SOYOMBO VOWEL SIGN I..SOYOMBO VOWEL SIGN OE
+	{0x11A57, 0x11A58, prSpacingMark},          // Mc   [2] SOYOMBO VOWEL SIGN AI..SOYOMBO VOWEL SIGN AU
+	{0x11A59, 0x11A5B, prExtend},               // Mn   [3] SOYOMBO VOWEL SIGN VOCALIC R..SOYOMBO VOWEL LENGTH MARK
+	{0x11A84, 0x11A89, prPreprend},             // Lo   [6] SOYOMBO SIGN JIHVAMULIYA..SOYOMBO CLUSTER-INITIAL LETTER SA
+	{0x11A8A, 0x11A96, prExtend},               // Mn  [13] SOYOMBO FINAL CONSONANT SIGN G..SOYOMBO SIGN ANUSVARA
+	{0x11A97, 0x11A97, prSpacingMark},          // Mc       SOYOMBO SIGN VISARGA
+	{0x11A98, 0x11A99, prExtend},               // Mn   [2] SOYOMBO GEMINATION MARK..SOYOMBO SUBJOINER
+	{0x11C2F, 0x11C2F, prSpacingMark},          // Mc       BHAIKSUKI VOWEL SIGN AA
+	{0x11C30, 0x11C36, prExtend},               // Mn   [7] BHAIKSUKI VOWEL SIGN I..BHAIKSUKI VOWEL SIGN VOCALIC L
+	{0x11C38, 0x11C3D, prExtend},               // Mn   [6] BHAIKSUKI VOWEL SIGN E..BHAIKSUKI SIGN ANUSVARA
+	{0x11C3E, 0x11C3E, prSpacingMark},          // Mc       BHAIKSUKI SIGN VISARGA
+	{0x11C3F, 0x11C3F, prExtend},               // Mn       BHAIKSUKI SIGN VIRAMA
+	{0x11C92, 0x11CA7, prExtend},               // Mn  [22] MARCHEN SUBJOINED LETTER KA..MARCHEN SUBJOINED LETTER ZA
+	{0x11CA9, 0x11CA9, prSpacingMark},          // Mc       MARCHEN SUBJOINED LETTER YA
+	{0x11CAA, 0x11CB0, prExtend},               // Mn   [7] MARCHEN SUBJOINED LETTER RA..MARCHEN VOWEL SIGN AA
+	{0x11CB1, 0x11CB1, prSpacingMark},          // Mc       MARCHEN VOWEL SIGN I
+	{0x11CB2, 0x11CB3, prExtend},               // Mn   [2] MARCHEN VOWEL SIGN U..MARCHEN VOWEL SIGN E
+	{0x11CB4, 0x11CB4, prSpacingMark},          // Mc       MARCHEN VOWEL SIGN O
+	{0x11CB5, 0x11CB6, prExtend},               // Mn   [2] MARCHEN SIGN ANUSVARA..MARCHEN SIGN CANDRABINDU
+	{0x11D31, 0x11D36, prExtend},               // Mn   [6] MASARAM GONDI VOWEL SIGN AA..MASARAM GONDI VOWEL SIGN VOCALIC R
+	{0x11D3A, 0x11D3A, prExtend},               // Mn       MASARAM GONDI VOWEL SIGN E
+	{0x11D3C, 0x11D3D, prExtend},               // Mn   [2] MASARAM GONDI VOWEL SIGN AI..MASARAM GONDI VOWEL SIGN O
+	{0x11D3F, 0x11D45, prExtend},               // Mn   [7] MASARAM GONDI VOWEL SIGN AU..MASARAM GONDI VIRAMA
+	{0x11D46, 0x11D46, prPreprend},             // Lo       MASARAM GONDI REPHA
+	{0x11D47, 0x11D47, prExtend},               // Mn       MASARAM GONDI RA-KARA
+	{0x11D8A, 0x11D8E, prSpacingMark},          // Mc   [5] GUNJALA GONDI VOWEL SIGN AA..GUNJALA GONDI VOWEL SIGN UU
+	{0x11D90, 0x11D91, prExtend},               // Mn   [2] GUNJALA GONDI VOWEL SIGN EE..GUNJALA GONDI VOWEL SIGN AI
+	{0x11D93, 0x11D94, prSpacingMark},          // Mc   [2] GUNJALA GONDI VOWEL SIGN OO..GUNJALA GONDI VOWEL SIGN AU
+	{0x11D95, 0x11D95, prExtend},               // Mn       GUNJALA GONDI SIGN ANUSVARA
+	{0x11D96, 0x11D96, prSpacingMark},          // Mc       GUNJALA GONDI SIGN VISARGA
+	{0x11D97, 0x11D97, prExtend},               // Mn       GUNJALA GONDI VIRAMA
+	{0x11EF3, 0x11EF4, prExtend},               // Mn   [2] MAKASAR VOWEL SIGN I..MAKASAR VOWEL SIGN U
+	{0x11EF5, 0x11EF6, prSpacingMark},          // Mc   [2] MAKASAR VOWEL SIGN E..MAKASAR VOWEL SIGN O
+	{0x13430, 0x13438, prControl},              // Cf   [9] EGYPTIAN HIEROGLYPH VERTICAL JOINER..EGYPTIAN HIEROGLYPH END SEGMENT
+	{0x16AF0, 0x16AF4, prExtend},               // Mn   [5] BASSA VAH COMBINING HIGH TONE..BASSA VAH COMBINING HIGH-LOW TONE
+	{0x16B30, 0x16B36, prExtend},               // Mn   [7] PAHAWH HMONG MARK CIM TUB..PAHAWH HMONG MARK CIM TAUM
+	{0x16F4F, 0x16F4F, prExtend},               // Mn       MIAO SIGN CONSONANT MODIFIER BAR
+	{0x16F51, 0x16F87, prSpacingMark},          // Mc  [55] MIAO SIGN ASPIRATION..MIAO VOWEL SIGN UI
+	{0x16F8F, 0x16F92, prExtend},               // Mn   [4] MIAO TONE RIGHT..MIAO TONE BELOW
+	{0x1BC9D, 0x1BC9E, prExtend},               // Mn   [2] DUPLOYAN THICK LETTER SELECTOR..DUPLOYAN DOUBLE MARK
+	{0x1BCA0, 0x1BCA3, prControl},              // Cf   [4] SHORTHAND FORMAT LETTER OVERLAP..SHORTHAND FORMAT UP STEP
+	{0x1D165, 0x1D165, prExtend},               // Mc       MUSICAL SYMBOL COMBINING STEM
+	{0x1D166, 0x1D166, prSpacingMark},          // Mc       MUSICAL SYMBOL COMBINING SPRECHGESANG STEM
+	{0x1D167, 0x1D169, prExtend},               // Mn   [3] MUSICAL SYMBOL COMBINING TREMOLO-1..MUSICAL SYMBOL COMBINING TREMOLO-3
+	{0x1D16D, 0x1D16D, prSpacingMark},          // Mc       MUSICAL SYMBOL COMBINING AUGMENTATION DOT
+	{0x1D16E, 0x1D172, prExtend},               // Mc   [5] MUSICAL SYMBOL COMBINING FLAG-1..MUSICAL SYMBOL COMBINING FLAG-5
+	{0x1D173, 0x1D17A, prControl},              // Cf   [8] MUSICAL SYMBOL BEGIN BEAM..MUSICAL SYMBOL END PHRASE
+	{0x1D17B, 0x1D182, prExtend},               // Mn   [8] MUSICAL SYMBOL COMBINING ACCENT..MUSICAL SYMBOL COMBINING LOURE
+	{0x1D185, 0x1D18B, prExtend},               // Mn   [7] MUSICAL SYMBOL COMBINING DOIT..MUSICAL SYMBOL COMBINING TRIPLE TONGUE
+	{0x1D1AA, 0x1D1AD, prExtend},               // Mn   [4] MUSICAL SYMBOL COMBINING DOWN BOW..MUSICAL SYMBOL COMBINING SNAP PIZZICATO
+	{0x1D242, 0x1D244, prExtend},               // Mn   [3] COMBINING GREEK MUSICAL TRISEME..COMBINING GREEK MUSICAL PENTASEME
+	{0x1DA00, 0x1DA36, prExtend},               // Mn  [55] SIGNWRITING HEAD RIM..SIGNWRITING AIR SUCKING IN
+	{0x1DA3B, 0x1DA6C, prExtend},               // Mn  [50] SIGNWRITING MOUTH CLOSED NEUTRAL..SIGNWRITING EXCITEMENT
+	{0x1DA75, 0x1DA75, prExtend},               // Mn       SIGNWRITING UPPER BODY TILTING FROM HIP JOINTS
+	{0x1DA84, 0x1DA84, prExtend},               // Mn       SIGNWRITING LOCATION HEAD NECK
+	{0x1DA9B, 0x1DA9F, prExtend},               // Mn   [5] SIGNWRITING FILL MODIFIER-2..SIGNWRITING FILL MODIFIER-6
+	{0x1DAA1, 0x1DAAF, prExtend},               // Mn  [15] SIGNWRITING ROTATION MODIFIER-2..SIGNWRITING ROTATION MODIFIER-16
+	{0x1E000, 0x1E006, prExtend},               // Mn   [7] COMBINING GLAGOLITIC LETTER AZU..COMBINING GLAGOLITIC LETTER ZHIVETE
+	{0x1E008, 0x1E018, prExtend},               // Mn  [17] COMBINING GLAGOLITIC LETTER ZEMLJA..COMBINING GLAGOLITIC LETTER HERU
+	{0x1E01B, 0x1E021, prExtend},               // Mn   [7] COMBINING GLAGOLITIC LETTER SHTA..COMBINING GLAGOLITIC LETTER YATI
+	{0x1E023, 0x1E024, prExtend},               // Mn   [2] COMBINING GLAGOLITIC LETTER YU..COMBINING GLAGOLITIC LETTER SMALL YUS
+	{0x1E026, 0x1E02A, prExtend},               // Mn   [5] COMBINING GLAGOLITIC LETTER YO..COMBINING GLAGOLITIC LETTER FITA
+	{0x1E130, 0x1E136, prExtend},               // Mn   [7] NYIAKENG PUACHUE HMONG TONE-B..NYIAKENG PUACHUE HMONG TONE-D
+	{0x1E2EC, 0x1E2EF, prExtend},               // Mn   [4] WANCHO TONE TUP..WANCHO TONE KOINI
+	{0x1E8D0, 0x1E8D6, prExtend},               // Mn   [7] MENDE KIKAKUI COMBINING NUMBER TEENS..MENDE KIKAKUI COMBINING NUMBER MILLIONS
+	{0x1E944, 0x1E94A, prExtend},               // Mn   [7] ADLAM ALIF LENGTHENER..ADLAM NUKTA
+	{0x1F000, 0x1F02B, prExtendedPictographic}, //  5.1 [44] (🀀..🀫)    MAHJONG TILE EAST WIND..MAHJONG TILE BACK
+	{0x1F02C, 0x1F02F, prExtendedPictographic}, //   NA  [4] (🀬..🀯)    <reserved-1F02C>..<reserved-1F02F>
+	{0x1F030, 0x1F093, prExtendedPictographic}, //  5.1[100] (🀰..🂓)    DOMINO TILE HORIZONTAL BACK..DOMINO TILE VERTICAL-06-06
+	{0x1F094, 0x1F09F, prExtendedPictographic}, //   NA [12] (🂔..🂟)    <reserved-1F094>..<reserved-1F09F>
+	{0x1F0A0, 0x1F0AE, prExtendedPictographic}, //  6.0 [15] (🂠..🂮)    PLAYING CARD BACK..PLAYING CARD KING OF SPADES
+	{0x1F0AF, 0x1F0B0, prExtendedPictographic}, //   NA  [2] (🂯..🂰)    <reserved-1F0AF>..<reserved-1F0B0>
+	{0x1F0B1, 0x1F0BE, prExtendedPictographic}, //  6.0 [14] (🂱..🂾)    PLAYING CARD ACE OF HEARTS..PLAYING CARD KING OF HEARTS
+	{0x1F0BF, 0x1F0BF, prExtendedPictographic}, //  7.0  [1] (🂿)       PLAYING CARD RED JOKER
+	{0x1F0C0, 0x1F0C0, prExtendedPictographic}, //   NA  [1] (🃀)       <reserved-1F0C0>
+	{0x1F0C1, 0x1F0CF, prExtendedPictographic}, //  6.0 [15] (🃁..🃏)    PLAYING CARD ACE OF DIAMONDS..joker
+	{0x1F0D0, 0x1F0D0, prExtendedPictographic}, //   NA  [1] (🃐)       <reserved-1F0D0>
+	{0x1F0D1, 0x1F0DF, prExtendedPictographic}, //  6.0 [15] (🃑..🃟)    PLAYING CARD ACE OF CLUBS..PLAYING CARD WHITE JOKER
+	{0x1F0E0, 0x1F0F5, prExtendedPictographic}, //  7.0 [22] (🃠..🃵)    PLAYING CARD FOOL..PLAYING CARD TRUMP-21
+	{0x1F0F6, 0x1F0FF, prExtendedPictographic}, //   NA [10] (🃶..🃿)    <reserved-1F0F6>..<reserved-1F0FF>
+	{0x1F10D, 0x1F10F, prExtendedPictographic}, //   NA  [3] (🄍..🄏)    <reserved-1F10D>..<reserved-1F10F>
+	{0x1F12F, 0x1F12F, prExtendedPictographic}, // 11.0  [1] (🄯)       COPYLEFT SYMBOL
+	{0x1F16C, 0x1F16C, prExtendedPictographic}, // 12.0  [1] (🅬)       RAISED MR SIGN
+	{0x1F16D, 0x1F16F, prExtendedPictographic}, //   NA  [3] (🅭..🅯)    <reserved-1F16D>..<reserved-1F16F>
+	{0x1F170, 0x1F171, prExtendedPictographic}, //  6.0  [2] (🅰️..🅱️)    A button (blood type)..B button (blood type)
+	{0x1F17E, 0x1F17E, prExtendedPictographic}, //  6.0  [1] (🅾️)       O button (blood type)
+	{0x1F17F, 0x1F17F, prExtendedPictographic}, //  5.2  [1] (🅿️)       P button
+	{0x1F18E, 0x1F18E, prExtendedPictographic}, //  6.0  [1] (🆎)       AB button (blood type)
+	{0x1F191, 0x1F19A, prExtendedPictographic}, //  6.0 [10] (🆑..🆚)    CL button..VS button
+	{0x1F1AD, 0x1F1E5, prExtendedPictographic}, //   NA [57] (🆭..🇥)    <reserved-1F1AD>..<reserved-1F1E5>
+	{0x1F1E6, 0x1F1FF, prRegionalIndicator},    // So  [26] REGIONAL INDICATOR SYMBOL LETTER A..REGIONAL INDICATOR SYMBOL LETTER Z
+	{0x1F201, 0x1F202, prExtendedPictographic}, //  6.0  [2] (🈁..🈂️)    Japanese “here” button..Japanese “service charge” button
+	{0x1F203, 0x1F20F, prExtendedPictographic}, //   NA [13] (🈃..🈏)    <reserved-1F203>..<reserved-1F20F>
+	{0x1F21A, 0x1F21A, prExtendedPictographic}, //  5.2  [1] (🈚)       Japanese “free of charge” button
+	{0x1F22F, 0x1F22F, prExtendedPictographic}, //  5.2  [1] (🈯)       Japanese “reserved” button
+	{0x1F232, 0x1F23A, prExtendedPictographic}, //  6.0  [9] (🈲..🈺)    Japanese “prohibited” button..Japanese “open for business” button
+	{0x1F23C, 0x1F23F, prExtendedPictographic}, //   NA  [4] (🈼..🈿)    <reserved-1F23C>..<reserved-1F23F>
+	{0x1F249, 0x1F24F, prExtendedPictographic}, //   NA  [7] (🉉..🉏)    <reserved-1F249>..<reserved-1F24F>
+	{0x1F250, 0x1F251, prExtendedPictographic}, //  6.0  [2] (🉐..🉑)    Japanese “bargain” button..Japanese “acceptable” button
+	{0x1F252, 0x1F25F, prExtendedPictographic}, //   NA [14] (🉒..🉟)    <reserved-1F252>..<reserved-1F25F>
+	{0x1F260, 0x1F265, prExtendedPictographic}, // 10.0  [6] (🉠..🉥)    ROUNDED SYMBOL FOR FU..ROUNDED SYMBOL FOR CAI
+	{0x1F266, 0x1F2FF, prExtendedPictographic}, //   NA[154] (🉦..🋿)    <reserved-1F266>..<reserved-1F2FF>
+	{0x1F300, 0x1F320, prExtendedPictographic}, //  6.0 [33] (🌀..🌠)    cyclone..shooting star
+	{0x1F321, 0x1F32C, prExtendedPictographic}, //  7.0 [12] (🌡️..🌬️)    thermometer..wind face
+	{0x1F32D, 0x1F32F, prExtendedPictographic}, //  8.0  [3] (🌭..🌯)    hot dog..burrito
+	{0x1F330, 0x1F335, prExtendedPictographic}, //  6.0  [6] (🌰..🌵)    chestnut..cactus
+	{0x1F336, 0x1F336, prExtendedPictographic}, //  7.0  [1] (🌶️)       hot pepper
+	{0x1F337, 0x1F37C, prExtendedPictographic}, //  6.0 [70] (🌷..🍼)    tulip..baby bottle
+	{0x1F37D, 0x1F37D, prExtendedPictographic}, //  7.0  [1] (🍽️)       fork and knife with plate
+	{0x1F37E, 0x1F37F, prExtendedPictographic}, //  8.0  [2] (🍾..🍿)    bottle with popping cork..popcorn
+	{0x1F380, 0x1F393, prExtendedPictographic}, //  6.0 [20] (🎀..🎓)    ribbon..graduation cap
+	{0x1F394, 0x1F39F, prExtendedPictographic}, //  7.0 [12] (🎔..🎟️)    HEART WITH TIP ON THE LEFT..admission tickets
+	{0x1F3A0, 0x1F3C4, prExtendedPictographic}, //  6.0 [37] (🎠..🏄)    carousel horse..person surfing
+	{0x1F3C5, 0x1F3C5, prExtendedPictographic}, //  7.0  [1] (🏅)       sports medal
+	{0x1F3C6, 0x1F3CA, prExtendedPictographic}, //  6.0  [5] (🏆..🏊)    trophy..person swimming
+	{0x1F3CB, 0x1F3CE, prExtendedPictographic}, //  7.0  [4] (🏋️..🏎️)    person lifting weights..racing car
+	{0x1F3CF, 0x1F3D3, prExtendedPictographic}, //  8.0  [5] (🏏..🏓)    cricket game..ping pong
+	{0x1F3D4, 0x1F3DF, prExtendedPictographic}, //  7.0 [12] (🏔️..🏟️)    snow-capped mountain..stadium
+	{0x1F3E0, 0x1F3F0, prExtendedPictographic}, //  6.0 [17] (🏠..🏰)    house..castle
+	{0x1F3F1, 0x1F3F7, prExtendedPictographic}, //  7.0  [7] (🏱..🏷️)    WHITE PENNANT..label
+	{0x1F3F8, 0x1F3FA, prExtendedPictographic}, //  8.0  [3] (🏸..🏺)    badminton..amphora
+	{0x1F3FB, 0x1F3FF, prExtend},               // Sk   [5] EMOJI MODIFIER FITZPATRICK TYPE-1-2..EMOJI MODIFIER FITZPATRICK TYPE-6
+	{0x1F400, 0x1F43E, prExtendedPictographic}, //  6.0 [63] (🐀..🐾)    rat..paw prints
+	{0x1F43F, 0x1F43F, prExtendedPictographic}, //  7.0  [1] (🐿️)       chipmunk
+	{0x1F440, 0x1F440, prExtendedPictographic}, //  6.0  [1] (👀)       eyes
+	{0x1F441, 0x1F441, prExtendedPictographic}, //  7.0  [1] (👁️)       eye
+	{0x1F442, 0x1F4F7, prExtendedPictographic}, //  6.0[182] (👂..📷)    ear..camera
+	{0x1F4F8, 0x1F4F8, prExtendedPictographic}, //  7.0  [1] (📸)       camera with flash
+	{0x1F4F9, 0x1F4FC, prExtendedPictographic}, //  6.0  [4] (📹..📼)    video camera..videocassette
+	{0x1F4FD, 0x1F4FE, prExtendedPictographic}, //  7.0  [2] (📽️..📾)    film projector..PORTABLE STEREO
+	{0x1F4FF, 0x1F4FF, prExtendedPictographic}, //  8.0  [1] (📿)       prayer beads
+	{0x1F500, 0x1F53D, prExtendedPictographic}, //  6.0 [62] (🔀..🔽)    shuffle tracks button..downwards button
+	{0x1F546, 0x1F54A, prExtendedPictographic}, //  7.0  [5] (🕆..🕊️)    WHITE LATIN CROSS..dove
+	{0x1F54B, 0x1F54F, prExtendedPictographic}, //  8.0  [5] (🕋..🕏)    kaaba..BOWL OF HYGIEIA
+	{0x1F550, 0x1F567, prExtendedPictographic}, //  6.0 [24] (🕐..🕧)    one o’clock..twelve-thirty
+	{0x1F568, 0x1F579, prExtendedPictographic}, //  7.0 [18] (🕨..🕹️)    RIGHT SPEAKER..joystick
+	{0x1F57A, 0x1F57A, prExtendedPictographic}, //  9.0  [1] (🕺)       man dancing
+	{0x1F57B, 0x1F5A3, prExtendedPictographic}, //  7.0 [41] (🕻..🖣)    LEFT HAND TELEPHONE RECEIVER..BLACK DOWN POINTING BACKHAND INDEX
+	{0x1F5A4, 0x1F5A4, prExtendedPictographic}, //  9.0  [1] (🖤)       black heart
+	{0x1F5A5, 0x1F5FA, prExtendedPictographic}, //  7.0 [86] (🖥️..🗺️)    desktop computer..world map
+	{0x1F5FB, 0x1F5FF, prExtendedPictographic}, //  6.0  [5] (🗻..🗿)    mount fuji..moai
+	{0x1F600, 0x1F600, prExtendedPictographic}, //  6.1  [1] (😀)       grinning face
+	{0x1F601, 0x1F610, prExtendedPictographic}, //  6.0 [16] (😁..😐)    beaming face with smiling eyes..neutral face
+	{0x1F611, 0x1F611, prExtendedPictographic}, //  6.1  [1] (😑)       expressionless face
+	{0x1F612, 0x1F614, prExtendedPictographic}, //  6.0  [3] (😒..😔)    unamused face..pensive face
+	{0x1F615, 0x1F615, prExtendedPictographic}, //  6.1  [1] (😕)       confused face
+	{0x1F616, 0x1F616, prExtendedPictographic}, //  6.0  [1] (😖)       confounded face
+	{0x1F617, 0x1F617, prExtendedPictographic}, //  6.1  [1] (😗)       kissing face
+	{0x1F618, 0x1F618, prExtendedPictographic}, //  6.0  [1] (😘)       face blowing a kiss
+	{0x1F619, 0x1F619, prExtendedPictographic}, //  6.1  [1] (😙)       kissing face with smiling eyes
+	{0x1F61A, 0x1F61A, prExtendedPictographic}, //  6.0  [1] (😚)       kissing face with closed eyes
+	{0x1F61B, 0x1F61B, prExtendedPictographic}, //  6.1  [1] (😛)       face with tongue
+	{0x1F61C, 0x1F61E, prExtendedPictographic}, //  6.0  [3] (😜..😞)    winking face with tongue..disappointed face
+	{0x1F61F, 0x1F61F, prExtendedPictographic}, //  6.1  [1] (😟)       worried face
+	{0x1F620, 0x1F625, prExtendedPictographic}, //  6.0  [6] (😠..😥)    angry face..sad but relieved face
+	{0x1F626, 0x1F627, prExtendedPictographic}, //  6.1  [2] (😦..😧)    frowning face with open mouth..anguished face
+	{0x1F628, 0x1F62B, prExtendedPictographic}, //  6.0  [4] (😨..😫)    fearful face..tired face
+	{0x1F62C, 0x1F62C, prExtendedPictographic}, //  6.1  [1] (😬)       grimacing face
+	{0x1F62D, 0x1F62D, prExtendedPictographic}, //  6.0  [1] (😭)       loudly crying face
+	{0x1F62E, 0x1F62F, prExtendedPictographic}, //  6.1  [2] (😮..😯)    face with open mouth..hushed face
+	{0x1F630, 0x1F633, prExtendedPictographic}, //  6.0  [4] (😰..😳)    anxious face with sweat..flushed face
+	{0x1F634, 0x1F634, prExtendedPictographic}, //  6.1  [1] (😴)       sleeping face
+	{0x1F635, 0x1F640, prExtendedPictographic}, //  6.0 [12] (😵..🙀)    dizzy face..weary cat
+	{0x1F641, 0x1F642, prExtendedPictographic}, //  7.0  [2] (🙁..🙂)    slightly frowning face..slightly smiling face
+	{0x1F643, 0x1F644, prExtendedPictographic}, //  8.0  [2] (🙃..🙄)    upside-down face..face with rolling eyes
+	{0x1F645, 0x1F64F, prExtendedPictographic}, //  6.0 [11] (🙅..🙏)    person gesturing NO..folded hands
+	{0x1F680, 0x1F6C5, prExtendedPictographic}, //  6.0 [70] (🚀..🛅)    rocket..left luggage
+	{0x1F6C6, 0x1F6CF, prExtendedPictographic}, //  7.0 [10] (🛆..🛏️)    TRIANGLE WITH ROUNDED CORNERS..bed
+	{0x1F6D0, 0x1F6D0, prExtendedPictographic}, //  8.0  [1] (🛐)       place of worship
+	{0x1F6D1, 0x1F6D2, prExtendedPictographic}, //  9.0  [2] (🛑..🛒)    stop sign..shopping cart
+	{0x1F6D3, 0x1F6D4, prExtendedPictographic}, // 10.0  [2] (🛓..🛔)    STUPA..PAGODA
+	{0x1F6D5, 0x1F6D5, prExtendedPictographic}, // 12.0  [1] (🛕)       hindu temple
+	{0x1F6D6, 0x1F6DF, prExtendedPictographic}, //   NA [10] (🛖..🛟)    <reserved-1F6D6>..<reserved-1F6DF>
+	{0x1F6E0, 0x1F6EC, prExtendedPictographic}, //  7.0 [13] (🛠️..🛬)    hammer and wrench..airplane arrival
+	{0x1F6ED, 0x1F6EF, prExtendedPictographic}, //   NA  [3] (🛭..🛯)    <reserved-1F6ED>..<reserved-1F6EF>
+	{0x1F6F0, 0x1F6F3, prExtendedPictographic}, //  7.0  [4] (🛰️..🛳️)    satellite..passenger ship
+	{0x1F6F4, 0x1F6F6, prExtendedPictographic}, //  9.0  [3] (🛴..🛶)    kick scooter..canoe
+	{0x1F6F7, 0x1F6F8, prExtendedPictographic}, // 10.0  [2] (🛷..🛸)    sled..flying saucer
+	{0x1F6F9, 0x1F6F9, prExtendedPictographic}, // 11.0  [1] (🛹)       skateboard
+	{0x1F6FA, 0x1F6FA, prExtendedPictographic}, // 12.0  [1] (🛺)       auto rickshaw
+	{0x1F6FB, 0x1F6FF, prExtendedPictographic}, //   NA  [5] (🛻..🛿)    <reserved-1F6FB>..<reserved-1F6FF>
+	{0x1F774, 0x1F77F, prExtendedPictographic}, //   NA [12] (🝴..🝿)    <reserved-1F774>..<reserved-1F77F>
+	{0x1F7D5, 0x1F7D8, prExtendedPictographic}, // 11.0  [4] (🟕..🟘)    CIRCLED TRIANGLE..NEGATIVE CIRCLED SQUARE
+	{0x1F7D9, 0x1F7DF, prExtendedPictographic}, //   NA  [7] (🟙..🟟)    <reserved-1F7D9>..<reserved-1F7DF>
+	{0x1F7E0, 0x1F7EB, prExtendedPictographic}, // 12.0 [12] (🟠..🟫)    orange circle..brown square
+	{0x1F7EC, 0x1F7FF, prExtendedPictographic}, //   NA [20] (🟬..🟿)    <reserved-1F7EC>..<reserved-1F7FF>
+	{0x1F80C, 0x1F80F, prExtendedPictographic}, //   NA  [4] (🠌..🠏)    <reserved-1F80C>..<reserved-1F80F>
+	{0x1F848, 0x1F84F, prExtendedPictographic}, //   NA  [8] (🡈..🡏)    <reserved-1F848>..<reserved-1F84F>
+	{0x1F85A, 0x1F85F, prExtendedPictographic}, //   NA  [6] (🡚..🡟)    <reserved-1F85A>..<reserved-1F85F>
+	{0x1F888, 0x1F88F, prExtendedPictographic}, //   NA  [8] (🢈..🢏)    <reserved-1F888>..<reserved-1F88F>
+	{0x1F8AE, 0x1F8FF, prExtendedPictographic}, //   NA [82] (🢮..🣿)    <reserved-1F8AE>..<reserved-1F8FF>
+	{0x1F90C, 0x1F90C, prExtendedPictographic}, //   NA  [1] (🤌)       <reserved-1F90C>
+	{0x1F90D, 0x1F90F, prExtendedPictographic}, // 12.0  [3] (🤍..🤏)    white heart..pinching hand
+	{0x1F910, 0x1F918, prExtendedPictographic}, //  8.0  [9] (🤐..🤘)    zipper-mouth face..sign of the horns
+	{0x1F919, 0x1F91E, prExtendedPictographic}, //  9.0  [6] (🤙..🤞)    call me hand..crossed fingers
+	{0x1F91F, 0x1F91F, prExtendedPictographic}, // 10.0  [1] (🤟)       love-you gesture
+	{0x1F920, 0x1F927, prExtendedPictographic}, //  9.0  [8] (🤠..🤧)    cowboy hat face..sneezing face
+	{0x1F928, 0x1F92F, prExtendedPictographic}, // 10.0  [8] (🤨..🤯)    face with raised eyebrow..exploding head
+	{0x1F930, 0x1F930, prExtendedPictographic}, //  9.0  [1] (🤰)       pregnant woman
+	{0x1F931, 0x1F932, prExtendedPictographic}, // 10.0  [2] (🤱..🤲)    breast-feeding..palms up together
+	{0x1F933, 0x1F93A, prExtendedPictographic}, //  9.0  [8] (🤳..🤺)    selfie..person fencing
+	{0x1F93C, 0x1F93E, prExtendedPictographic}, //  9.0  [3] (🤼..🤾)    people wrestling..person playing handball
+	{0x1F93F, 0x1F93F, prExtendedPictographic}, // 12.0  [1] (🤿)       diving mask
+	{0x1F940, 0x1F945, prExtendedPictographic}, //  9.0  [6] (🥀..🥅)    wilted flower..goal net
+	{0x1F947, 0x1F94B, prExtendedPictographic}, //  9.0  [5] (🥇..🥋)    1st place medal..martial arts uniform
+	{0x1F94C, 0x1F94C, prExtendedPictographic}, // 10.0  [1] (🥌)       curling stone
+	{0x1F94D, 0x1F94F, prExtendedPictographic}, // 11.0  [3] (🥍..🥏)    lacrosse..flying disc
+	{0x1F950, 0x1F95E, prExtendedPictographic}, //  9.0 [15] (🥐..🥞)    croissant..pancakes
+	{0x1F95F, 0x1F96B, prExtendedPictographic}, // 10.0 [13] (🥟..🥫)    dumpling..canned food
+	{0x1F96C, 0x1F970, prExtendedPictographic}, // 11.0  [5] (🥬..🥰)    leafy green..smiling face with hearts
+	{0x1F971, 0x1F971, prExtendedPictographic}, // 12.0  [1] (🥱)       yawning face
+	{0x1F972, 0x1F972, prExtendedPictographic}, //   NA  [1] (🥲)       <reserved-1F972>
+	{0x1F973, 0x1F976, prExtendedPictographic}, // 11.0  [4] (🥳..🥶)    partying face..cold face
+	{0x1F977, 0x1F979, prExtendedPictographic}, //   NA  [3] (🥷..🥹)    <reserved-1F977>..<reserved-1F979>
+	{0x1F97A, 0x1F97A, prExtendedPictographic}, // 11.0  [1] (🥺)       pleading face
+	{0x1F97B, 0x1F97B, prExtendedPictographic}, // 12.0  [1] (🥻)       sari
+	{0x1F97C, 0x1F97F, prExtendedPictographic}, // 11.0  [4] (🥼..🥿)    lab coat..flat shoe
+	{0x1F980, 0x1F984, prExtendedPictographic}, //  8.0  [5] (🦀..🦄)    crab..unicorn
+	{0x1F985, 0x1F991, prExtendedPictographic}, //  9.0 [13] (🦅..🦑)    eagle..squid
+	{0x1F992, 0x1F997, prExtendedPictographic}, // 10.0  [6] (🦒..🦗)    giraffe..cricket
+	{0x1F998, 0x1F9A2, prExtendedPictographic}, // 11.0 [11] (🦘..🦢)    kangaroo..swan
+	{0x1F9A3, 0x1F9A4, prExtendedPictographic}, //   NA  [2] (🦣..🦤)    <reserved-1F9A3>..<reserved-1F9A4>
+	{0x1F9A5, 0x1F9AA, prExtendedPictographic}, // 12.0  [6] (🦥..🦪)    sloth..oyster
+	{0x1F9AB, 0x1F9AD, prExtendedPictographic}, //   NA  [3] (🦫..🦭)    <reserved-1F9AB>..<reserved-1F9AD>
+	{0x1F9AE, 0x1F9AF, prExtendedPictographic}, // 12.0  [2] (🦮..🦯)    guide dog..probing cane
+	{0x1F9B0, 0x1F9B9, prExtendedPictographic}, // 11.0 [10] (🦰..🦹)    red hair..supervillain
+	{0x1F9BA, 0x1F9BF, prExtendedPictographic}, // 12.0  [6] (🦺..🦿)    safety vest..mechanical leg
+	{0x1F9C0, 0x1F9C0, prExtendedPictographic}, //  8.0  [1] (🧀)       cheese wedge
+	{0x1F9C1, 0x1F9C2, prExtendedPictographic}, // 11.0  [2] (🧁..🧂)    cupcake..salt
+	{0x1F9C3, 0x1F9CA, prExtendedPictographic}, // 12.0  [8] (🧃..🧊)    beverage box..ice cube
+	{0x1F9CB, 0x1F9CC, prExtendedPictographic}, //   NA  [2] (🧋..🧌)    <reserved-1F9CB>..<reserved-1F9CC>
+	{0x1F9CD, 0x1F9CF, prExtendedPictographic}, // 12.0  [3] (🧍..🧏)    person standing..deaf person
+	{0x1F9D0, 0x1F9E6, prExtendedPictographic}, // 10.0 [23] (🧐..🧦)    face with monocle..socks
+	{0x1F9E7, 0x1F9FF, prExtendedPictographic}, // 11.0 [25] (🧧..🧿)    red envelope..nazar amulet
+	{0x1FA00, 0x1FA53, prExtendedPictographic}, // 12.0 [84] (🨀..🩓)    NEUTRAL CHESS KING..BLACK CHESS KNIGHT-BISHOP
+	{0x1FA54, 0x1FA5F, prExtendedPictographic}, //   NA [12] (🩔..🩟)    <reserved-1FA54>..<reserved-1FA5F>
+	{0x1FA60, 0x1FA6D, prExtendedPictographic}, // 11.0 [14] (🩠..🩭)    XIANGQI RED GENERAL..XIANGQI BLACK SOLDIER
+	{0x1FA6E, 0x1FA6F, prExtendedPictographic}, //   NA  [2] (🩮..🩯)    <reserved-1FA6E>..<reserved-1FA6F>
+	{0x1FA70, 0x1FA73, prExtendedPictographic}, // 12.0  [4] (🩰..🩳)    ballet shoes..shorts
+	{0x1FA74, 0x1FA77, prExtendedPictographic}, //   NA  [4] (🩴..🩷)    <reserved-1FA74>..<reserved-1FA77>
+	{0x1FA78, 0x1FA7A, prExtendedPictographic}, // 12.0  [3] (🩸..🩺)    drop of blood..stethoscope
+	{0x1FA7B, 0x1FA7F, prExtendedPictographic}, //   NA  [5] (🩻..🩿)    <reserved-1FA7B>..<reserved-1FA7F>
+	{0x1FA80, 0x1FA82, prExtendedPictographic}, // 12.0  [3] (🪀..🪂)    yo-yo..parachute
+	{0x1FA83, 0x1FA8F, prExtendedPictographic}, //   NA [13] (🪃..🪏)    <reserved-1FA83>..<reserved-1FA8F>
+	{0x1FA90, 0x1FA95, prExtendedPictographic}, // 12.0  [6] (🪐..🪕)    ringed planet..banjo
+	{0x1FA96, 0x1FFFD, prExtendedPictographic}, //   NA[1384] (🪖..🿽)   <reserved-1FA96>..<reserved-1FFFD>
+	{0xE0000, 0xE0000, prControl},              // Cn       <reserved-E0000>
+	{0xE0001, 0xE0001, prControl},              // Cf       LANGUAGE TAG
+	{0xE0002, 0xE001F, prControl},              // Cn  [30] <reserved-E0002>..<reserved-E001F>
+	{0xE0020, 0xE007F, prExtend},               // Cf  [96] TAG SPACE..CANCEL TAG
+	{0xE0080, 0xE00FF, prControl},              // Cn [128] <reserved-E0080>..<reserved-E00FF>
+	{0xE0100, 0xE01EF, prExtend},               // Mn [240] VARIATION SELECTOR-17..VARIATION SELECTOR-256
+	{0xE01F0, 0xE0FFF, prControl},              // Cn [3600] <reserved-E01F0>..<reserved-E0FFF>
+}
+
+// property returns the Unicode property value (see constants above) of the
+// given code point.
+func property(r rune) int {
+	// Run a binary search.
+	from := 0
+	to := len(codePoints)
+	for to > from {
+		middle := (from + to) / 2
+		cpRange := codePoints[middle]
+		if int(r) < cpRange[0] {
+			to = middle
+			continue
+		}
+		if int(r) > cpRange[1] {
+			from = middle + 1
+			continue
+		}
+		return cpRange[2]
+	}
+	return prAny
+}
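The table above is generated in ascending code-point order, so property() can resolve a rune's grapheme-cluster property with a plain binary search over the ranges, falling back to prAny when no range matches; the cost is logarithmic in the number of ranges. Below is a minimal, self-contained sketch of the same lookup pattern; the three-row table and its property values are illustrative stand-ins, not the vendored data.

```
// Illustrative sketch of the range-table binary search used by property()
// above. The table contents here are hypothetical, not the generated data.
package main

import "fmt"

const (
	prAny = iota // no specific property
	prLV
	prLVT
)

// Each entry is {first code point, last code point, property},
// sorted by the first code point and non-overlapping.
var table = [][3]int{
	{0xCFA8, 0xCFA8, prLV},
	{0xCFA9, 0xCFC3, prLVT},
	{0xCFC4, 0xCFC4, prLV},
}

func lookup(r rune) int {
	from, to := 0, len(table)
	for to > from {
		middle := (from + to) / 2
		rng := table[middle]
		if int(r) < rng[0] {
			to = middle // r sorts before this range
			continue
		}
		if int(r) > rng[1] {
			from = middle + 1 // r sorts after this range
			continue
		}
		return rng[2] // r falls inside this range
	}
	return prAny // no range matched
}

func main() {
	fmt.Println(lookup(0xCFB0) == prLVT) // true: inside 0xCFA9..0xCFC3
	fmt.Println(lookup('A') == prAny)    // true: U+0041 is not in the table
}
```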
diff --git a/vendor/github.com/uber/jaeger-client-go/.gitignore b/vendor/github.com/uber/jaeger-client-go/.gitignore
new file mode 100644
index 0000000..2734907
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/.gitignore
@@ -0,0 +1,15 @@
+*.out
+*.test
+*.xml
+*.swp
+.idea/
+.tmp/
+*.iml
+*.cov
+*.html
+*.log
+gen/thrift/js
+gen/thrift/py
+vendor/
+crossdock-main
+crossdock/jaeger-docker-compose.yml
diff --git a/vendor/github.com/uber/jaeger-client-go/.gitmodules b/vendor/github.com/uber/jaeger-client-go/.gitmodules
new file mode 100644
index 0000000..295ebcf
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "idl"]
+	path = idl
+	url = https://github.com/uber/jaeger-idl.git
diff --git a/vendor/github.com/uber/jaeger-client-go/.travis.yml b/vendor/github.com/uber/jaeger-client-go/.travis.yml
new file mode 100644
index 0000000..435aea1
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/.travis.yml
@@ -0,0 +1,56 @@
+sudo: required
+
+language: go
+go_import_path: github.com/uber/jaeger-client-go
+
+dist: trusty
+
+matrix:
+  include:
+  # - go: 1.15.x
+  #   env:
+  #   - TESTS=true
+  #   - USE_DEP=true
+  #   - COVERAGE=true
+  - go: 1.15.x
+    env:
+    - USE_DEP=true
+    - CROSSDOCK=true
+  # - go: 1.15.x
+  #   env:
+  #   - TESTS=true
+  #   - USE_DEP=false
+  #   - USE_GLIDE=true
+  # test with previous version of Go
+  # - go: 1.14.x
+  #   env:
+  #   - TESTS=true
+  #   - USE_DEP=true
+  #   - CI_SKIP_LINT=true
+
+services:
+  - docker
+
+env:
+  global:
+    - DOCKER_COMPOSE_VERSION=1.8.0
+    - COMMIT=${TRAVIS_COMMIT::8}
+    # DOCKER_PASS
+    - secure: "CnjVyxNvMC/dhr/eR7C+FiWucZ4/O5LfAuz9YU0qlnV6XLR7XXRtzZlfFKIImJT6xHp+OptTqAIXqUbvwK2OXDP1ZsLiWRm+2elb9/isGusWXjs3g817lX8njSUcIFILbfi+vAE7UD2BKjHxpmvWmCZidisU1rcaZ9OQNPqMnNIDxVx0FOTwYx+2hfkdjnN5dikzafBDQ6ZZV/mGbcaTG45GGFU6DHyVLzf9qCPXyXnz2+VDhcoPQsYkzE56XHCmHxvEfXxgfqYefJNUlFPhniAQySVsCNVDJ8QcCV6uHaXoIzxJKx9FdUnWKI1/AtpQsTZPgEm4Ujnt+kGJsXopXy2Xx4MZxmcTCBwAMjZxPMF7KoojbtDeOZgEMtf1tGPN6DTNc3NpVmr0BKZ44lhqk+vnd8HAiC1tHDEoSb1Esl7dMUUf1qZAh3MtT+NYi3mTwyx/ilXUS7KPyy7x0ezB3kGuMoLhvR2hrprqRr5NOV2hrd1au+IXmb+4IanFOsBlceBfs8P0JFMO/aw15r+HimSZpQsJx//IT0LReCZYXLe0/WVsF/8+HDwHKlO99gGpk4iXlNKKvdPWabihMp3I3peMrvL+jnlwh47RqHs/0Q71xsKjVWTn+Svq3FpVP0Pgyxhg+oG4WEByBiLnBQcZwSBhWexkJrNI73GzaZiIldk="
+    # DOCKER_USER
+    - secure: "bpBSmypHzI4PnteM4cwLiMC2163Sj/4mEl+1dj+6NWl2tr1hREeVXKhsWBpah25n6BDyr2A4yhBZcWLaNKrsCKT3U37csAQTOFVeQ9x5xhPq+ohANd/OsspFsxNZaKwx161LizH/uTDotMxxevZacsyYWGNv/cRFkwcQ8upLkReRR6puJ+jNQC0BFpKWBJY/zpm5J7xFb7FO20LvQVyRgsgzqWmg9oRNVw9uwOfSY3btacftYctDLUbAr8YRNHd2C6dZnMAi8KdDTLXKTqjKmp6WidOmi92Ml7tOjB+bV6TOaVAhrcI5Rdje4rRWG4MucAjPMP0ZBW36KTfcGqFUcDhX7UqISe2WxoI+8ZD6fJ+nNtD3bk4YAUJB4BSs2sQdiYyjpHyGJR6RW50+3uRz2YbXpzVr9wqv2lZSl/xy3wC5Hag55uqzVlSiDw2pK8lctT3dnQveE7PqAI577PjF2NrHlgrBbykOwwUCNbRTmykzqoDnkxclmiZ+rflEeWsSYglePK/d6Gj9+N7wJZM5heprdJJMFTrzMWZ21Ll9ZGY9updCBKmJA8pBYiLHbu0lWOp+9QUGC+621Zq0d1PHhN6L4eXk/f3RNoZTr//cX6WdNmmO7tBbaGpmp/UYiYTY1WO9vP7tCDsT75k285HCfnIrlGRdbCZZbfuYNGPKIQ0="
+
+install:
+  - make install-ci USE_DEP=$USE_DEP
+  - if [ "$CROSSDOCK" == true ]; then bash ./travis/install-crossdock-deps.sh ; fi
+
+script:
+  - if [ "$TESTS" == true ]; then make test-ci ; else echo 'skipping tests'; fi
+  - if [ "$CROSSDOCK" == true ]; then bash ./travis/build-crossdock.sh ; else echo 'skipping crossdock'; fi
+
+after_success:
+  - if [ "$COVERAGE" == true ]; then mv cover.out coverage.txt ; else echo 'skipping coverage'; fi
+  - if [ "$COVERAGE" == true ]; then bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi
+
+after_failure:
+  - if [ "$CROSSDOCK" == true ]; then timeout 5 docker-compose -f crossdock/docker-compose.yml logs; fi
diff --git a/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
new file mode 100644
index 0000000..956790e
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/CHANGELOG.md
@@ -0,0 +1,390 @@
+Changes by Version
+==================
+
+2.29.2 (unreleased)
+-------------------
+- Nothing yet.
+
+
+2.29.1 (2021-05-24)
+-------------------
+- Remove dependency on "testing" in "thrift" (#586) -- @yurishkuro
+
+
+2.29.0 (2021-05-20)
+-------------------
+- Update vendored thrift to 0.14.1 (#584) -- @nhatthm
+
+
+2.28.0 (2021-04-30)
+-------------------
+- HTTPSamplingStrategyFetcher: Use http client with 10 second timeout (#578) -- Joe Elliott
+
+
+2.27.0 (2021-04-19)
+-------------------
+- Don't override HTTP Reporter batch size to 1; default to 100, user can override (#571) -- R. Aidan Campbell
+
+
+2.26.0 (2021-04-16)
+-------------------
+- Delete a baggage item when value is blank (#562) -- evan.kim
+- Trim baggage key when parsing (#566) -- sicong.huang
+- feat: extend configuration to support custom randomNumber func (#555) -- NemoO_o
+- Support JAEGER_TRACEID_128BIT env var (#547) -- Yuri Shkuro
+- Additional context protections (#544) -- Joe Elliott
+- Lock RemotelyControlledSampler.sampler on callbacks (#543) -- Dima
+- Upgrade build to Go 1.15 (#539) -- Yuri Shkuro
+- Upgrade to jaeger-lib@2.3.0 to fix broken codahale/hdrhistogram dependency (#537) -- Yuri Shkuro
+- Prefix TraceID/SpanID.String() with zeroes (#533) -- Lukas Vogel
+- Upgrade to OpenTracing Go 1.2 (#525) -- Yuri Shkuro
+
+
+2.25.0 (2020-07-13)
+-------------------
+## Breaking changes
+- [feat] Periodically re-resolve UDP server address, with opt-out (#520) -- Trevor Foster
+
+  Re-resolution of the UDP address is now enabled by default, to make the client more robust in Kubernetes deployments.
+  The old resolve-once behavior can be restored by setting DisableAttemptReconnecting=true in the Configuration struct,
+  or via the JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED=true environment variable.
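A hedged sketch of what opting out might look like in code, assuming the flag is exposed as DisableAttemptReconnecting on ReporterConfig in the client's config package; the exact field placement, service name, and agent address below are assumptions to verify against this vendored revision.

```
// Hedged sketch: restore the old resolve-once behavior via configuration.
// Assumes config.ReporterConfig carries DisableAttemptReconnecting as noted above.
package main

import (
	"log"

	"github.com/uber/jaeger-client-go/config"
)

func main() {
	cfg := config.Configuration{
		ServiceName: "example-service", // placeholder service name
		Reporter: &config.ReporterConfig{
			LocalAgentHostPort:         "jaeger-agent:6831", // placeholder agent address
			DisableAttemptReconnecting: true,                // opt out of periodic re-resolution
		},
	}
	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatalf("cannot initialize tracer: %v", err)
	}
	defer closer.Close()
	_ = tracer // use the tracer with opentracing as usual
}
```

The equivalent environment-variable route (JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED=true) needs no code change when the tracer is built with config.FromEnv().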
+
+## Bug fixes
+- Do not add invalid context to references (#521) -- Yuri Shkuro
+
+
+2.24.0 (2020-06-14)
+-------------------
+- Mention FromEnv() in the README, docs, and examples (#518) -- Martin Lercher
+- Serialize access to RemotelyControlledSampler.sampler (#515) -- Dima
+- Override reporter config only when agent host/port is set in env (#513) -- ilylia
+- Converge on JAEGER_SAMPLING_ENDPOINT env variable (#511) -- Eundoo Song
+
+
+2.23.1 (2020-04-28)
+-------------------
+- Fix regression by handling nil logger correctly ([#507](https://github.com/jaegertracing/jaeger-client-go/pull/507)) -- Prithvi Raj
+
+
+2.23.0 (2020-04-22)
+-------------------
+
+- Add the ability to log all span interactions at a new debug log level([#502](https://github.com/jaegertracing/jaeger-client-go/pull/502), [#503](https://github.com/jaegertracing/jaeger-client-go/pull/503), [#504](https://github.com/jaegertracing/jaeger-client-go/pull/504)) -- Prithvi Raj
+- Chore (docs): fix typos ([#496](https://github.com/jaegertracing/jaeger-client-go/pull/496), [#498](https://github.com/jaegertracing/jaeger-client-go/pull/498)) -- Febrian Setianto and Ivan Babrou
+- Unset highest bit of traceID in probabilistic sampler ([#490](https://github.com/jaegertracing/jaeger-client-go/pull/490)) -- Sokolov Yura
+
+2.22.1 (2020-01-16)
+-------------------
+
+- Increase UDP batch overhead to account for data loss metrics ([#488](https://github.com/jaegertracing/jaeger-client-go/pull/488)) -- Yuri Shkuro
+
+
+2.22.0 (2020-01-15)
+-------------------
+
+- Report data loss stats to Jaeger backend ([#482](https://github.com/jaegertracing/jaeger-client-go/pull/482)) -- Yuri Shkuro
+- Add limit on log records per span ([#483](https://github.com/jaegertracing/jaeger-client-go/pull/483)) -- Sokolov Yura
+
+
+2.21.1 (2019-12-20)
+-------------------
+
+- Update version correctly.
+
+
+2.21.0 (2019-12-20)
+-------------------
+
+- Clarify reporting error logs ([#469](https://github.com/jaegertracing/jaeger-client-go/pull/469)) -- Yuri Shkuro
+- Do not strip leading zeros from trace IDs ([#472](https://github.com/jaegertracing/jaeger-client-go/pull/472)) -- Yuri Shkuro
+- Chore (docs): fixed a couple of typos ([#475](https://github.com/jaegertracing/jaeger-client-go/pull/475)) -- Marc Bramaud
+- Support custom HTTP headers when reporting spans over HTTP ([#479](https://github.com/jaegertracing/jaeger-client-go/pull/479)) -- Albert Teoh
+
+
+2.20.1 (2019-11-08)
+-------------------
+
+Minor patch via https://github.com/jaegertracing/jaeger-client-go/pull/468
+
+- Make `AdaptiveSamplerUpdater` usable with default values; Resolves #467
+- Create `OperationNameLateBinding` sampler option and config option
+- Make `SamplerOptions` var of public type, so that its functions are discoverable via godoc
+
+
+2.20.0 (2019-11-06)
+-------------------
+
+## New Features
+
+- Allow all in-process spans of a trace to share sampling state (#443) -- Prithvi Raj
+
+  Sampling state is shared between all spans of the trace that are still in memory.
+  This allows implementation of delayed sampling decisions (see below).
+
+- Support delayed sampling decisions (#449) -- Yuri Shkuro
+
+  This is a large structural change to how the samplers work.
+  It allows some samplers to be executed multiple times on different
+  span events (such as setting a tag) and to make a positive sampling decision
+  later in the span life cycle, or even based on child spans.
+  See [README](./README.md#delayed-sampling) for more details.
+
+  There is a related minor change in behavior of the adaptive (per-operation) sampler,
+  which will no longer re-sample the trace when `span.SetOperation()` is called, i.e. the
+  operation used to make the sampling decision is always the one provided at span creation.
+
+- Add experimental tag matching sampler (#452) -- Yuri Shkuro
+
+  A sampler that can sample a trace based on a certain tag added to the root
+  span or one of its local (in-process) children. The sampler can be used with
+  another experimental `PrioritySampler` that allows multiple samplers to try
+  to make a sampling decision, in a certain priority order.
+
+- [log/zap] Report whether a trace was sampled (#445) -- Abhinav Gupta
+- Allow config.FromEnv() to enrich an existing config object (#436) -- Vineeth Reddy
+
+## Minor patches
+
+- Expose Sampler on Tracer and accept sampler options via Configuration (#460) -- Yuri Shkuro
+- Fix github.com/uber-go/atomic import (#464) -- Yuri Shkuro
+- Add nodejs to crossdock tests (#441) -- Bhavin Gandhi
+- Bump Go compiler version to 1.13 (#453) -- Yuri Shkuro
+
+2.19.0 (2019-09-23)
+-------------------
+
+- Upgrade jaeger-lib to 2.2 and unpin Prom client (#434) -- Yuri Shkuro
+
+
+2.18.1 (2019-09-16)
+-------------------
+
+- Remove go.mod / go.sum that interfere with `go get` (#432)
+
+
+2.18.0 (2019-09-09)
+-------------------
+
+- Add option "noDebugFlagOnForcedSampling" for tracer initialization [resolves #422] (#423) <Jun Guo>
+
+
+2.17.0 (2019-08-30)
+-------------------
+
+- Add a flag for firehose mode (#419) <Prithvi Raj>
+- Default sampling server URL to agent (#414) <Bryan Boreham>
+- Update default sampling rate when sampling strategy is refreshed (#413) <Bryan Boreham>
+- Support "Self" Span Reference (#411) <dm03514>
+- Don't complain about blank service name if tracing is Disabled (#410) <Yuri Shkuro>
+- Use IP address from tag if it exists (#402) <NikoKVCS>
+- Expose span data to custom reporters [fixes #394] (#399) <Curtis Allen>
+- Fix the span allocation in the pool (#381) <Dmitry Ponomarev>
+
+
+2.16.0 (2019-03-24)
+-------------------
+
+- Add baggage to B3 codec (#319) <Pavol Loffay>
+- Add support for 128bit trace ids to zipkin thrift spans. (#378) <Douglas Reid>
+- Update zipkin propagation logic to support 128bit traceIDs (#373) <Douglas Reid>
+- Accept "true" for the x-b3-sampled header (#356) <Adrian Bogatu>
+
+- Allow setting of PoolSpans from Config object (#322) <Matthew Pound>
+- Make propagators public to allow wrapping (#379) <Ivan Babrou>
+- Change default metric namespace to use relevant separator for the metric backend (#364) <Gary Brown>
+- Change metrics prefix to jaeger_tracer and add descriptions (#346) <Gary Brown>
+- Bump OpenTracing to ^1.1.x (#383) <Yuri Shkuro>
+- Upgrade jaeger-lib to v2.0.0 (#359) <Gary Brown>
+- Avoid defer when generating random number (#358) <Gary Brown>
+- Use a pool of rand.Source to reduce lock contention when creating span ids (#357) <Gary Brown>
+- Make JAEGER_ENDPOINT take priority over JAEGER_AGENT_XXX (#342) <Eundoo Song>
+
+
+2.15.0 (2018-10-10)
+-------------------
+
+- Fix FollowsFrom spans ignoring baggage/debug header from dummy parent context (#313) <Zvi Cahana>
+- Make maximum annotation length configurable in tracer options (#318) <Eric Chang>
+- Support more environment variables in configuration (#323) <Daneyon Hansen>
+- Print error on Sampler Query failure (#328) <Goutham Veeramachaneni>
+- Add an HTTPOption to support custom http.RoundTripper (#333) <Michael Puncel>
+- Return an error when an HTTP error code is seen in zipkin HTTP transport (#331) <Michael Puncel>
+
+
+2.14.0 (2018-04-30)
+-------------------
+
+- Support throttling for debug traces (#274) <Isaac Hier>
+- Remove dependency on Apache Thrift (#303) <Yuri Shkuro>
+- Remove dependency on tchannel (#295) (#294) <Yuri Shkuro>
+- Test with Go 1.9 (#298) <Yuri Shkuro>
+
+
+2.13.0 (2018-04-15)
+-------------------
+
+- Use value receiver for config.NewTracer() (#283) <Yuri Shkuro>
+- Lock span during jaeger thrift conversion (#273) <Won Jun Jang>
+- Fix the RemotelyControlledSampler so that it terminates go-routine on Close() (#260) <Scott Kidder> <Yuri Shkuro>
+- Added support for client configuration via env vars (#275) <Juraci Paixão Kröhling>
+- Allow overriding sampler in the Config (#270) <Mike Kabischev>
+
+
+2.12.0 (2018-03-14)
+-------------------
+
+- Use lock when retrieving span.Context() (#268)
+- Add Configuration support for custom Injector and Extractor (#263) <Martin Liu>
+
+
+2.11.2 (2018-01-12)
+-------------------
+
+- Add Gopkg.toml to allow using the lib with `dep`
+
+
+2.11.1 (2018-01-03)
+-------------------
+
+- Do not enqueue spans after Reporter is closed (#235, #245)
+- Change default flush interval to 1sec (#243)
+
+
+2.11.0 (2017-11-27)
+-------------------
+
+- Normalize metric names and tags to be compatible with Prometheus (#222)
+
+
+2.10.0 (2017-11-14)
+-------------------
+
+- Support custom tracing headers (#176)
+- Add BaggageRestrictionManager (#178) and RemoteBaggageRestrictionManager (#182)
+- Do not coerce baggage keys to lower case (#196)
+- Log span name when span cannot be reported (#198)
+- Add option to enable gen128Bit for tracer (#193) and allow custom generator for high bits of trace ID (#219)
+
+
+2.9.0 (2017-07-29)
+------------------
+
+- Pin thrift <= 0.10 (#179)
+- Introduce a parallel interface ContribObserver (#159)
+
+
+2.8.0 (2017-07-05)
+------------------
+
+- Drop `jaeger.` prefix from `jaeger.hostname` process-level tag
+- Add options to set tracer tags
+
+
+2.7.0 (2017-06-21)
+------------------
+
+- Fix rate limiter balance [#135](https://github.com/uber/jaeger-client-go/pull/135) [#140](https://github.com/uber/jaeger-client-go/pull/140)
+- Default client to send Jaeger.thrift [#147](https://github.com/uber/jaeger-client-go/pull/147)
+- Save baggage in span [#153](https://github.com/uber/jaeger-client-go/pull/153)
+- Move reporter.queueLength to the top of the struct to guarantee 64bit alignment [#158](https://github.com/uber/jaeger-client-go/pull/158)
+- Support HTTP transport with jaeger.thrift [#161](https://github.com/uber/jaeger-client-go/pull/161)
+
+
+2.6.0 (2017-03-28)
+------------------
+
+- Add config option to initialize RPC Metrics feature
+
+
+2.5.0 (2017-03-23)
+------------------
+
+- Split request latency metric by success/failure [#123](https://github.com/uber/jaeger-client-go/pull/123)
+- Add mutex to adaptive sampler and fix race condition [#124](https://github.com/uber/jaeger-client-go/pull/124)
+- Fix rate limiter panic [#125](https://github.com/uber/jaeger-client-go/pull/125)
+
+
+2.4.0 (2017-03-21)
+------------------
+
+- Remove `_ms` suffix from request latency metric name [#121](https://github.com/uber/jaeger-client-go/pull/121)
+- Rename all metrics to "request" and "http_request" and use tags for other dimensions [#121](https://github.com/uber/jaeger-client-go/pull/121)
+
+
+2.3.0 (2017-03-20)
+------------------
+
+- Make Span type public to allow access to non-std methods for testing [#117](https://github.com/uber/jaeger-client-go/pull/117)
+- Add a structured way to extract traces for logging with zap [#118](https://github.com/uber/jaeger-client-go/pull/118)
+
+
+2.2.1 (2017-03-14)
+------------------
+
+- Fix panic caused by updating the remote sampler from adaptive sampler to any other sampler type (https://github.com/uber/jaeger-client-go/pull/111)
+
+
+2.2.0 (2017-03-10)
+------------------
+
+- Introduce Observer and SpanObserver (https://github.com/uber/jaeger-client-go/pull/94)
+- Add RPC metrics emitter as Observer/SpanObserver (https://github.com/uber/jaeger-client-go/pull/103)
+
+
+2.1.2 (2017-02-27)
+-------------------
+
+- Fix leaky bucket bug (https://github.com/uber/jaeger-client-go/pull/99)
+- Fix zap logger Infof (https://github.com/uber/jaeger-client-go/pull/100)
+- Add tracer initialization godoc examples
+
+
+2.1.1 (2017-02-21)
+-------------------
+
+- Fix inefficient usage of zap.Logger
+
+
+2.1.0 (2017-02-17)
+-------------------
+
+- Add adapter for zap.Logger (https://github.com/uber-go/zap)
+- Move logging API to ./log/ package
+
+
+2.0.0 (2017-02-08)
+-------------------
+
+- Support Adaptive Sampling
+- Support 128bit Trace IDs
+- Change trace/span IDs from uint64 to strong types TraceID and SpanID
+- Add Zipkin HTTP B3 Propagation format support #72
+- Rip out existing metrics and use github.com/uber/jaeger-lib/metrics
+- Change API for tracer, reporter, sampler initialization
+
+
+1.6.0 (2016-10-14)
+-------------------
+
+- Add Zipkin HTTP transport
+- Support external baggage via jaeger-baggage header
+- Unpin Thrift version, keep to master
+
+
+1.5.1 (2016-09-27)
+-------------------
+
+- Relax dependency on opentracing to ^1
+
+
+1.5.0 (2016-09-27)
+-------------------
+
+- Upgrade to opentracing-go 1.0
+- Support KV logging for Spans
+
+
+1.4.0 (2016-09-14)
+-------------------
+
+- Support debug traces via HTTP header "jaeger-debug-id"
diff --git a/vendor/github.com/uber/jaeger-client-go/CODEOWNERS b/vendor/github.com/uber/jaeger-client-go/CODEOWNERS
new file mode 100644
index 0000000..0572efc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/CODEOWNERS
@@ -0,0 +1,2 @@
+
+* @jaegertracing/jaeger-maintainers
diff --git a/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
new file mode 100644
index 0000000..41e2154
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md
@@ -0,0 +1,170 @@
+# How to Contribute to Jaeger
+
+We'd love your help!
+
+Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub
+pull requests. This document outlines some of the conventions on development
+workflow, commit message formatting, contact points and other resources to make
+it easier to get your contribution accepted.
+
+We gratefully welcome improvements to documentation as well as to code.
+
+# Certificate of Origin
+
+By contributing to this project you agree to the [Developer Certificate of
+Origin](https://developercertificate.org/) (DCO). This document was created
+by the Linux Kernel community and is a simple statement that you, as a
+contributor, have the legal right to make the contribution. See the [DCO](DCO)
+file for details.
+
+## Getting Started
+
+This library uses [dep](https://golang.github.io/dep/) to manage dependencies.
+
+To get started, make sure you clone the Git repository into the correct location
+`github.com/uber/jaeger-client-go` relative to `$GOPATH`:
+
+```
+mkdir -p $GOPATH/src/github.com/uber
+cd $GOPATH/src/github.com/uber
+git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go
+cd jaeger-client-go
+git submodule update --init --recursive
+```
+
+Then install dependencies and run the tests:
+
+```
+make install
+make test
+```
+
+## Imports grouping
+
+This project uses the following pattern for grouping imports in Go files:
+  * imports from standard library
+  * imports from other projects
+  * imports from `jaeger-client-go` project
+
+For example:
+
+```go
+import (
+	"fmt"
+
+	"github.com/uber/jaeger-lib/metrics"
+	"go.uber.org/zap"
+
+	"github.com/uber/jaeger-client-go/config"
+)
+```
+
+## Making A Change
+
+*Before making any significant changes, please [open an
+issue](https://github.com/jaegertracing/jaeger-client-go/issues).* Discussing your proposed
+changes ahead of time will make the contribution process smooth for everyone.
+
+Once we've discussed your changes and you've got your code ready, make sure
+that tests are passing (`make test` or `make cover`) and open your PR. Your
+pull request is most likely to be accepted if it:
+
+* Includes tests for new functionality.
+* Follows the guidelines in [Effective
+  Go](https://golang.org/doc/effective_go.html) and the [Go team's common code
+  review comments](https://github.com/golang/go/wiki/CodeReviewComments).
+* Has a [good commit message](https://chris.beams.io/posts/git-commit/):
+   * Separate subject from body with a blank line
+   * Limit the subject line to 50 characters
+   * Capitalize the subject line
+   * Do not end the subject line with a period
+   * Use the imperative mood in the subject line
+   * Wrap the body at 72 characters
+   * Use the body to explain _what_ and _why_ instead of _how_
+* Each commit must be signed by the author ([see below](#sign-your-work)).
+
+## License
+
+By contributing your code, you agree to license your contribution under the terms
+of the [Apache License](LICENSE).
+
+If you are adding a new file, it should have a header like the one below. The easiest
+way to add such a header is to run `make fmt`.
+
+```
+// Copyright (c) 2017 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+```
+
+## Sign your work
+
+The sign-off is a simple line at the end of the explanation for the
+patch, which certifies that you wrote it or otherwise have the right to
+pass it on as an open-source patch.  The rules are pretty simple: if you
+can certify the below (from
+[developercertificate.org](http://developercertificate.org/)):
+
+```
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe@gmail.com>
+
+using your real name (sorry, no pseudonyms or anonymous contributions).
+
+You can add the sign off when creating the git commit via `git commit -s`.
+
+If you want this to be automatic you can set up some aliases:
+
+```
+git config --add alias.amend "commit -s --amend"
+git config --add alias.c "commit -s"
+```
diff --git a/vendor/github.com/uber/jaeger-client-go/DCO b/vendor/github.com/uber/jaeger-client-go/DCO
new file mode 100644
index 0000000..068953d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/DCO
@@ -0,0 +1,37 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.lock b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
new file mode 100644
index 0000000..268289b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.lock
@@ -0,0 +1,301 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+  digest = "1:4c4c33075b704791d6a7f09dfb55c66769e8a1dc6adf87026292d274fe8ad113"
+  name = "github.com/HdrHistogram/hdrhistogram-go"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "3a0bb77429bd3a61596f5e8a3172445844342120"
+  version = "0.9.0"
+
+[[projects]]
+  digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d"
+  name = "github.com/beorn7/perks"
+  packages = ["quantile"]
+  pruneopts = "UT"
+  revision = "37c8de3658fcb183f997c4e13e8337516ab753e6"
+  version = "v1.0.1"
+
+[[projects]]
+  branch = "master"
+  digest = "1:a382acd6150713655ded76ab5fbcbc7924a7808dab4312dda5d1f23dd8ce5277"
+  name = "github.com/crossdock/crossdock-go"
+  packages = [
+    ".",
+    "assert",
+    "require",
+  ]
+  pruneopts = "UT"
+  revision = "049aabb0122b03bc9bd30cab8f3f91fb60166361"
+
+[[projects]]
+  digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec"
+  name = "github.com/davecgh/go-spew"
+  packages = ["spew"]
+  pruneopts = "UT"
+  revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73"
+  version = "v1.1.1"
+
+[[projects]]
+  digest = "1:7ae311278f7ccaa724de8f2cdec0a507ba3ee6dea8c77237e8157bcf64b0f28b"
+  name = "github.com/golang/mock"
+  packages = ["gomock"]
+  pruneopts = "UT"
+  revision = "f7b1909c82a8958747e5c87c6a5c3b2eaed8a33d"
+  version = "v1.4.4"
+
+[[projects]]
+  digest = "1:4a32eb57407190eced21a21abee9ce4d4ab6f0bf113ca61cb1cb2d549a65c985"
+  name = "github.com/golang/protobuf"
+  packages = [
+    "proto",
+    "ptypes",
+    "ptypes/any",
+    "ptypes/duration",
+    "ptypes/timestamp",
+  ]
+  pruneopts = "UT"
+  revision = "d04d7b157bb510b1e0c10132224b616ac0e26b17"
+  version = "v1.4.2"
+
+[[projects]]
+  digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc"
+  name = "github.com/matttproud/golang_protobuf_extensions"
+  packages = ["pbutil"]
+  pruneopts = "UT"
+  revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c"
+  version = "v1.0.1"
+
+[[projects]]
+  digest = "1:fe5217d44ae8fb84f711968816fe50077cea9dfa8f44425b8e44e7e3de896d01"
+  name = "github.com/opentracing/opentracing-go"
+  packages = [
+    ".",
+    "ext",
+    "harness",
+    "log",
+  ]
+  pruneopts = "UT"
+  revision = "d34af3eaa63c4d08ab54863a4bdd0daa45212e12"
+  version = "v1.2.0"
+
+[[projects]]
+  digest = "1:9e1d37b58d17113ec3cb5608ac0382313c5b59470b94ed97d0976e69c7022314"
+  name = "github.com/pkg/errors"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "614d223910a179a466c1767a985424175c39b465"
+  version = "v0.9.1"
+
+[[projects]]
+  digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe"
+  name = "github.com/pmezard/go-difflib"
+  packages = ["difflib"]
+  pruneopts = "UT"
+  revision = "792786c7400a136282c1664665ae0a8db921c6c2"
+  version = "v1.0.0"
+
+[[projects]]
+  digest = "1:7097829edd12fd7211fca0d29496b44f94ef9e6d72f88fb64f3d7b06315818ad"
+  name = "github.com/prometheus/client_golang"
+  packages = [
+    "prometheus",
+    "prometheus/internal",
+  ]
+  pruneopts = "UT"
+  revision = "170205fb58decfd011f1550d4cfb737230d7ae4f"
+  version = "v1.1.0"
+
+[[projects]]
+  digest = "1:0db23933b8052702d980a3f029149b3f175f7c0eea0cff85b175017d0f2722c0"
+  name = "github.com/prometheus/client_model"
+  packages = ["go"]
+  pruneopts = "UT"
+  revision = "7bc5445566f0fe75b15de23e6b93886e982d7bf9"
+  version = "v0.2.0"
+
+[[projects]]
+  digest = "1:4407525bde4e6ad9c1f60113d38cbb255d769e0ea506c8cf877db39da7753b3a"
+  name = "github.com/prometheus/common"
+  packages = [
+    "expfmt",
+    "internal/bitbucket.org/ww/goautoneg",
+    "model",
+  ]
+  pruneopts = "UT"
+  revision = "317b7b125e8fddda956d0c9574e5f03f438ed5bc"
+  version = "v0.14.0"
+
+[[projects]]
+  digest = "1:b2268435af85ee1a0fca0e37de4225f78e2d9d8b0b66acde3a29f127634efa87"
+  name = "github.com/prometheus/procfs"
+  packages = [
+    ".",
+    "internal/fs",
+    "internal/util",
+  ]
+  pruneopts = "UT"
+  revision = "9dece15c53cd5e9fbfbd72d5108adcf526a3f486"
+  version = "v0.2.0"
+
+[[projects]]
+  digest = "1:86ff4af7b6bb3d27c2e89b5ef8c139678acff1cad74a3c5235fc5af6b94fcc9e"
+  name = "github.com/stretchr/objx"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "35313a95ee26395aa17d366c71a2ccf788fa69b6"
+  version = "v0.3.0"
+
+[[projects]]
+  digest = "1:5201127841a78d84d0ca68a2e564c08e3882c0fb9321a75997ce87926e0d63ea"
+  name = "github.com/stretchr/testify"
+  packages = [
+    "assert",
+    "mock",
+    "require",
+    "suite",
+  ]
+  pruneopts = "UT"
+  revision = "f654a9112bbeac49ca2cd45bfbe11533c4666cf8"
+  version = "v1.6.1"
+
+[[projects]]
+  digest = "1:4af46f2faea30e52c96ec9ec32bb654d2729579a80d242b0acfa193ad321eb61"
+  name = "github.com/uber/jaeger-lib"
+  packages = [
+    "metrics",
+    "metrics/metricstest",
+    "metrics/prometheus",
+  ]
+  pruneopts = "UT"
+  revision = "48cc1df63e6be0d63b95677f0d22beb880bce1e4"
+  version = "v2.3.0"
+
+[[projects]]
+  digest = "1:7a3de4371d6b68c6f37a0df2c09905664d9de59026c91cbe275aae55f4fe760f"
+  name = "go.uber.org/atomic"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "12f27ba2637fa0e13772a4f05fa46a5d18d53182"
+  version = "v1.7.0"
+
+[[projects]]
+  digest = "1:e9eeeabfd025a5e69b9c8e2857d3517ea67e747ae913bcb0a9e1e7bafdb9c298"
+  name = "go.uber.org/multierr"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "3114a8b704d2d28dbacda34a872690aaef66aeed"
+  version = "v1.6.0"
+
+[[projects]]
+  digest = "1:0398f5f0e2e9233f25fad702f3b323241daf9f876cc869ab259238cf1bced236"
+  name = "go.uber.org/zap"
+  packages = [
+    ".",
+    "buffer",
+    "internal/bufferpool",
+    "internal/color",
+    "internal/exit",
+    "zapcore",
+    "zaptest/observer",
+  ]
+  pruneopts = "UT"
+  revision = "404189cf44aea95b0cd9bddcb0242dd4cf88c510"
+  version = "v1.16.0"
+
+[[projects]]
+  branch = "master"
+  digest = "1:f8b491a7c25030a895a0e579742d07136e6958e77ef2d46e769db8eec4e58fcd"
+  name = "golang.org/x/net"
+  packages = [
+    "context",
+    "context/ctxhttp",
+  ]
+  pruneopts = "UT"
+  revision = "328152dc79b1547da63f950cd4cdd9afd50b2774"
+
+[[projects]]
+  branch = "master"
+  digest = "1:1e581fa394685ef0d84008ae04cf3414390c1a700c04846853869cb4ac2fec86"
+  name = "golang.org/x/sys"
+  packages = [
+    "internal/unsafeheader",
+    "unix",
+    "windows",
+  ]
+  pruneopts = "UT"
+  revision = "d9f96fdee20d1e5115ee34ba4016eae6cfb66eb9"
+
+[[projects]]
+  digest = "1:fd328c5b52e433ea3ffc891bcc4f94469a82bf478558208db2b386aad8a304a1"
+  name = "google.golang.org/protobuf"
+  packages = [
+    "encoding/prototext",
+    "encoding/protowire",
+    "internal/descfmt",
+    "internal/descopts",
+    "internal/detrand",
+    "internal/encoding/defval",
+    "internal/encoding/messageset",
+    "internal/encoding/tag",
+    "internal/encoding/text",
+    "internal/errors",
+    "internal/fieldsort",
+    "internal/filedesc",
+    "internal/filetype",
+    "internal/flags",
+    "internal/genid",
+    "internal/impl",
+    "internal/mapsort",
+    "internal/pragma",
+    "internal/set",
+    "internal/strs",
+    "internal/version",
+    "proto",
+    "reflect/protoreflect",
+    "reflect/protoregistry",
+    "runtime/protoiface",
+    "runtime/protoimpl",
+    "types/known/anypb",
+    "types/known/durationpb",
+    "types/known/timestamppb",
+  ]
+  pruneopts = "UT"
+  revision = "3f7a61f89bb6813f89d981d1870ed68da0b3c3f1"
+  version = "v1.25.0"
+
+[[projects]]
+  branch = "v3"
+  digest = "1:229cb0f6192914f518cc1241ede6d6f1f458b31debfa18bf3a5c9e4f7b01e24b"
+  name = "gopkg.in/yaml.v3"
+  packages = ["."]
+  pruneopts = "UT"
+  revision = "eeeca48fe7764f320e4870d231902bf9c1be2c08"
+
+[solve-meta]
+  analyzer-name = "dep"
+  analyzer-version = 1
+  input-imports = [
+    "github.com/crossdock/crossdock-go",
+    "github.com/golang/mock/gomock",
+    "github.com/opentracing/opentracing-go",
+    "github.com/opentracing/opentracing-go/ext",
+    "github.com/opentracing/opentracing-go/harness",
+    "github.com/opentracing/opentracing-go/log",
+    "github.com/pkg/errors",
+    "github.com/prometheus/client_golang/prometheus",
+    "github.com/stretchr/testify/assert",
+    "github.com/stretchr/testify/mock",
+    "github.com/stretchr/testify/require",
+    "github.com/stretchr/testify/suite",
+    "github.com/uber/jaeger-lib/metrics",
+    "github.com/uber/jaeger-lib/metrics/metricstest",
+    "github.com/uber/jaeger-lib/metrics/prometheus",
+    "go.uber.org/atomic",
+    "go.uber.org/zap",
+    "go.uber.org/zap/zapcore",
+    "go.uber.org/zap/zaptest/observer",
+  ]
+  solver-name = "gps-cdcl"
+  solver-version = 1
diff --git a/vendor/github.com/uber/jaeger-client-go/Gopkg.toml b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
new file mode 100644
index 0000000..3aa307a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/Gopkg.toml
@@ -0,0 +1,31 @@
+[[constraint]]
+  name = "github.com/crossdock/crossdock-go"
+  branch = "master"
+
+[[constraint]]
+  name = "github.com/opentracing/opentracing-go"
+  version = "^1.2"
+
+[[constraint]]
+  name = "github.com/prometheus/client_golang"
+  version = "^1"
+
+[[constraint]]
+  name = "github.com/stretchr/testify"
+  version = "^1.1.3"
+
+[[constraint]]
+  name = "go.uber.org/atomic"
+  version = "^1"
+
+[[constraint]]
+  name = "github.com/uber/jaeger-lib"
+  version = "^2.3"
+
+[[constraint]]
+  name = "go.uber.org/zap"
+  version = "^1"
+
+[prune]
+  go-tests = true
+  unused-packages = true
diff --git a/vendor/github.com/uber/jaeger-client-go/LICENSE b/vendor/github.com/uber/jaeger-client-go/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/uber/jaeger-client-go/Makefile b/vendor/github.com/uber/jaeger-client-go/Makefile
new file mode 100644
index 0000000..bb7463c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/Makefile
@@ -0,0 +1,149 @@
+PROJECT_ROOT=github.com/uber/jaeger-client-go
+export GO111MODULE=off
+PACKAGES := . $(shell GO111MODULE=off go list ./... | awk -F/ 'NR>1 {print "./"$$4"/..."}' | grep -v -e ./thrift-gen/... -e ./thrift/... | sort -u)
+# all .go files that don't exist in hidden directories
+ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \
+        -e ".*/\..*" \
+        -e ".*/_.*" \
+        -e ".*/mocks.*")
+
+USE_DEP := true
+
+-include crossdock/rules.mk
+
+RACE=-race
+GOTEST=go test -v $(RACE)
+GOLINT=golint
+GOVET=go vet
+GOFMT=gofmt
+FMT_LOG=fmt.log
+LINT_LOG=lint.log
+
+THRIFT_VER=0.14
+THRIFT_IMG=jaegertracing/thrift:$(THRIFT_VER)
+THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift
+THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift"
+THRIFT_GEN_DIR=thrift-gen
+
+PASS=$(shell printf "\033[32mPASS\033[0m")
+FAIL=$(shell printf "\033[31mFAIL\033[0m")
+COLORIZE=sed ''/PASS/s//$(PASS)/'' | sed ''/FAIL/s//$(FAIL)/''
+
+.DEFAULT_GOAL := test-and-lint
+
+.PHONY: test-and-lint
+test-and-lint: test fmt lint
+
+.PHONY: test
+test:
+ifeq ($(USE_DEP),true)
+	dep check
+endif
+	bash -c "set -e; set -o pipefail; $(GOTEST) $(PACKAGES) | $(COLORIZE)"
+
+.PHONY: fmt
+fmt:
+	$(GOFMT) -e -s -l -w $(ALL_SRC)
+	./scripts/updateLicenses.sh
+
+.PHONY: lint
+lint: vet golint lint-fmt lint-thrift-testing
+
+.PHONY: vet
+vet:
+	$(GOVET) $(PACKAGES)
+
+.PHONY: golint
+golint:
+	@cat /dev/null > $(LINT_LOG)
+	@$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;)
+	@[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false)
+
+.PHONY: lint-fmt
+lint-fmt:
+	@$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG)
+	./scripts/updateLicenses.sh >> $(FMT_LOG)
+	@[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false)
+
+# make sure thrift/ module does not import "testing"
+.PHONY: lint-thrift-testing
+lint-thrift-testing:
+	@cat /dev/null > $(LINT_LOG)
+	@(grep -rn '"testing"' thrift | grep -v README.md > $(LINT_LOG)) || true
+	@[ ! -s "$(LINT_LOG)" ] || (echo '"thrift" module must not import "testing", see issue #585' | cat - $(LINT_LOG) && false)
+
+.PHONY: install
+install:
+	@echo install: USE_DEP=$(USE_DEP) USE_GLIDE=$(USE_GLIDE)
+ifeq ($(USE_DEP),true)
+	dep version || make install-dep
+	dep ensure -vendor-only -v
+endif
+ifeq ($(USE_GLIDE),true)
+	glide --version || go get github.com/Masterminds/glide
+	glide install
+endif
+
+
+.PHONY: cover
+cover:
+	$(GOTEST) -cover -coverprofile cover.out $(PACKAGES)
+
+.PHONY: cover-html
+cover-html: cover
+	go tool cover -html=cover.out -o cover.html
+
+# This is not part of the regular test target because we don't want to slow it
+# down.
+.PHONY: test-examples
+test-examples:
+	make -C examples
+
+.PHONY: thrift
+thrift: idl-submodule thrift-compile
+
+# TODO at the moment we're not generating tchan_*.go files
+.PHONY: thrift-compile
+thrift-compile: thrift-image
+	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift
+	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift
+	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift
+	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/zipkincore.thrift
+	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/baggage.thrift
+	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/crossdock/thrift/ /data/idl/thrift/crossdock/tracetest.thrift
+	sed -i '' 's|"zipkincore"|"$(PROJECT_ROOT)/thrift-gen/zipkincore"|g' $(THRIFT_GEN_DIR)/agent/*.go
+	sed -i '' 's|"jaeger"|"$(PROJECT_ROOT)/thrift-gen/jaeger"|g' $(THRIFT_GEN_DIR)/agent/*.go
+	sed -i '' 's|"github.com/apache/thrift/lib/go/thrift"|"github.com/uber/jaeger-client-go/thrift"|g' \
+		$(THRIFT_GEN_DIR)/*/*.go crossdock/thrift/tracetest/*.go
+	rm -rf thrift-gen/*/*-remote
+	rm -rf crossdock/thrift/*/*-remote
+	rm -rf thrift-gen/jaeger/collector.go
+
+.PHONY: idl-submodule
+idl-submodule:
+	git submodule init
+	git submodule update
+
+.PHONY: thrift-image
+thrift-image:
+	$(THRIFT) -version
+
+.PHONY: install-dep
+install-dep:
+	- curl -L -s https://github.com/golang/dep/releases/download/v0.5.0/dep-linux-amd64 -o $$GOPATH/bin/dep
+	- chmod +x $$GOPATH/bin/dep
+
+.PHONY: install-ci
+install-ci: install
+	go get github.com/wadey/gocovmerge
+	go get github.com/mattn/goveralls
+	go get golang.org/x/tools/cmd/cover
+	go get golang.org/x/lint/golint
+
+.PHONY: test-ci
+test-ci: cover
+ifeq ($(CI_SKIP_LINT),true)
+	echo 'skipping lint'
+else
+	make lint
+endif
diff --git a/vendor/github.com/uber/jaeger-client-go/README.md b/vendor/github.com/uber/jaeger-client-go/README.md
new file mode 100644
index 0000000..687f578
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/README.md
@@ -0,0 +1,324 @@
+[![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url]
+
+# Jaeger Bindings for Go OpenTracing API
+
+Instrumentation library that implements an
+[OpenTracing Go](https://github.com/opentracing/opentracing-go) Tracer for Jaeger (https://jaegertracing.io).
+
+**IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`; it will not compile. We might revisit this in the next major release.
+  * :white_check_mark: `import "github.com/uber/jaeger-client-go"`
+  * :x: `import "github.com/jaegertracing/jaeger-client-go"`
+
+## How to Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md).
+
+## Installation
+
+We recommend using a dependency manager like [dep](https://golang.github.io/dep/)
+and [semantic versioning](http://semver.org/) when including this library into an application.
+For example, Jaeger backend imports this library like this:
+
+```toml
+[[constraint]]
+  name = "github.com/uber/jaeger-client-go"
+  version = "2.17"
+```
+
+If you instead want to use the latest version in `master`, you can pull it via `go get`.
+Note that during `go get` you may see build errors due to incompatible dependencies, which is why
+we recommend using semantic versions for dependencies. These errors may be fixed by running
+`make install` (it will install `dep` if you don't have it):
+
+```shell
+go get -u github.com/uber/jaeger-client-go/
+cd $GOPATH/src/github.com/uber/jaeger-client-go/
+git submodule update --init --recursive
+make install
+```
+
+## Initialization
+
+See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples)
+and [config/example_test.go](./config/example_test.go).
+
+### Environment variables
+
+The tracer can be initialized with values coming from environment variables, if it is
+[built from a config](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#Configuration.NewTracer)
+that was created via [FromEnv()](https://pkg.go.dev/github.com/uber/jaeger-client-go/config?tab=doc#FromEnv).
+None of the env vars are required, and all of them can be overridden by setting the
+corresponding property directly on the configuration object.
+
+Property| Description
+--- | ---
+JAEGER_SERVICE_NAME | The service name.
+JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP (default `localhost`).
+JAEGER_AGENT_PORT | The port for communicating with agent via UDP (default `6831`).
+JAEGER_ENDPOINT | The HTTP endpoint for sending spans directly to a collector, i.e. http://jaeger-collector:14268/api/traces. If specified, the agent host/port are ignored.
+JAEGER_USER | Username to send as part of "Basic" authentication to the collector endpoint.
+JAEGER_PASSWORD | Password to send as part of "Basic" authentication to the collector endpoint.
+JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans, `true` or `false` (default `false`).
+JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size (default `100`).
+JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval, with units, e.g. `500ms` or `2s` ([valid units][timeunits]; default `1s`).
+JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED | When true, disables the UDP connection helper that periodically re-resolves the agent's hostname and reconnects if there was a change (default `false`).
+JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL | Controls how often the agent client re-resolves the provided hostname in order to detect address changes ([valid units][timeunits]; default `30s`).
+JAEGER_SAMPLER_TYPE | The sampler type: `remote`, `const`, `probabilistic`, `ratelimiting` (default `remote`). See also https://www.jaegertracing.io/docs/latest/sampling/.
+JAEGER_SAMPLER_PARAM | The sampler parameter (number).
+JAEGER_SAMPLER_MANAGER_HOST_PORT | (deprecated) The HTTP endpoint when using the `remote` sampler.
+JAEGER_SAMPLING_ENDPOINT | The URL for the sampling configuration server when using sampler type `remote` (default `http://127.0.0.1:5778/sampling`).
+JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of (default `2000`).
+JAEGER_SAMPLER_REFRESH_INTERVAL | How often the `remote` sampler should poll the configuration server for the appropriate sampling strategy, e.g. "1m" or "30s" ([valid units][timeunits]; default `1m`).
+JAEGER_TAGS | A comma-separated list of `name=value` tracer-level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:defaultValue}`.
+JAEGER_DISABLED | Whether the tracer is disabled or not. If `true`, the `opentracing.NoopTracer` is used (default `false`).
+JAEGER_RPC_METRICS | Whether to store RPC metrics, `true` or `false` (default `false`).
+
+By default, the client sends traces via UDP to the agent at `localhost:6831`. Use `JAEGER_AGENT_HOST` and
+`JAEGER_AGENT_PORT` to send UDP traces to a different `host:port`. If `JAEGER_ENDPOINT` is set, the client sends traces
+to the endpoint via `HTTP`, making the `JAEGER_AGENT_HOST` and `JAEGER_AGENT_PORT` unused. If `JAEGER_ENDPOINT` is
+secured, HTTP basic authentication can be performed by setting the `JAEGER_USER` and `JAEGER_PASSWORD` environment
+variables.
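+
+As a sketch (assuming the `FromEnv()` entry point described above; the fallback service name
+and the error handling are illustrative), a tracer driven entirely by environment variables can
+be constructed like this:
+
+```go
+import (
+    "github.com/opentracing/opentracing-go"
+    "github.com/uber/jaeger-client-go/config"
+)
+
+    // Build the configuration from the JAEGER_* environment variables.
+    cfg, err := config.FromEnv()
+    if err != nil {
+        // handle parsing errors from the environment
+    }
+    // Any property can still be overridden directly on the config object.
+    if cfg.ServiceName == "" {
+        cfg.ServiceName = "your-service-name"
+    }
+
+    tracer, closer, err := cfg.NewTracer()
+    if err != nil {
+        // handle tracer construction errors
+    }
+    defer closer.Close()
+    opentracing.SetGlobalTracer(tracer)
+```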
+
+### Closing the tracer via `io.Closer`
+
+The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance.
+It is recommended to structure your `main()` so that it calls the `Close()` function on the closer
+before exiting, e.g.
+
+```go
+tracer, closer, err := cfg.NewTracer(...)
+defer closer.Close()
+```
+
+This is especially useful for command-line tools that enable tracing, as well as
+for long-running apps that support graceful shutdown. For example, if your deployment
+system sends SIGTERM instead of killing the process and you trap that signal to do a graceful
+exit, then having `defer closer.Close()` ensures that all buffered spans are flushed.
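+
+As a minimal sketch (the signal handling here is illustrative and reuses the `closer` from the
+snippet above), a graceful-shutdown handler might look like:
+
+```go
+import (
+    "os"
+    "os/signal"
+    "syscall"
+)
+
+    sigCh := make(chan os.Signal, 1)
+    signal.Notify(sigCh, syscall.SIGTERM, os.Interrupt)
+    <-sigCh        // wait for the deployment system to ask the process to stop
+    closer.Close() // flush any buffered spans before exiting
+```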
+
+### Metrics & Monitoring
+
+The tracer emits a number of different metrics, defined in
+[metrics.go](metrics.go). The monitoring backend is expected to support
+tag-based metric names, e.g. instead of `statsd`-style string names
+like `counters.my-service.jaeger.spans.started.sampled`, the metrics
+are defined by a short name and a collection of key/value tags, for
+example: `name:jaeger.traces, state:started, sampled:y`. See [metrics.go](./metrics.go)
+file for the full list and descriptions of emitted metrics.
+
+The monitoring backend is represented by the `metrics.Factory` interface from package
+[`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics).  An implementation
+of that interface can be passed as an option to either the Configuration object or the Tracer
+constructor, for example:
+
+```go
+import (
+    "github.com/uber/jaeger-client-go/config"
+    "github.com/uber/jaeger-lib/metrics/prometheus"
+)
+
+    metricsFactory := prometheus.New()
+    tracer, closer, err := config.Configuration{
+        ServiceName: "your-service-name",
+    }.NewTracer(
+        config.Metrics(metricsFactory),
+    )
+```
+
+By default, a no-op `metrics.NullFactory` is used.
+
+### Logging
+
+The tracer can be configured with an optional logger, which will be
+used to log communication errors, or log spans if a logging reporter
+option is specified in the configuration. The logging API is abstracted
+by the [Logger](logger.go) interface. A logger instance implementing
+this interface can be set on the `Config` object before calling the
+`New` method.
+
+Besides the [zap](https://github.com/uber-go/zap) implementation
+bundled with this package there is also a [go-kit](https://github.com/go-kit/kit)
+one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository.
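+
+For illustration, a logger can be attached via the `config.Logger` option when building the
+tracer; this sketch assumes the simple standard-output logger from the bundled `log` package:
+
+```go
+import (
+    "github.com/uber/jaeger-client-go/config"
+    jaegerlog "github.com/uber/jaeger-client-go/log"
+)
+
+    tracer, closer, err := config.Configuration{
+        ServiceName: "your-service-name",
+    }.NewTracer(
+        config.Logger(jaegerlog.StdLogger),
+    )
+```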
+
+## Instrumentation for Tracing
+
+Since this tracer is fully compliant with OpenTracing API 1.0,
+all code instrumentation should only use the API itself, as described
+in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation.
+
+## Features
+
+### Reporters
+
+A "reporter" is a component that receives the finished spans and reports
+them to somewhere. Under normal circumstances, the Tracer
+should use the default `RemoteReporter`, which sends the spans out of
+process via configurable "transport". For testing purposes, one can
+use an `InMemoryReporter` that accumulates spans in a buffer and
+allows to retrieve them for later verification. Also available are
+`NullReporter`, a no-op reporter that does nothing, a `LoggingReporter`
+which logs all finished spans using their `String()` method, and a
+`CompositeReporter` that can be used to combine more than one reporter
+into one, e.g. to attach a logging reporter to the main remote reporter.
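+
+For example (a test-only sketch, not the recommended production setup), an `InMemoryReporter`
+can be wired directly into a tracer and inspected afterwards:
+
+```go
+import (
+    "github.com/uber/jaeger-client-go"
+)
+
+    reporter := jaeger.NewInMemoryReporter()
+    tracer, closer := jaeger.NewTracer(
+        "test-service",               // hypothetical service name used in tests
+        jaeger.NewConstSampler(true), // sample everything
+        reporter,
+    )
+    defer closer.Close()
+
+    tracer.StartSpan("unit-of-work").Finish()
+    spans := reporter.GetSpans() // finished spans are now available for assertions
+    _ = spans
+```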
+
+### Span Reporting Transports
+
+The remote reporter uses "transports" to actually send the spans out
+of process. Currently the supported transports include:
+  * [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP,
+  * [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP.
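+
+As a sketch (the collector URL matches the example from the environment-variables table above
+and should be adjusted for your deployment; the sampler choice is illustrative), the HTTP
+transport can be passed to a `RemoteReporter` explicitly:
+
+```go
+import (
+    "github.com/uber/jaeger-client-go"
+    "github.com/uber/jaeger-client-go/transport"
+)
+
+    sender := transport.NewHTTPTransport("http://jaeger-collector:14268/api/traces")
+    reporter := jaeger.NewRemoteReporter(sender)
+    tracer, closer := jaeger.NewTracer(
+        "your-service-name",
+        jaeger.NewConstSampler(true),
+        reporter,
+    )
+    defer closer.Close()
+```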
+
+### Sampling
+
+The tracer does not record all spans, but only those that have the
+sampling bit set in the `flags`. When a new trace is started and a new
+unique ID is generated, a sampling decision is made whether this trace
+should be sampled. The sampling decision is propagated to all downstream
+calls via the `flags` field of the trace context. The following samplers
+are available:
+  1. `RemotelyControlledSampler` uses one of the other simpler samplers
+     and periodically updates it by polling an external server. This
+     allows dynamic control of the sampling strategies.
+  1. `ConstSampler` always makes the same sampling decision for all
+     trace IDs. It can be configured to either sample all traces, or
+     to sample none.
+  1. `ProbabilisticSampler` uses a fixed sampling rate as a probability
+     for a given trace to be sampled. The actual decision is made by
+     comparing the trace ID with a random number multiplied by the
+     sampling rate.
+  1. `RateLimitingSampler` can be used to allow only a certain fixed
+     number of traces to be sampled per second.
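+
+For instance (a minimal sketch using the `config` package; the sampling rate is illustrative),
+a fixed-probability sampler can be selected like this:
+
+```go
+import (
+    "github.com/uber/jaeger-client-go"
+    "github.com/uber/jaeger-client-go/config"
+)
+
+    tracer, closer, err := config.Configuration{
+        ServiceName: "your-service-name",
+        Sampler: &config.SamplerConfig{
+            Type:  jaeger.SamplerTypeProbabilistic,
+            Param: 0.01, // sample roughly 1% of traces
+        },
+    }.NewTracer()
+```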
+
+#### Delayed sampling
+
+Version 2.20 introduced the ability to delay sampling decisions in the life cycle
+of the root span. It involves several features and architectural changes:
+  * **Shared sampling state**: the sampling state is shared across all local
+    (i.e. in-process) spans for a given trace.
+  * **New `SamplerV2` API** allows the sampler to be called at multiple points 
+    in the life cycle of a span:
+    * on span creation
+    * on overwriting span operation name
+    * on setting span tags
+    * on finishing the span
+  * **Final/non-final sampling state**: the new `SamplerV2` API allows the sampler
+    to indicate if the negative sampling decision is final or not (positive sampling
+    decisions are always final). If the decision is not final, the sampler will be
+    called again on further span life cycle events, like setting tags.
+
+These new features are used in the experimental `x.TagMatchingSampler`, which
+can sample a trace based on a certain tag added to the root
+span or one of its local (in-process) children. The sampler can be used with
+another experimental `x.PrioritySampler` that allows multiple samplers to try
+to make a sampling decision, in a certain priority order.
+
+### Baggage Injection
+
+The OpenTracing spec allows for [baggage][baggage], which are key value pairs that are added
+to the span context and propagated throughout the trace. An external process can inject baggage
+by setting the special HTTP Header `jaeger-baggage` on a request:
+
+```sh
+curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com
+```
+
+Baggage can also be programmatically set inside your service:
+
+```go
+if span := opentracing.SpanFromContext(ctx); span != nil {
+    span.SetBaggageItem("key", "value")
+}
+```
+
+Another service downstream of that can retrieve the baggage in a similar way:
+
+```go
+if span := opentracing.SpanFromContext(ctx); span != nil {
+    val := span.BaggageItem("key")
+    println(val)
+}
+```
+
+### Debug Traces (Forced Sampling)
+
+#### Programmatically
+
+The OpenTracing API defines a `sampling.priority` standard tag that
+can be used to affect the sampling of a span and its children:
+
+```go
+import (
+    "github.com/opentracing/opentracing-go"
+    "github.com/opentracing/opentracing-go/ext"
+)
+
+span := opentracing.SpanFromContext(ctx)
+ext.SamplingPriority.Set(span, 1)
+```
+
+#### Via HTTP Headers
+
+Jaeger Tracer also understands a special HTTP Header `jaeger-debug-id`,
+which can be set in the incoming request, e.g.
+
+```sh
+curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com
+```
+
+When Jaeger sees this header in the request that otherwise has no
+tracing context, it ensures that the new trace started for this
+request will be sampled in the "debug" mode (meaning it should survive
+all downsampling that might happen in the collection pipeline), and the
+root span will have a tag as if this statement was executed:
+
+```go
+span.SetTag("jaeger-debug-id", "some-correlation-id")
+```
+
+This allows using the Jaeger UI to find the trace by this tag.
+
+### Zipkin HTTP B3 compatible header propagation
+
+Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used
+by many Zipkin tracers. This means that you can use Jaeger in conjunction with, for example, [these OpenZipkin tracers](https://github.com/openzipkin).
+
+However, it is not the default propagation format; see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) for how to set it up.
+
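+A minimal setup sketch, assuming the `zipkin` sub-package referenced in the link
+above (the service name is a placeholder):
+
+```go
+import (
+    "github.com/opentracing/opentracing-go"
+    "github.com/uber/jaeger-client-go"
+    "github.com/uber/jaeger-client-go/zipkin"
+)
+
+// Register the B3 propagator for both injection and extraction of HTTP headers.
+propagator := zipkin.NewZipkinB3HTTPHeaderPropagator()
+tracer, closer := jaeger.NewTracer(
+    "my-service", // placeholder service name
+    jaeger.NewConstSampler(true),
+    jaeger.NewNullReporter(),
+    jaeger.TracerOptions.Injector(opentracing.HTTPHeaders, propagator),
+    jaeger.TracerOptions.Extractor(opentracing.HTTPHeaders, propagator),
+)
+defer closer.Close()
+opentracing.SetGlobalTracer(tracer)
+```
+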
+## SelfRef
+
+Jaeger Tracer supports an additional [span reference][] type called `Self`, which was proposed
+to the OpenTracing Specification (https://github.com/opentracing/specification/issues/81)
+but not yet accepted. This allows the caller to provide an already created `SpanContext`
+when starting a new span. The `Self` reference bypasses trace and span id generation,
+as well as sampling decisions (i.e. the sampling bit in the `SpanContext.flags` must be
+set appropriately by the caller).
+
+The `Self` reference supports the following use cases:
+  * the ability to provide externally generated trace and span IDs
+  * appending data to the same span from different processes, such as loading and continuing spans/traces from offline (i.e. log-based) storage
+
+Usage requires passing in a `SpanContext` and the `jaeger.Self` reference type:
+```go
+span := tracer.StartSpan(
+    "continued_span",
+    jaeger.SelfRef(yourSpanContext),
+)
+...
+defer span.Finish()
+```
+
+## License
+
+[Apache 2.0 License](LICENSE).
+
+
+[doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg
+[doc]: https://godoc.org/github.com/uber/jaeger-client-go
+[ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master
+[ci]: https://travis-ci.org/jaegertracing/jaeger-client-go
+[cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg
+[cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go
+[ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg
+[ot-url]: http://opentracing.io
+[baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item
+[timeunits]: https://golang.org/pkg/time/#ParseDuration
+[span reference]: https://github.com/opentracing/specification/blob/1.1/specification.md#references-between-spans
diff --git a/vendor/github.com/uber/jaeger-client-go/RELEASE.md b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
new file mode 100644
index 0000000..12438d8
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/RELEASE.md
@@ -0,0 +1,12 @@
+# Release Process
+
+1. Create a PR "Preparing for release X.Y.Z" against master branch
+    * Alter CHANGELOG.md from `<placeholder_version> (unreleased)` to `<X.Y.Z> (YYYY-MM-DD)`
+    * Use `git log --pretty=format:'- %s -- %an'` as the basis for changelog entries
+    * Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z`
+2. Create a release "Release X.Y.Z" on Github
+    * Create Tag `vX.Y.Z`
+    * Copy CHANGELOG.md into the release notes
+3. Create a PR "Back to development" against master branch
+    * Add `<next_version> (unreleased)` to CHANGELOG.md
+    * Update `JaegerClientVersion` in constants.go to `Go-<next_version>dev`
diff --git a/vendor/github.com/uber/jaeger-client-go/baggage_setter.go b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
new file mode 100644
index 0000000..1037ca0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/baggage_setter.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"github.com/opentracing/opentracing-go/log"
+
+	"github.com/uber/jaeger-client-go/internal/baggage"
+)
+
+// baggageSetter is an actor that can set a baggage value on a Span given certain
+// restrictions (e.g. maxValueLength).
+type baggageSetter struct {
+	restrictionManager baggage.RestrictionManager
+	metrics            *Metrics
+}
+
+func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter {
+	return &baggageSetter{
+		restrictionManager: restrictionManager,
+		metrics:            metrics,
+	}
+}
+
+// (NB) span should hold the lock before making this call
+func (s *baggageSetter) setBaggage(span *Span, key, value string) {
+	var truncated bool
+	var prevItem string
+	restriction := s.restrictionManager.GetRestriction(span.serviceName(), key)
+	if !restriction.KeyAllowed() {
+		s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
+		s.metrics.BaggageUpdateFailure.Inc(1)
+		return
+	}
+	if len(value) > restriction.MaxValueLength() {
+		truncated = true
+		value = value[:restriction.MaxValueLength()]
+		s.metrics.BaggageTruncate.Inc(1)
+	}
+	prevItem = span.context.baggage[key]
+	s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed())
+	span.context = span.context.WithBaggageItem(key, value)
+	s.metrics.BaggageUpdateSuccess.Inc(1)
+}
+
+func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, truncated, valid bool) {
+	if !span.context.IsSampled() {
+		return
+	}
+	fields := []log.Field{
+		log.String("event", "baggage"),
+		log.String("key", key),
+		log.String("value", value),
+	}
+	if prevItem != "" {
+		fields = append(fields, log.String("override", "true"))
+	}
+	if truncated {
+		fields = append(fields, log.String("truncated", "true"))
+	}
+	if !valid {
+		fields = append(fields, log.String("invalid", "true"))
+	}
+	span.logFieldsNoLocking(fields...)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config.go b/vendor/github.com/uber/jaeger-client-go/config/config.go
new file mode 100644
index 0000000..c2222f1
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/config/config.go
@@ -0,0 +1,447 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/uber/jaeger-client-go/utils"
+
+	"github.com/uber/jaeger-client-go"
+	"github.com/uber/jaeger-client-go/internal/baggage/remote"
+	throttler "github.com/uber/jaeger-client-go/internal/throttler/remote"
+	"github.com/uber/jaeger-client-go/rpcmetrics"
+	"github.com/uber/jaeger-client-go/transport"
+	"github.com/uber/jaeger-lib/metrics"
+)
+
+const defaultSamplingProbability = 0.001
+
+// Configuration configures and creates Jaeger Tracer
+type Configuration struct {
+	// ServiceName specifies the service name to use on the tracer.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_SERVICE_NAME
+	ServiceName string `yaml:"serviceName"`
+
+	// Disabled makes the config return opentracing.NoopTracer.
+	// Value can be provided by FromEnv() via the environment variable named JAEGER_DISABLED.
+	Disabled bool `yaml:"disabled"`
+
+	// RPCMetrics enables generation of RPC metrics (requires metrics factory to be provided).
+	// Value can be provided by FromEnv() via the environment variable named JAEGER_RPC_METRICS
+	RPCMetrics bool `yaml:"rpc_metrics"`
+
+	// Gen128Bit instructs the tracer to generate 128-bit wide trace IDs, compatible with W3C Trace Context.
+	// Value can be provided by FromEnv() via the environment variable named JAEGER_TRACEID_128BIT.
+	Gen128Bit bool `yaml:"traceid_128bit"`
+
+	// Tags can be provided by FromEnv() via the environment variable named JAEGER_TAGS
+	Tags []opentracing.Tag `yaml:"tags"`
+
+	Sampler             *SamplerConfig             `yaml:"sampler"`
+	Reporter            *ReporterConfig            `yaml:"reporter"`
+	Headers             *jaeger.HeadersConfig      `yaml:"headers"`
+	BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"`
+	Throttler           *ThrottlerConfig           `yaml:"throttler"`
+}
+
+// SamplerConfig allows initializing a non-default sampler.  All fields are optional.
+type SamplerConfig struct {
+	// Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_TYPE
+	Type string `yaml:"type"`
+
+	// Param is a value passed to the sampler.
+	// Valid values for Param field are:
+	// - for "const" sampler, 0 or 1 for always false/true respectively
+	// - for "probabilistic" sampler, a probability between 0 and 1
+	// - for "rateLimiting" sampler, the number of spans per second
+	// - for "remote" sampler, param is the same as for "probabilistic"
+	//   and indicates the initial sampling rate before the actual one
+	//   is received from the mothership.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_PARAM
+	Param float64 `yaml:"param"`
+
+	// SamplingServerURL is the URL of sampling manager that can provide
+	// sampling strategy to this service.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLING_ENDPOINT
+	SamplingServerURL string `yaml:"samplingServerURL"`
+
+	// SamplingRefreshInterval controls how often the remotely controlled sampler will poll
+	// sampling manager for the appropriate sampling strategy.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL
+	SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"`
+
+	// MaxOperations is the maximum number of operations that the PerOperationSampler
+	// will keep track of. If an operation is not tracked, a default probabilistic
+	// sampler will be used rather than the per operation specific sampler.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_SAMPLER_MAX_OPERATIONS.
+	MaxOperations int `yaml:"maxOperations"`
+
+	// Opt-in feature for applications that require late binding of span name via explicit
+	// call to SetOperationName when using PerOperationSampler. When this feature is enabled,
+	// the sampler will return retryable=true from OnCreateSpan(), thus leaving the sampling
+	// decision as non-final (and the span as writeable). This may lead to degraded performance
+	// in applications that always provide the correct span name on trace creation.
+	//
+	// For backwards compatibility this option is off by default.
+	OperationNameLateBinding bool `yaml:"operationNameLateBinding"`
+
+	// Options can be used to programmatically pass additional options to the Remote sampler.
+	Options []jaeger.SamplerOption
+}
+
+// ReporterConfig configures the reporter. All fields are optional.
+type ReporterConfig struct {
+	// QueueSize controls how many spans the reporter can keep in memory before it starts dropping
+	// new spans. The queue is continuously drained by a background go-routine, as fast as spans
+	// can be sent out of process.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE
+	QueueSize int `yaml:"queueSize"`
+
+	// BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full.
+	// It is generally not useful, as it only matters for very low traffic services.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_FLUSH_INTERVAL
+	BufferFlushInterval time.Duration
+
+	// LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter
+	// and logs all submitted spans. Main Configuration.Logger must be initialized in the code
+	// for this option to have any effect.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_LOG_SPANS
+	LogSpans bool `yaml:"logSpans"`
+
+	// LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT
+	LocalAgentHostPort string `yaml:"localAgentHostPort"`
+
+	// DisableAttemptReconnecting when true, disables udp connection helper that periodically re-resolves
+	// the agent's hostname and reconnects if there was a change. This option only
+	// applies if LocalAgentHostPort is specified.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED
+	DisableAttemptReconnecting bool `yaml:"disableAttemptReconnecting"`
+
+	// AttemptReconnectInterval controls how often the agent client re-resolves the provided hostname
+	// in order to detect address changes. This option only applies if DisableAttemptReconnecting is false.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL
+	AttemptReconnectInterval time.Duration
+
+	// CollectorEndpoint instructs reporter to send spans to jaeger-collector at this URL.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_ENDPOINT
+	CollectorEndpoint string `yaml:"collectorEndpoint"`
+
+	// User instructs reporter to include a user for basic http authentication when sending spans to jaeger-collector.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_USER
+	User string `yaml:"user"`
+
+	// Password instructs reporter to include a password for basic http authentication when sending spans to
+	// jaeger-collector.
+	// Can be provided by FromEnv() via the environment variable named JAEGER_PASSWORD
+	Password string `yaml:"password"`
+
+	// HTTPHeaders instructs the reporter to add these headers to the http request when reporting spans.
+	// This field takes effect only when using HTTPTransport by setting the CollectorEndpoint.
+	HTTPHeaders map[string]string `yaml:"http_headers"`
+}
+
+// BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist
+// certain baggage keys. All fields are optional.
+type BaggageRestrictionsConfig struct {
+	// DenyBaggageOnInitializationFailure controls the startup failure mode of the baggage restriction
+	// manager. If true, the manager will not allow any baggage to be written until baggage restrictions have
+	// been retrieved from jaeger-agent. If false, the manager will allow any baggage to be written until baggage
+	// restrictions have been retrieved from jaeger-agent.
+	DenyBaggageOnInitializationFailure bool `yaml:"denyBaggageOnInitializationFailure"`
+
+	// HostPort is the hostPort of jaeger-agent's baggage restrictions server
+	HostPort string `yaml:"hostPort"`
+
+	// RefreshInterval controls how often the baggage restriction manager will poll
+	// jaeger-agent for the most recent baggage restrictions.
+	RefreshInterval time.Duration `yaml:"refreshInterval"`
+}
+
+// ThrottlerConfig configures the throttler which can be used to throttle the
+// rate at which the client may send debug requests.
+type ThrottlerConfig struct {
+	// HostPort of jaeger-agent's credit server.
+	HostPort string `yaml:"hostPort"`
+
+	// RefreshInterval controls how often the throttler will poll jaeger-agent
+	// for more throttling credits.
+	RefreshInterval time.Duration `yaml:"refreshInterval"`
+
+	// SynchronousInitialization determines whether or not the throttler should
+	// synchronously fetch credits from the agent when an operation is seen for
+	// the first time. This should be set to true if the client will be used by
+	// a short-lived service that needs to ensure that credits are fetched
+	// upfront such that sampling or throttling occurs.
+	SynchronousInitialization bool `yaml:"synchronousInitialization"`
+}
+
+type nullCloser struct{}
+
+func (*nullCloser) Close() error { return nil }
+
+// New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers
+// before shutdown.
+//
+// Deprecated: use NewTracer() function
+func (c Configuration) New(
+	serviceName string,
+	options ...Option,
+) (opentracing.Tracer, io.Closer, error) {
+	if serviceName != "" {
+		c.ServiceName = serviceName
+	}
+
+	return c.NewTracer(options...)
+}
+
+// NewTracer returns a new tracer based on the current configuration, using the given options,
+// and a closer func that can be used to flush buffers before shutdown.
+func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) {
+	if c.Disabled {
+		return &opentracing.NoopTracer{}, &nullCloser{}, nil
+	}
+
+	if c.ServiceName == "" {
+		return nil, nil, errors.New("no service name provided")
+	}
+
+	opts := applyOptions(options...)
+	tracerMetrics := jaeger.NewMetrics(opts.metrics, nil)
+	if c.RPCMetrics {
+		Observer(
+			rpcmetrics.NewObserver(
+				opts.metrics.Namespace(metrics.NSOptions{Name: "jaeger-rpc", Tags: map[string]string{"component": "jaeger"}}),
+				rpcmetrics.DefaultNameNormalizer,
+			),
+		)(&opts) // adds to c.observers
+	}
+	if c.Sampler == nil {
+		c.Sampler = &SamplerConfig{
+			Type:  jaeger.SamplerTypeRemote,
+			Param: defaultSamplingProbability,
+		}
+	}
+	if c.Reporter == nil {
+		c.Reporter = &ReporterConfig{}
+	}
+
+	sampler := opts.sampler
+	if sampler == nil {
+		s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics)
+		if err != nil {
+			return nil, nil, err
+		}
+		sampler = s
+	}
+
+	reporter := opts.reporter
+	if reporter == nil {
+		r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger)
+		if err != nil {
+			return nil, nil, err
+		}
+		reporter = r
+	}
+
+	tracerOptions := []jaeger.TracerOption{
+		jaeger.TracerOptions.Metrics(tracerMetrics),
+		jaeger.TracerOptions.Logger(opts.logger),
+		jaeger.TracerOptions.CustomHeaderKeys(c.Headers),
+		jaeger.TracerOptions.PoolSpans(opts.poolSpans),
+		jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan),
+		jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength),
+		jaeger.TracerOptions.NoDebugFlagOnForcedSampling(opts.noDebugFlagOnForcedSampling),
+	}
+
+	if c.Gen128Bit || opts.gen128Bit {
+		tracerOptions = append(tracerOptions, jaeger.TracerOptions.Gen128Bit(true))
+	}
+
+	if opts.randomNumber != nil {
+		tracerOptions = append(tracerOptions, jaeger.TracerOptions.RandomNumber(opts.randomNumber))
+	}
+
+	for _, tag := range opts.tags {
+		tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
+	}
+
+	for _, tag := range c.Tags {
+		tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value))
+	}
+
+	for _, obs := range opts.observers {
+		tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs))
+	}
+
+	for _, cobs := range opts.contribObservers {
+		tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs))
+	}
+
+	for format, injector := range opts.injectors {
+		tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector))
+	}
+
+	for format, extractor := range opts.extractors {
+		tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor))
+	}
+
+	if c.BaggageRestrictions != nil {
+		mgr := remote.NewRestrictionManager(
+			c.ServiceName,
+			remote.Options.Metrics(tracerMetrics),
+			remote.Options.Logger(opts.logger),
+			remote.Options.HostPort(c.BaggageRestrictions.HostPort),
+			remote.Options.RefreshInterval(c.BaggageRestrictions.RefreshInterval),
+			remote.Options.DenyBaggageOnInitializationFailure(
+				c.BaggageRestrictions.DenyBaggageOnInitializationFailure,
+			),
+		)
+		tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr))
+	}
+
+	if c.Throttler != nil {
+		debugThrottler := throttler.NewThrottler(
+			c.ServiceName,
+			throttler.Options.Metrics(tracerMetrics),
+			throttler.Options.Logger(opts.logger),
+			throttler.Options.HostPort(c.Throttler.HostPort),
+			throttler.Options.RefreshInterval(c.Throttler.RefreshInterval),
+			throttler.Options.SynchronousInitialization(
+				c.Throttler.SynchronousInitialization,
+			),
+		)
+
+		tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler))
+	}
+
+	tracer, closer := jaeger.NewTracer(
+		c.ServiceName,
+		sampler,
+		reporter,
+		tracerOptions...,
+	)
+
+	return tracer, closer, nil
+}
+
+// InitGlobalTracer creates a new Jaeger Tracer, and sets it as global OpenTracing Tracer.
+// It returns a closer func that can be used to flush buffers before shutdown.
+func (c Configuration) InitGlobalTracer(
+	serviceName string,
+	options ...Option,
+) (io.Closer, error) {
+	if c.Disabled {
+		return &nullCloser{}, nil
+	}
+	tracer, closer, err := c.New(serviceName, options...)
+	if err != nil {
+		return nil, err
+	}
+	opentracing.SetGlobalTracer(tracer)
+	return closer, nil
+}
+
+// NewSampler creates a new sampler based on the configuration
+func (sc *SamplerConfig) NewSampler(
+	serviceName string,
+	metrics *jaeger.Metrics,
+) (jaeger.Sampler, error) {
+	samplerType := strings.ToLower(sc.Type)
+	if samplerType == jaeger.SamplerTypeConst {
+		return jaeger.NewConstSampler(sc.Param != 0), nil
+	}
+	if samplerType == jaeger.SamplerTypeProbabilistic {
+		if sc.Param >= 0 && sc.Param <= 1.0 {
+			return jaeger.NewProbabilisticSampler(sc.Param)
+		}
+		return nil, fmt.Errorf(
+			"invalid Param for probabilistic sampler; expecting value between 0 and 1, received %v",
+			sc.Param,
+		)
+	}
+	if samplerType == jaeger.SamplerTypeRateLimiting {
+		return jaeger.NewRateLimitingSampler(sc.Param), nil
+	}
+	if samplerType == jaeger.SamplerTypeRemote || sc.Type == "" {
+		sc2 := *sc
+		sc2.Type = jaeger.SamplerTypeProbabilistic
+		initSampler, err := sc2.NewSampler(serviceName, nil)
+		if err != nil {
+			return nil, err
+		}
+		options := []jaeger.SamplerOption{
+			jaeger.SamplerOptions.Metrics(metrics),
+			jaeger.SamplerOptions.InitialSampler(initSampler),
+			jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL),
+			jaeger.SamplerOptions.MaxOperations(sc.MaxOperations),
+			jaeger.SamplerOptions.OperationNameLateBinding(sc.OperationNameLateBinding),
+			jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval),
+		}
+		options = append(options, sc.Options...)
+		return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil
+	}
+	return nil, fmt.Errorf("unknown sampler type (%s)", sc.Type)
+}
+
+// NewReporter instantiates a new reporter that submits spans to the collector
+func (rc *ReporterConfig) NewReporter(
+	serviceName string,
+	metrics *jaeger.Metrics,
+	logger jaeger.Logger,
+) (jaeger.Reporter, error) {
+	sender, err := rc.newTransport(logger)
+	if err != nil {
+		return nil, err
+	}
+	reporter := jaeger.NewRemoteReporter(
+		sender,
+		jaeger.ReporterOptions.QueueSize(rc.QueueSize),
+		jaeger.ReporterOptions.BufferFlushInterval(rc.BufferFlushInterval),
+		jaeger.ReporterOptions.Logger(logger),
+		jaeger.ReporterOptions.Metrics(metrics))
+	if rc.LogSpans && logger != nil {
+		logger.Infof("Initializing logging reporter\n")
+		reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter)
+	}
+	return reporter, err
+}
+
+func (rc *ReporterConfig) newTransport(logger jaeger.Logger) (jaeger.Transport, error) {
+	switch {
+	case rc.CollectorEndpoint != "":
+		httpOptions := []transport.HTTPOption{transport.HTTPHeaders(rc.HTTPHeaders)}
+		if rc.User != "" && rc.Password != "" {
+			httpOptions = append(httpOptions, transport.HTTPBasicAuth(rc.User, rc.Password))
+		}
+		return transport.NewHTTPTransport(rc.CollectorEndpoint, httpOptions...), nil
+	default:
+		return jaeger.NewUDPTransportWithParams(jaeger.UDPTransportParams{
+			AgentClientUDPParams: utils.AgentClientUDPParams{
+				HostPort:                   rc.LocalAgentHostPort,
+				Logger:                     logger,
+				DisableAttemptReconnecting: rc.DisableAttemptReconnecting,
+				AttemptReconnectInterval:   rc.AttemptReconnectInterval,
+			},
+		})
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/config_env.go b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
new file mode 100644
index 0000000..0fc3c53
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/config/config_env.go
@@ -0,0 +1,268 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+	"fmt"
+	"net/url"
+	"os"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/pkg/errors"
+	"github.com/uber/jaeger-client-go"
+)
+
+const (
+	// environment variable names
+	envServiceName                         = "JAEGER_SERVICE_NAME"
+	envDisabled                            = "JAEGER_DISABLED"
+	envRPCMetrics                          = "JAEGER_RPC_METRICS"
+	envTags                                = "JAEGER_TAGS"
+	envSamplerType                         = "JAEGER_SAMPLER_TYPE"
+	envSamplerParam                        = "JAEGER_SAMPLER_PARAM"
+	envSamplerManagerHostPort              = "JAEGER_SAMPLER_MANAGER_HOST_PORT" // Deprecated by envSamplingEndpoint
+	envSamplingEndpoint                    = "JAEGER_SAMPLING_ENDPOINT"
+	envSamplerMaxOperations                = "JAEGER_SAMPLER_MAX_OPERATIONS"
+	envSamplerRefreshInterval              = "JAEGER_SAMPLER_REFRESH_INTERVAL"
+	envReporterMaxQueueSize                = "JAEGER_REPORTER_MAX_QUEUE_SIZE"
+	envReporterFlushInterval               = "JAEGER_REPORTER_FLUSH_INTERVAL"
+	envReporterLogSpans                    = "JAEGER_REPORTER_LOG_SPANS"
+	envReporterAttemptReconnectingDisabled = "JAEGER_REPORTER_ATTEMPT_RECONNECTING_DISABLED"
+	envReporterAttemptReconnectInterval    = "JAEGER_REPORTER_ATTEMPT_RECONNECT_INTERVAL"
+	envEndpoint                            = "JAEGER_ENDPOINT"
+	envUser                                = "JAEGER_USER"
+	envPassword                            = "JAEGER_PASSWORD"
+	envAgentHost                           = "JAEGER_AGENT_HOST"
+	envAgentPort                           = "JAEGER_AGENT_PORT"
+	env128bit                              = "JAEGER_TRACEID_128BIT"
+)
+
+// FromEnv uses environment variables to set the tracer's Configuration
+func FromEnv() (*Configuration, error) {
+	c := &Configuration{}
+	return c.FromEnv()
+}
+
+// FromEnv uses environment variables and overrides existing tracer's Configuration
+func (c *Configuration) FromEnv() (*Configuration, error) {
+	if e := os.Getenv(envServiceName); e != "" {
+		c.ServiceName = e
+	}
+
+	if e := os.Getenv(envRPCMetrics); e != "" {
+		if value, err := strconv.ParseBool(e); err == nil {
+			c.RPCMetrics = value
+		} else {
+			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e)
+		}
+	}
+
+	if e := os.Getenv(envDisabled); e != "" {
+		if value, err := strconv.ParseBool(e); err == nil {
+			c.Disabled = value
+		} else {
+			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e)
+		}
+	}
+
+	if e := os.Getenv(envTags); e != "" {
+		c.Tags = parseTags(e)
+	}
+
+	if e := os.Getenv(env128bit); e != "" {
+		if value, err := strconv.ParseBool(e); err == nil {
+			c.Gen128Bit = value
+		} else {
+			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", env128bit, e)
+		}
+	}
+
+	if c.Sampler == nil {
+		c.Sampler = &SamplerConfig{}
+	}
+
+	if s, err := c.Sampler.samplerConfigFromEnv(); err == nil {
+		c.Sampler = s
+	} else {
+		return nil, errors.Wrap(err, "cannot obtain sampler config from env")
+	}
+
+	if c.Reporter == nil {
+		c.Reporter = &ReporterConfig{}
+	}
+
+	if r, err := c.Reporter.reporterConfigFromEnv(); err == nil {
+		c.Reporter = r
+	} else {
+		return nil, errors.Wrap(err, "cannot obtain reporter config from env")
+	}
+
+	return c, nil
+}
+
+// samplerConfigFromEnv creates a new SamplerConfig based on the environment variables
+func (sc *SamplerConfig) samplerConfigFromEnv() (*SamplerConfig, error) {
+	if e := os.Getenv(envSamplerType); e != "" {
+		sc.Type = e
+	}
+
+	if e := os.Getenv(envSamplerParam); e != "" {
+		if value, err := strconv.ParseFloat(e, 64); err == nil {
+			sc.Param = value
+		} else {
+			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e)
+		}
+	}
+
+	if e := os.Getenv(envSamplingEndpoint); e != "" {
+		sc.SamplingServerURL = e
+	} else if e := os.Getenv(envSamplerManagerHostPort); e != "" {
+		sc.SamplingServerURL = e
+	} else if e := os.Getenv(envAgentHost); e != "" {
+		// Fallback if we know the agent host - try the sampling endpoint there
+		sc.SamplingServerURL = fmt.Sprintf("http://%s:%d/sampling", e, jaeger.DefaultSamplingServerPort)
+	}
+
+	if e := os.Getenv(envSamplerMaxOperations); e != "" {
+		if value, err := strconv.ParseInt(e, 10, 0); err == nil {
+			sc.MaxOperations = int(value)
+		} else {
+			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerMaxOperations, e)
+		}
+	}
+
+	if e := os.Getenv(envSamplerRefreshInterval); e != "" {
+		if value, err := time.ParseDuration(e); err == nil {
+			sc.SamplingRefreshInterval = value
+		} else {
+			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e)
+		}
+	}
+
+	return sc, nil
+}
+
+// reporterConfigFromEnv creates a new ReporterConfig based on the environment variables
+func (rc *ReporterConfig) reporterConfigFromEnv() (*ReporterConfig, error) {
+	if e := os.Getenv(envReporterMaxQueueSize); e != "" {
+		if value, err := strconv.ParseInt(e, 10, 0); err == nil {
+			rc.QueueSize = int(value)
+		} else {
+			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e)
+		}
+	}
+
+	if e := os.Getenv(envReporterFlushInterval); e != "" {
+		if value, err := time.ParseDuration(e); err == nil {
+			rc.BufferFlushInterval = value
+		} else {
+			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e)
+		}
+	}
+
+	if e := os.Getenv(envReporterLogSpans); e != "" {
+		if value, err := strconv.ParseBool(e); err == nil {
+			rc.LogSpans = value
+		} else {
+			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e)
+		}
+	}
+
+	if e := os.Getenv(envEndpoint); e != "" {
+		u, err := url.ParseRequestURI(e)
+		if err != nil {
+			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envEndpoint, e)
+		}
+		rc.CollectorEndpoint = u.String()
+		user := os.Getenv(envUser)
+		pswd := os.Getenv(envPassword)
+		if user != "" && pswd == "" || user == "" && pswd != "" {
+			return nil, errors.Errorf("you must set %s and %s env vars together", envUser, envPassword)
+		}
+		rc.User = user
+		rc.Password = pswd
+	} else {
+		useEnv := false
+		host := jaeger.DefaultUDPSpanServerHost
+		if e := os.Getenv(envAgentHost); e != "" {
+			host = e
+			useEnv = true
+		}
+
+		port := jaeger.DefaultUDPSpanServerPort
+		if e := os.Getenv(envAgentPort); e != "" {
+			if value, err := strconv.ParseInt(e, 10, 0); err == nil {
+				port = int(value)
+				useEnv = true
+			} else {
+				return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e)
+			}
+		}
+		if useEnv || rc.LocalAgentHostPort == "" {
+			rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port)
+		}
+
+		if e := os.Getenv(envReporterAttemptReconnectingDisabled); e != "" {
+			if value, err := strconv.ParseBool(e); err == nil {
+				rc.DisableAttemptReconnecting = value
+			} else {
+				return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectingDisabled, e)
+			}
+		}
+
+		if !rc.DisableAttemptReconnecting {
+			if e := os.Getenv(envReporterAttemptReconnectInterval); e != "" {
+				if value, err := time.ParseDuration(e); err == nil {
+					rc.AttemptReconnectInterval = value
+				} else {
+					return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterAttemptReconnectInterval, e)
+				}
+			}
+		}
+	}
+
+	return rc, nil
+}
+
+// parseTags parses the given string into a collection of Tags.
+// Spec for this value:
+// - comma separated list of key=value
+// - value can be specified using the notation ${envVar:defaultValue}, where `envVar`
+// is an environment variable and `defaultValue` is the value to use in case the env var is not set
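+// Example (hypothetical values): JAEGER_TAGS="team=backend,host=${HOSTNAME:unknown}"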
+func parseTags(sTags string) []opentracing.Tag {
+	pairs := strings.Split(sTags, ",")
+	tags := make([]opentracing.Tag, 0)
+	for _, p := range pairs {
+		kv := strings.SplitN(p, "=", 2)
+		k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1])
+
+		if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") {
+			ed := strings.SplitN(v[2:len(v)-1], ":", 2)
+			e, d := ed[0], ed[1]
+			v = os.Getenv(e)
+			if v == "" && d != "" {
+				v = d
+			}
+		}
+
+		tag := opentracing.Tag{Key: k, Value: v}
+		tags = append(tags, tag)
+	}
+
+	return tags
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/config/options.go b/vendor/github.com/uber/jaeger-client-go/config/options.go
new file mode 100644
index 0000000..a2b9cbc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/config/options.go
@@ -0,0 +1,173 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package config
+
+import (
+	opentracing "github.com/opentracing/opentracing-go"
+	"github.com/uber/jaeger-lib/metrics"
+
+	"github.com/uber/jaeger-client-go"
+)
+
+// Option is a function that sets some option on the client.
+type Option func(c *Options)
+
+// Options control behavior of the client.
+type Options struct {
+	metrics                     metrics.Factory
+	logger                      jaeger.Logger
+	reporter                    jaeger.Reporter
+	sampler                     jaeger.Sampler
+	contribObservers            []jaeger.ContribObserver
+	observers                   []jaeger.Observer
+	gen128Bit                   bool
+	poolSpans                   bool
+	zipkinSharedRPCSpan         bool
+	maxTagValueLength           int
+	noDebugFlagOnForcedSampling bool
+	tags                        []opentracing.Tag
+	injectors                   map[interface{}]jaeger.Injector
+	extractors                  map[interface{}]jaeger.Extractor
+	randomNumber                func() uint64
+}
+
+// Metrics creates an Option that initializes Metrics in the tracer,
+// which is used to emit statistics about spans.
+func Metrics(factory metrics.Factory) Option {
+	return func(c *Options) {
+		c.metrics = factory
+	}
+}
+
+// Logger can be provided to log Reporter errors, as well as to log spans
+// if Reporter.LogSpans is set to true.
+func Logger(logger jaeger.Logger) Option {
+	return func(c *Options) {
+		c.logger = logger
+	}
+}
+
+// Reporter can be provided explicitly to override the configuration.
+// Useful for testing, e.g. by passing InMemoryReporter.
+func Reporter(reporter jaeger.Reporter) Option {
+	return func(c *Options) {
+		c.reporter = reporter
+	}
+}
+
+// Sampler can be provided explicitly to override the configuration.
+func Sampler(sampler jaeger.Sampler) Option {
+	return func(c *Options) {
+		c.sampler = sampler
+	}
+}
+
+// Observer can be registered with the Tracer to receive notifications about new Spans.
+func Observer(observer jaeger.Observer) Option {
+	return func(c *Options) {
+		c.observers = append(c.observers, observer)
+	}
+}
+
+// ContribObserver can be registered with the Tracer to receive notifications
+// about new spans.
+func ContribObserver(observer jaeger.ContribObserver) Option {
+	return func(c *Options) {
+		c.contribObservers = append(c.contribObservers, observer)
+	}
+}
+
+// Gen128Bit specifies whether to generate 128bit trace IDs.
+func Gen128Bit(gen128Bit bool) Option {
+	return func(c *Options) {
+		c.gen128Bit = gen128Bit
+	}
+}
+
+// PoolSpans specifies whether to pool spans
+func PoolSpans(poolSpans bool) Option {
+	return func(c *Options) {
+		c.poolSpans = poolSpans
+	}
+}
+
+// ZipkinSharedRPCSpan creates an option that enables sharing span ID between client
+// and server spans a la zipkin. If false, client and server spans will be assigned
+// different IDs.
+func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option {
+	return func(c *Options) {
+		c.zipkinSharedRPCSpan = zipkinSharedRPCSpan
+	}
+}
+
+// MaxTagValueLength can be provided to override the default max tag value length.
+func MaxTagValueLength(maxTagValueLength int) Option {
+	return func(c *Options) {
+		c.maxTagValueLength = maxTagValueLength
+	}
+}
+
+// NoDebugFlagOnForcedSampling can be used to decide whether debug flag will be set or not
+// when calling span.setSamplingPriority to force sample a span.
+func NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) Option {
+	return func(c *Options) {
+		c.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
+	}
+}
+
+// Tag creates an option that adds a tracer-level tag.
+func Tag(key string, value interface{}) Option {
+	return func(c *Options) {
+		c.tags = append(c.tags, opentracing.Tag{Key: key, Value: value})
+	}
+}
+
+// Injector registers an Injector with the given format.
+func Injector(format interface{}, injector jaeger.Injector) Option {
+	return func(c *Options) {
+		c.injectors[format] = injector
+	}
+}
+
+// Extractor registers an Extractor with the given format.
+func Extractor(format interface{}, extractor jaeger.Extractor) Option {
+	return func(c *Options) {
+		c.extractors[format] = extractor
+	}
+}
+
+// WithRandomNumber supplies a random number generator function to the Tracer used to generate trace and span IDs.
+func WithRandomNumber(f func() uint64) Option {
+	return func(c *Options) {
+		c.randomNumber = f
+	}
+}
+
+func applyOptions(options ...Option) Options {
+	opts := Options{
+		injectors:  make(map[interface{}]jaeger.Injector),
+		extractors: make(map[interface{}]jaeger.Extractor),
+	}
+	for _, option := range options {
+		option(&opts)
+	}
+	if opts.metrics == nil {
+		opts.metrics = metrics.NullFactory
+	}
+	if opts.logger == nil {
+		opts.logger = jaeger.NullLogger
+	}
+	return opts
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/constants.go b/vendor/github.com/uber/jaeger-client-go/constants.go
new file mode 100644
index 0000000..d8eb698
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/constants.go
@@ -0,0 +1,106 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"fmt"
+
+	"github.com/opentracing/opentracing-go"
+)
+
+const (
+	// JaegerClientVersion is the version of the client library reported as Span tag.
+	JaegerClientVersion = "Go-2.29.1"
+
+	// JaegerClientVersionTagKey is the name of the tag used to report client version.
+	JaegerClientVersionTagKey = "jaeger.version"
+
+	// JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
+	// if found in the carrier, forces the trace to be sampled as "debug" trace.
+	// The value of the header is recorded as the tag on the root span, so that the
+	// trace can be found in the UI using this value as a correlation ID.
+	JaegerDebugHeader = "jaeger-debug-id"
+
+	// JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
+	// It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
+	// a root span does not exist.
+	JaegerBaggageHeader = "jaeger-baggage"
+
+	// TracerHostnameTagKey used to report host name of the process.
+	TracerHostnameTagKey = "hostname"
+
+	// TracerIPTagKey used to report ip of the process.
+	TracerIPTagKey = "ip"
+
+	// TracerUUIDTagKey used to report UUID of the client process.
+	TracerUUIDTagKey = "client-uuid"
+
+	// SamplerTypeTagKey reports which sampler was used on the root span.
+	SamplerTypeTagKey = "sampler.type"
+
+	// SamplerParamTagKey reports the parameter of the sampler, like sampling probability.
+	SamplerParamTagKey = "sampler.param"
+
+	// TraceContextHeaderName is the http header name used to propagate tracing context.
+	// This must be in lower-case to avoid mismatches when decoding incoming headers.
+	TraceContextHeaderName = "uber-trace-id"
+
+	// TracerStateHeaderName is deprecated.
+	// Deprecated: use TraceContextHeaderName
+	TracerStateHeaderName = TraceContextHeaderName
+
+	// TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
+	// This must be in lower-case to avoid mismatches when decoding incoming headers.
+	TraceBaggageHeaderPrefix = "uberctx-"
+
+	// SamplerTypeConst is the type of sampler that always makes the same decision.
+	SamplerTypeConst = "const"
+
+	// SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy.
+	SamplerTypeRemote = "remote"
+
+	// SamplerTypeProbabilistic is the type of sampler that samples traces
+	// with a certain fixed probability.
+	SamplerTypeProbabilistic = "probabilistic"
+
+	// SamplerTypeRateLimiting is the type of sampler that samples
+	// only up to a fixed number of traces per second.
+	SamplerTypeRateLimiting = "ratelimiting"
+
+	// SamplerTypeLowerBound is the type of sampler that samples
+	// at least a fixed number of traces per second.
+	SamplerTypeLowerBound = "lowerbound"
+
+	// DefaultUDPSpanServerHost is the default host to send the spans to, via UDP
+	DefaultUDPSpanServerHost = "localhost"
+
+	// DefaultUDPSpanServerPort is the default port to send the spans to, via UDP
+	DefaultUDPSpanServerPort = 6831
+
+	// DefaultSamplingServerPort is the default port to fetch sampling config from, via http
+	DefaultSamplingServerPort = 5778
+
+	// DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value.
+	DefaultMaxTagValueLength = 256
+
+	// selfRefType is a jaeger-specific reference type that supports creating a span
+	// with an already defined context.
+	selfRefType opentracing.SpanReferenceType = 99
+)
+
+var (
+	// DefaultSamplingServerURL is the default url to fetch sampling config from, via http
+	DefaultSamplingServerURL = fmt.Sprintf("http://127.0.0.1:%d/sampling", DefaultSamplingServerPort)
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/contrib_observer.go b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
new file mode 100644
index 0000000..4ce1881
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/contrib_observer.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	opentracing "github.com/opentracing/opentracing-go"
+)
+
+// ContribObserver can be registered with the Tracer to receive notifications
+// about new Spans. Modelled after github.com/opentracing-contrib/go-observer.
+type ContribObserver interface {
+	// Create and return a span observer. Called when a span starts.
+	// If the Observer is not interested in the given span, it must return (nil, false).
+	// E.g.:
+	//     func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+	//         var sp opentracing.Span
+	//         sso := opentracing.StartSpanOptions{}
+	//         if spanObserver, ok := Observer.OnStartSpan(span, opName, sso); ok {
+	//             // we have a valid SpanObserver
+	//         }
+	//         ...
+	//     }
+	OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool)
+}
+
+// ContribSpanObserver is created by the Observer and receives notifications
+// about other Span events. This interface is meant to match
+// github.com/opentracing-contrib/go-observer, via duck typing, without
+// directly importing the go-observer package.
+type ContribSpanObserver interface {
+	OnSetOperationName(operationName string)
+	OnSetTag(key string, value interface{})
+	OnFinish(options opentracing.FinishOptions)
+}
+
+// wrapper observer for the old observers (see observer.go)
+type oldObserver struct {
+	obs Observer
+}
+
+func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) {
+	spanObserver := o.obs.OnStartSpan(operationName, options)
+	return spanObserver, spanObserver != nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/doc.go b/vendor/github.com/uber/jaeger-client-go/doc.go
new file mode 100644
index 0000000..4f55490
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/doc.go
@@ -0,0 +1,24 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package jaeger implements an OpenTracing (http://opentracing.io) Tracer.
+It currently uses a Zipkin-compatible data model and can be directly
+integrated with a Zipkin backend (http://zipkin.io).
+
+For integration instructions please refer to the README:
+
+https://github.com/uber/jaeger-client-go/blob/master/README.md
+*/
+package jaeger
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.lock b/vendor/github.com/uber/jaeger-client-go/glide.lock
new file mode 100644
index 0000000..c1ec339
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/glide.lock
@@ -0,0 +1,105 @@
+hash: 63bec420a22b7e5abac8c602c5cc9b66a33d6a1bfec8918eecc77fd344b759ed
+updated: 2020-07-31T13:30:37.242608-04:00
+imports:
+- name: github.com/beorn7/perks
+  version: 3a771d992973f24aa725d07868b467d1ddfceafb
+  subpackages:
+  - quantile
+- name: github.com/HdrHistogram/hdrhistogram-go
+  version: 3a0bb77429bd3a61596f5e8a3172445844342120
+- name: github.com/crossdock/crossdock-go
+  version: 049aabb0122b03bc9bd30cab8f3f91fb60166361
+  subpackages:
+  - assert
+  - require
+- name: github.com/davecgh/go-spew
+  version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73
+  subpackages:
+  - spew
+- name: github.com/golang/mock
+  version: 51421b967af1f557f93a59e0057aaf15ca02e29c
+  subpackages:
+  - gomock
+- name: github.com/golang/protobuf
+  version: b5d812f8a3706043e23a9cd5babf2e5423744d30
+  subpackages:
+  - proto
+- name: github.com/matttproud/golang_protobuf_extensions
+  version: c182affec369e30f25d3eb8cd8a478dee585ae7d
+  subpackages:
+  - pbutil
+- name: github.com/opentracing/opentracing-go
+  version: d34af3eaa63c4d08ab54863a4bdd0daa45212e12
+  subpackages:
+  - ext
+  - harness
+  - log
+- name: github.com/pkg/errors
+  version: ba968bfe8b2f7e042a574c888954fccecfa385b4
+- name: github.com/pmezard/go-difflib
+  version: 5d4384ee4fb2527b0a1256a821ebfc92f91efefc
+  subpackages:
+  - difflib
+- name: github.com/prometheus/client_golang
+  version: 170205fb58decfd011f1550d4cfb737230d7ae4f
+  subpackages:
+  - prometheus
+  - prometheus/internal
+- name: github.com/prometheus/client_model
+  version: fd36f4220a901265f90734c3183c5f0c91daa0b8
+  subpackages:
+  - go
+- name: github.com/prometheus/common
+  version: 1ab4d74fc89940cfbc3c2b3a89821336cdefa119
+  subpackages:
+  - expfmt
+  - internal/bitbucket.org/ww/goautoneg
+  - model
+- name: github.com/prometheus/procfs
+  version: 8a055596020d692cf491851e47ba3e302d9f90ce
+  subpackages:
+  - internal/fs
+  - internal/util
+- name: github.com/stretchr/testify
+  version: f654a9112bbeac49ca2cd45bfbe11533c4666cf8
+  subpackages:
+  - assert
+  - mock
+  - require
+  - suite
+- name: github.com/uber-go/atomic
+  version: 845920076a298bdb984fb0f1b86052e4ca0a281c
+- name: github.com/uber/jaeger-lib
+  version: 48cc1df63e6be0d63b95677f0d22beb880bce1e4
+  subpackages:
+  - metrics
+  - metrics/metricstest
+  - metrics/prometheus
+- name: go.uber.org/atomic
+  version: 845920076a298bdb984fb0f1b86052e4ca0a281c
+- name: go.uber.org/multierr
+  version: b587143a48b62b01d337824eab43700af6ffe222
+- name: go.uber.org/zap
+  version: feeb9a050b31b40eec6f2470e7599eeeadfe5bdd
+  subpackages:
+  - buffer
+  - internal/bufferpool
+  - internal/color
+  - internal/exit
+  - zapcore
+  - zaptest/observer
+- name: golang.org/x/net
+  version: addf6b3196f61cd44ce5a76657913698c73479d0
+  subpackages:
+  - context
+  - context/ctxhttp
+- name: golang.org/x/sys
+  version: 3e129f6d46b10b0e1da36b3deffcb55e09631b64
+  subpackages:
+  - internal/unsafeheader
+  - windows
+- name: gopkg.in/yaml.v3
+  version: eeeca48fe7764f320e4870d231902bf9c1be2c08
+testImports:
+- name: github.com/stretchr/objx
+  version: 35313a95ee26395aa17d366c71a2ccf788fa69b6
diff --git a/vendor/github.com/uber/jaeger-client-go/glide.yaml b/vendor/github.com/uber/jaeger-client-go/glide.yaml
new file mode 100644
index 0000000..295678c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/glide.yaml
@@ -0,0 +1,30 @@
+package: github.com/uber/jaeger-client-go
+import:
+- package: github.com/opentracing/opentracing-go
+  version: ^1.2
+  subpackages:
+  - ext
+  - log
+- package: github.com/crossdock/crossdock-go
+- package: github.com/uber/jaeger-lib
+  version: ^2.3.0
+  subpackages:
+  - metrics
+- package: github.com/pkg/errors
+  version: ~0.8.0
+- package: go.uber.org/zap
+  source: https://github.com/uber-go/zap.git
+  version: ^1
+- package: github.com/uber-go/atomic
+  version: ^1
+- package: github.com/prometheus/client_golang
+  version: 1.1
+- package: github.com/prometheus/procfs
+  version: 0.0.6
+testImport:
+- package: github.com/stretchr/testify
+  subpackages:
+  - assert
+  - require
+  - suite
+- package: github.com/golang/mock
diff --git a/vendor/github.com/uber/jaeger-client-go/header.go b/vendor/github.com/uber/jaeger-client-go/header.go
new file mode 100644
index 0000000..5da7035
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/header.go
@@ -0,0 +1,65 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// HeadersConfig contains the values for the header keys that Jaeger will use.
+// These values may be either custom or default depending on whether custom
+// values were provided via a configuration.
+type HeadersConfig struct {
+	// JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which,
+	// if found in the carrier, forces the trace to be sampled as "debug" trace.
+	// The value of the header is recorded as the tag on the root span, so that the
+	// trace can be found in the UI using this value as a correlation ID.
+	JaegerDebugHeader string `yaml:"jaegerDebugHeader"`
+
+	// JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage.
+	// It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where
+	// a root span does not exist.
+	JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"`
+
+	// TraceContextHeaderName is the http header name used to propagate tracing context.
+	// This must be in lower-case to avoid mismatches when decoding incoming headers.
+	TraceContextHeaderName string `yaml:"TraceContextHeaderName"`
+
+	// TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage.
+	// This must be in lower-case to avoid mismatches when decoding incoming headers.
+	TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"`
+}
+
+// ApplyDefaults sets missing configuration keys to default values
+func (c *HeadersConfig) ApplyDefaults() *HeadersConfig {
+	if c.JaegerBaggageHeader == "" {
+		c.JaegerBaggageHeader = JaegerBaggageHeader
+	}
+	if c.JaegerDebugHeader == "" {
+		c.JaegerDebugHeader = JaegerDebugHeader
+	}
+	if c.TraceBaggageHeaderPrefix == "" {
+		c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix
+	}
+	if c.TraceContextHeaderName == "" {
+		c.TraceContextHeaderName = TraceContextHeaderName
+	}
+	return c
+}
+
+func getDefaultHeadersConfig() *HeadersConfig {
+	return &HeadersConfig{
+		JaegerDebugHeader:        JaegerDebugHeader,
+		JaegerBaggageHeader:      JaegerBaggageHeader,
+		TraceContextHeaderName:   TraceContextHeaderName,
+		TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix,
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go
new file mode 100644
index 0000000..7457293
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"time"
+
+	"github.com/uber/jaeger-client-go"
+)
+
+const (
+	defaultMaxValueLength  = 2048
+	defaultRefreshInterval = time.Minute
+	defaultHostPort        = "localhost:5778"
+)
+
+// Option is a function that sets some option on the RestrictionManager
+type Option func(options *options)
+
+// Options is a factory for all available options
+var Options options
+
+type options struct {
+	denyBaggageOnInitializationFailure bool
+	metrics                            *jaeger.Metrics
+	logger                             jaeger.Logger
+	hostPort                           string
+	refreshInterval                    time.Duration
+}
+
+// DenyBaggageOnInitializationFailure creates an Option that determines the startup failure mode of RestrictionManager.
+// If DenyBaggageOnInitializationFailure is true, RestrictionManager will not allow any baggage to be written until baggage
+// restrictions have been retrieved from agent.
+// If DenyBaggageOnInitializationFailure is false, RestrictionManager will allow any baggage to be written until baggage
+// restrictions have been retrieved from agent.
+func (options) DenyBaggageOnInitializationFailure(b bool) Option {
+	return func(o *options) {
+		o.denyBaggageOnInitializationFailure = b
+	}
+}
+
+// Metrics creates an Option that initializes Metrics on the RestrictionManager, which is used to emit statistics.
+func (options) Metrics(m *jaeger.Metrics) Option {
+	return func(o *options) {
+		o.metrics = m
+	}
+}
+
+// Logger creates an Option that sets the logger used by the RestrictionManager.
+func (options) Logger(logger jaeger.Logger) Option {
+	return func(o *options) {
+		o.logger = logger
+	}
+}
+
+// HostPort creates an Option that sets the hostPort of the local agent that contains the baggage restrictions.
+func (options) HostPort(hostPort string) Option {
+	return func(o *options) {
+		o.hostPort = hostPort
+	}
+}
+
+// RefreshInterval creates an Option that sets how often the RestrictionManager will poll local agent for
+// the baggage restrictions.
+func (options) RefreshInterval(refreshInterval time.Duration) Option {
+	return func(o *options) {
+		o.refreshInterval = refreshInterval
+	}
+}
+
+func applyOptions(o ...Option) options {
+	opts := options{}
+	for _, option := range o {
+		option(&opts)
+	}
+	if opts.metrics == nil {
+		opts.metrics = jaeger.NewNullMetrics()
+	}
+	if opts.logger == nil {
+		opts.logger = jaeger.NullLogger
+	}
+	if opts.hostPort == "" {
+		opts.hostPort = defaultHostPort
+	}
+	if opts.refreshInterval == 0 {
+		opts.refreshInterval = defaultRefreshInterval
+	}
+	return opts
+}
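The file above follows the functional-options pattern used throughout this client: exported setters on the Options factory return closures that mutate an unexported options struct, and applyOptions backfills defaults after all closures have run. A self-contained sketch of the same pattern with hypothetical names (serverOptions, WithHostPort, WithTimeout), for illustration only:

```
package main

import (
	"fmt"
	"time"
)

// serverOptions is a hypothetical option holder, mirroring the unexported
// options struct above.
type serverOptions struct {
	hostPort string
	timeout  time.Duration
}

// ServerOption mirrors the Option function type.
type ServerOption func(*serverOptions)

// WithHostPort and WithTimeout mirror Options.HostPort / Options.RefreshInterval.
func WithHostPort(hp string) ServerOption {
	return func(o *serverOptions) { o.hostPort = hp }
}

func WithTimeout(d time.Duration) ServerOption {
	return func(o *serverOptions) { o.timeout = d }
}

// applyServerOptions mirrors applyOptions: run the callers' options first,
// then fill in defaults for anything left unset.
func applyServerOptions(opts ...ServerOption) serverOptions {
	o := serverOptions{}
	for _, opt := range opts {
		opt(&o)
	}
	if o.hostPort == "" {
		o.hostPort = "localhost:5778"
	}
	if o.timeout == 0 {
		o.timeout = time.Minute
	}
	return o
}

func main() {
	o := applyServerOptions(WithHostPort("agent:5778"))
	fmt.Println(o.hostPort, o.timeout) // agent:5778 1m0s
}
```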
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
new file mode 100644
index 0000000..2f58bb5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go
@@ -0,0 +1,158 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"context"
+	"fmt"
+	"net/url"
+	"sync"
+	"time"
+
+	"github.com/uber/jaeger-client-go/internal/baggage"
+	thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage"
+	"github.com/uber/jaeger-client-go/utils"
+)
+
+type httpBaggageRestrictionManagerProxy struct {
+	url string
+}
+
+func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBaggageRestrictionManagerProxy {
+	v := url.Values{}
+	v.Set("service", serviceName)
+	return &httpBaggageRestrictionManagerProxy{
+		url: fmt.Sprintf("http://%s/baggageRestrictions?%s", hostPort, v.Encode()),
+	}
+}
+
+func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(context.Context, string) ([]*thrift.BaggageRestriction, error) {
+	var out []*thrift.BaggageRestriction
+	if err := utils.GetJSON(s.url, &out); err != nil {
+		return nil, err
+	}
+	return out, nil
+}
+
+// RestrictionManager manages baggage restrictions by retrieving baggage restrictions from agent
+type RestrictionManager struct {
+	options
+
+	mux                sync.RWMutex
+	serviceName        string
+	restrictions       map[string]*baggage.Restriction
+	thriftProxy        thrift.BaggageRestrictionManager
+	pollStopped        sync.WaitGroup
+	stopPoll           chan struct{}
+	invalidRestriction *baggage.Restriction
+	validRestriction   *baggage.Restriction
+
+	// Determines if the manager has successfully retrieved baggage restrictions from agent
+	initialized bool
+}
+
+// NewRestrictionManager returns a BaggageRestrictionManager that polls the agent for the latest
+// baggage restrictions.
+func NewRestrictionManager(serviceName string, options ...Option) *RestrictionManager {
+	// TODO there is a developing use case where a single tracer can generate traces on behalf of many services.
+	// restrictionsMap will need to exist per service
+	opts := applyOptions(options...)
+	m := &RestrictionManager{
+		serviceName:        serviceName,
+		options:            opts,
+		restrictions:       make(map[string]*baggage.Restriction),
+		thriftProxy:        newHTTPBaggageRestrictionManagerProxy(opts.hostPort, serviceName),
+		stopPoll:           make(chan struct{}),
+		invalidRestriction: baggage.NewRestriction(false, 0),
+		validRestriction:   baggage.NewRestriction(true, defaultMaxValueLength),
+	}
+	m.pollStopped.Add(1)
+	go m.pollManager()
+	return m
+}
+
+// isReady returns true if the manager has retrieved baggage restrictions from the remote source.
+func (m *RestrictionManager) isReady() bool {
+	m.mux.RLock()
+	defer m.mux.RUnlock()
+	return m.initialized
+}
+
+// GetRestriction implements RestrictionManager#GetRestriction.
+func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction {
+	m.mux.RLock()
+	defer m.mux.RUnlock()
+	if !m.initialized {
+		if m.denyBaggageOnInitializationFailure {
+			return m.invalidRestriction
+		}
+		return m.validRestriction
+	}
+	if restriction, ok := m.restrictions[key]; ok {
+		return restriction
+	}
+	return m.invalidRestriction
+}
+
+// Close stops remote polling and closes the RemoteRestrictionManager.
+func (m *RestrictionManager) Close() error {
+	close(m.stopPoll)
+	m.pollStopped.Wait()
+	return nil
+}
+
+func (m *RestrictionManager) pollManager() {
+	defer m.pollStopped.Done()
+	// attempt to initialize baggage restrictions
+	if err := m.updateRestrictions(); err != nil {
+		m.logger.Error(fmt.Sprintf("Failed to initialize baggage restrictions: %s", err.Error()))
+	}
+	ticker := time.NewTicker(m.refreshInterval)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-ticker.C:
+			if err := m.updateRestrictions(); err != nil {
+				m.logger.Error(fmt.Sprintf("Failed to update baggage restrictions: %s", err.Error()))
+			}
+		case <-m.stopPoll:
+			return
+		}
+	}
+}
+
+func (m *RestrictionManager) updateRestrictions() error {
+	restrictions, err := m.thriftProxy.GetBaggageRestrictions(context.Background(), m.serviceName)
+	if err != nil {
+		m.metrics.BaggageRestrictionsUpdateFailure.Inc(1)
+		return err
+	}
+	newRestrictions := m.parseRestrictions(restrictions)
+	m.metrics.BaggageRestrictionsUpdateSuccess.Inc(1)
+	m.mux.Lock()
+	defer m.mux.Unlock()
+	m.initialized = true
+	m.restrictions = newRestrictions
+	return nil
+}
+
+func (m *RestrictionManager) parseRestrictions(restrictions []*thrift.BaggageRestriction) map[string]*baggage.Restriction {
+	setters := make(map[string]*baggage.Restriction, len(restrictions))
+	for _, restriction := range restrictions {
+		setters[restriction.BaggageKey] = baggage.NewRestriction(true, int(restriction.MaxValueLength))
+	}
+	return setters
+}
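A usage sketch for the remote RestrictionManager combined with the Options factory from options.go. Because this package sits under internal/, it is only importable from within jaeger-client-go itself (the tracer wires it up through its baggage-restriction option), so the sketch is written as if it lived inside this package; the agent address and service name are illustrative:

```
package remote

import "time"

// exampleNewRestrictionManager is a sketch that would live inside this
// internal package.
func exampleNewRestrictionManager() {
	mgr := NewRestrictionManager(
		"my-service",
		Options.HostPort("localhost:5778"),
		Options.RefreshInterval(30*time.Second),
		Options.DenyBaggageOnInitializationFailure(false),
	)
	defer mgr.Close()

	// Until the first successful poll, the permissive default applies
	// unless DenyBaggageOnInitializationFailure(true) was set.
	r := mgr.GetRestriction("my-service", "session-id")
	_ = r.KeyAllowed()
}
```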
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
new file mode 100644
index 0000000..c16a5c5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage
+
+const (
+	defaultMaxValueLength = 2048
+)
+
+// Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value.
+type Restriction struct {
+	keyAllowed     bool
+	maxValueLength int
+}
+
+// NewRestriction returns a new Restriction.
+func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction {
+	return &Restriction{
+		keyAllowed:     keyAllowed,
+		maxValueLength: maxValueLength,
+	}
+}
+
+// KeyAllowed returns whether the baggage key for this restriction is allowed.
+func (r *Restriction) KeyAllowed() bool {
+	return r.keyAllowed
+}
+
+// MaxValueLength returns the max length for the baggage value.
+func (r *Restriction) MaxValueLength() int {
+	return r.maxValueLength
+}
+
+// RestrictionManager keeps track of valid baggage keys and their restrictions. The manager
+// will return a Restriction for a specific baggage key which will determine whether the baggage
+// key is allowed for the current service and any other applicable restrictions on the baggage
+// value.
+type RestrictionManager interface {
+	GetRestriction(service, key string) *Restriction
+}
+
+// DefaultRestrictionManager allows any baggage key.
+type DefaultRestrictionManager struct {
+	defaultRestriction *Restriction
+}
+
+// NewDefaultRestrictionManager returns a DefaultRestrictionManager.
+func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager {
+	if maxValueLength == 0 {
+		maxValueLength = defaultMaxValueLength
+	}
+	return &DefaultRestrictionManager{
+		defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength},
+	}
+}
+
+// GetRestriction implements RestrictionManager#GetRestriction.
+func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction {
+	return m.defaultRestriction
+}
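A short sketch of how the permissive DefaultRestrictionManager behaves; as with the remote variant, this package is internal, so the example is framed as in-package code:

```
package baggage

import "fmt"

// exampleDefaultRestrictionManager sketches the permissive default manager:
// every key is allowed, and a zero maxValueLength falls back to the package
// default of 2048.
func exampleDefaultRestrictionManager() {
	m := NewDefaultRestrictionManager(0)

	r := m.GetRestriction("any-service", "any-key")
	fmt.Println(r.KeyAllowed(), r.MaxValueLength()) // true 2048
}
```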
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go b/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go
new file mode 100644
index 0000000..fe0bef2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/reporterstats/stats.go
@@ -0,0 +1,25 @@
+// Copyright (c) 2020 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package reporterstats
+
+// ReporterStats exposes some metrics from the RemoteReporter.
+type ReporterStats interface {
+	SpansDroppedFromQueue() int64
+}
+
+// Receiver can be implemented by a Transport to be given ReporterStats.
+type Receiver interface {
+	SetReporterStats(ReporterStats)
+}
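A sketch of a Transport-side implementation of Receiver that simply keeps the ReporterStats handle for later use; the type and method names other than the two interfaces are hypothetical:

```
package reporterstats

// statsAwareTransport is a hypothetical transport fragment showing how a
// Transport can accept ReporterStats via the Receiver interface.
type statsAwareTransport struct {
	stats ReporterStats
}

// SetReporterStats implements Receiver.
func (t *statsAwareTransport) SetReporterStats(rs ReporterStats) {
	t.stats = rs
}

// droppedSoFar could be consulted when emitting a batch, e.g. to attach the
// current drop count as metadata.
func (t *statsAwareTransport) droppedSoFar() int64 {
	if t.stats == nil {
		return 0
	}
	return t.stats.SpansDroppedFromQueue()
}
```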
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
new file mode 100644
index 0000000..0e10b8a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go
@@ -0,0 +1,81 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package spanlog
+
+import (
+	"encoding/json"
+	"fmt"
+
+	"github.com/opentracing/opentracing-go/log"
+)
+
+type fieldsAsMap map[string]string
+
+// MaterializeWithJSON converts log Fields into JSON string
+// TODO refactor into pluggable materializer
+func MaterializeWithJSON(logFields []log.Field) ([]byte, error) {
+	fields := fieldsAsMap(make(map[string]string, len(logFields)))
+	for _, field := range logFields {
+		field.Marshal(fields)
+	}
+	if event, ok := fields["event"]; ok && len(fields) == 1 {
+		return []byte(event), nil
+	}
+	return json.Marshal(fields)
+}
+
+func (ml fieldsAsMap) EmitString(key, value string) {
+	ml[key] = value
+}
+
+func (ml fieldsAsMap) EmitBool(key string, value bool) {
+	ml[key] = fmt.Sprintf("%t", value)
+}
+
+func (ml fieldsAsMap) EmitInt(key string, value int) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitInt32(key string, value int32) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitInt64(key string, value int64) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitUint32(key string, value uint32) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitUint64(key string, value uint64) {
+	ml[key] = fmt.Sprintf("%d", value)
+}
+
+func (ml fieldsAsMap) EmitFloat32(key string, value float32) {
+	ml[key] = fmt.Sprintf("%f", value)
+}
+
+func (ml fieldsAsMap) EmitFloat64(key string, value float64) {
+	ml[key] = fmt.Sprintf("%f", value)
+}
+
+func (ml fieldsAsMap) EmitObject(key string, value interface{}) {
+	ml[key] = fmt.Sprintf("%+v", value)
+}
+
+func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) {
+	value(ml)
+}
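A sketch of MaterializeWithJSON converting OpenTracing log fields into the JSON payload used for span logs; written as in-package code since spanlog is internal. Note the single-field "event" shortcut in the function above: with only an "event" field the raw value is returned instead of a JSON object.

```
package spanlog

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
)

// exampleMaterializeWithJSON sketches the conversion of log fields into JSON.
func exampleMaterializeWithJSON() {
	out, err := MaterializeWithJSON([]log.Field{
		log.String("event", "cache-miss"),
		log.Int("attempt", 2),
	})
	if err == nil {
		fmt.Println(string(out)) // {"attempt":"2","event":"cache-miss"}
	}
}
```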
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go
new file mode 100644
index 0000000..f52c322
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go
@@ -0,0 +1,99 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"time"
+
+	"github.com/uber/jaeger-client-go"
+)
+
+const (
+	defaultHostPort        = "localhost:5778"
+	defaultRefreshInterval = time.Second * 5
+)
+
+// Option is a function that sets some option on the Throttler
+type Option func(options *options)
+
+// Options is a factory for all available options
+var Options options
+
+type options struct {
+	metrics                   *jaeger.Metrics
+	logger                    jaeger.Logger
+	hostPort                  string
+	refreshInterval           time.Duration
+	synchronousInitialization bool
+}
+
+// Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics.
+func (options) Metrics(m *jaeger.Metrics) Option {
+	return func(o *options) {
+		o.metrics = m
+	}
+}
+
+// Logger creates an Option that sets the logger used by the Throttler.
+func (options) Logger(logger jaeger.Logger) Option {
+	return func(o *options) {
+		o.logger = logger
+	}
+}
+
+// HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits.
+func (options) HostPort(hostPort string) Option {
+	return func(o *options) {
+		o.hostPort = hostPort
+	}
+}
+
+// RefreshInterval creates an Option that sets how often the Throttler will poll local agent for
+// credits.
+func (options) RefreshInterval(refreshInterval time.Duration) Option {
+	return func(o *options) {
+		o.refreshInterval = refreshInterval
+	}
+}
+
+// SynchronousInitialization creates an Option that determines whether the throttler should synchronously
+// fetch credits from the agent when an operation is seen for the first time. This should be set to true
+// if the client will be used by a short lived service that needs to ensure that credits are fetched upfront
+// such that sampling or throttling occurs.
+func (options) SynchronousInitialization(b bool) Option {
+	return func(o *options) {
+		o.synchronousInitialization = b
+	}
+}
+
+func applyOptions(o ...Option) options {
+	opts := options{}
+	for _, option := range o {
+		option(&opts)
+	}
+	if opts.metrics == nil {
+		opts.metrics = jaeger.NewNullMetrics()
+	}
+	if opts.logger == nil {
+		opts.logger = jaeger.NullLogger
+	}
+	if opts.hostPort == "" {
+		opts.hostPort = defaultHostPort
+	}
+	if opts.refreshInterval == 0 {
+		opts.refreshInterval = defaultRefreshInterval
+	}
+	return opts
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go
new file mode 100644
index 0000000..20f434f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go
@@ -0,0 +1,216 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package remote
+
+import (
+	"fmt"
+	"net/url"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/pkg/errors"
+
+	"github.com/uber/jaeger-client-go"
+	"github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+	// minimumCredits is the minimum amount of credits necessary to not be throttled.
+	// i.e. if currentCredits >= minimumCredits, then the operation will not be throttled.
+	minimumCredits = 1.0
+)
+
+var (
+	errorUUIDNotSet = errors.New("Throttler UUID must be set")
+)
+
+type operationBalance struct {
+	Operation string  `json:"operation"`
+	Balance   float64 `json:"balance"`
+}
+
+type creditResponse struct {
+	Balances []operationBalance `json:"balances"`
+}
+
+type httpCreditManagerProxy struct {
+	hostPort string
+}
+
+func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy {
+	return &httpCreditManagerProxy{
+		hostPort: hostPort,
+	}
+}
+
+// N.B. Operations list must not be empty.
+func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) {
+	params := url.Values{}
+	params.Set("service", serviceName)
+	params.Set("uuid", uuid)
+	for _, op := range operations {
+		params.Add("operations", op)
+	}
+	var resp creditResponse
+	if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil {
+		return nil, errors.Wrap(err, "Failed to receive credits from agent")
+	}
+	return &resp, nil
+}
+
+// Throttler retrieves credits from agent and uses it to throttle operations.
+type Throttler struct {
+	options
+
+	mux           sync.RWMutex
+	service       string
+	uuid          atomic.Value
+	creditManager *httpCreditManagerProxy
+	credits       map[string]float64 // map of operation->credits
+	close         chan struct{}
+	stopped       sync.WaitGroup
+}
+
+// NewThrottler returns a Throttler that polls agent for credits and uses them to throttle
+// the service.
+func NewThrottler(service string, options ...Option) *Throttler {
+	opts := applyOptions(options...)
+	creditManager := newHTTPCreditManagerProxy(opts.hostPort)
+	t := &Throttler{
+		options:       opts,
+		creditManager: creditManager,
+		service:       service,
+		credits:       make(map[string]float64),
+		close:         make(chan struct{}),
+	}
+	t.stopped.Add(1)
+	go t.pollManager()
+	return t
+}
+
+// IsAllowed implements Throttler#IsAllowed.
+func (t *Throttler) IsAllowed(operation string) bool {
+	t.mux.Lock()
+	defer t.mux.Unlock()
+	value, ok := t.credits[operation]
+	if !ok || value == 0 {
+		if !ok {
+			// NOTE: This appears to be a no-op at first glance, but it stores
+			// the operation key in the map. Necessary for functionality of
+			// Throttler#operations method.
+			t.credits[operation] = 0
+		}
+		if !t.synchronousInitialization {
+			t.metrics.ThrottledDebugSpans.Inc(1)
+			return false
+		}
+		// If it is the first time this operation is being checked, synchronously fetch
+		// the credits.
+		credits, err := t.fetchCredits([]string{operation})
+		if err != nil {
+			// Failed to receive credits from agent, try again next time
+			t.logger.Error("Failed to fetch credits: " + err.Error())
+			return false
+		}
+		if len(credits.Balances) == 0 {
+			// This shouldn't happen but just in case
+			return false
+		}
+		for _, opBalance := range credits.Balances {
+			t.credits[opBalance.Operation] += opBalance.Balance
+		}
+	}
+	return t.isAllowed(operation)
+}
+
+// Close stops the throttler from fetching credits from remote.
+func (t *Throttler) Close() error {
+	close(t.close)
+	t.stopped.Wait()
+	return nil
+}
+
+// SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote
+// requests are made.
+func (t *Throttler) SetProcess(process jaeger.Process) {
+	if process.UUID != "" {
+		t.uuid.Store(process.UUID)
+	}
+}
+
+// N.B. This function must be called with the Write Lock
+func (t *Throttler) isAllowed(operation string) bool {
+	credits := t.credits[operation]
+	if credits < minimumCredits {
+		t.metrics.ThrottledDebugSpans.Inc(1)
+		return false
+	}
+	t.credits[operation] = credits - minimumCredits
+	return true
+}
+
+func (t *Throttler) pollManager() {
+	defer t.stopped.Done()
+	ticker := time.NewTicker(t.refreshInterval)
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			t.refreshCredits()
+		case <-t.close:
+			return
+		}
+	}
+}
+
+func (t *Throttler) operations() []string {
+	t.mux.RLock()
+	defer t.mux.RUnlock()
+	operations := make([]string, 0, len(t.credits))
+	for op := range t.credits {
+		operations = append(operations, op)
+	}
+	return operations
+}
+
+func (t *Throttler) refreshCredits() {
+	operations := t.operations()
+	if len(operations) == 0 {
+		return
+	}
+	newCredits, err := t.fetchCredits(operations)
+	if err != nil {
+		t.metrics.ThrottlerUpdateFailure.Inc(1)
+		t.logger.Error("Failed to fetch credits: " + err.Error())
+		return
+	}
+	t.metrics.ThrottlerUpdateSuccess.Inc(1)
+
+	t.mux.Lock()
+	defer t.mux.Unlock()
+	for _, opBalance := range newCredits.Balances {
+		t.credits[opBalance.Operation] += opBalance.Balance
+	}
+}
+
+func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) {
+	uuid := t.uuid.Load()
+	uuidStr, _ := uuid.(string)
+	if uuid == nil || uuidStr == "" {
+		return nil, errorUUIDNotSet
+	}
+	return t.creditManager.FetchCredits(uuidStr, t.service, operations)
+}
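A usage sketch for the remote Throttler, again framed as in-package code because the package is internal. The UUID must be supplied via SetProcess before credits can be fetched, otherwise fetchCredits returns errorUUIDNotSet; the service name, agent address and UUID below are illustrative:

```
package remote

import (
	"time"

	"github.com/uber/jaeger-client-go"
)

// exampleThrottlerUsage is a sketch that would live inside this internal package.
func exampleThrottlerUsage() {
	t := NewThrottler(
		"my-service",
		Options.HostPort("localhost:5778"),
		Options.RefreshInterval(5*time.Second),
		Options.SynchronousInitialization(true),
	)
	defer t.Close()

	// The UUID identifies this tracer instance to the agent's credit accounting.
	t.SetProcess(jaeger.Process{
		Service: "my-service",
		UUID:    "de305d54-75b4-431b-adb2-eb6b9e546014", // illustrative
	})

	if t.IsAllowed("debug-span-operation") {
		// start the debug span
	}
}
```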
diff --git a/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
new file mode 100644
index 0000000..196ed69
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go
@@ -0,0 +1,32 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package throttler
+
+// Throttler is used to rate limit operations. For example, given how debug spans
+// are always sampled, a throttler can be enabled per client to rate limit the amount
+// of debug spans a client can start.
+type Throttler interface {
+	// IsAllowed determines whether the operation should be allowed and not be
+	// throttled.
+	IsAllowed(operation string) bool
+}
+
+// DefaultThrottler doesn't throttle at all.
+type DefaultThrottler struct{}
+
+// IsAllowed implements Throttler#IsAllowed.
+func (t DefaultThrottler) IsAllowed(operation string) bool {
+	return true
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/interop.go b/vendor/github.com/uber/jaeger-client-go/interop.go
new file mode 100644
index 0000000..8402d08
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/interop.go
@@ -0,0 +1,55 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"github.com/opentracing/opentracing-go"
+)
+
+// TODO this file should not be needed after TChannel PR.
+
+type formatKey int
+
+// SpanContextFormat is a constant used as OpenTracing Format.
+// Requires *SpanContext as carrier.
+// This format is intended for interop with TChannel or other Zipkin-like tracers.
+const SpanContextFormat formatKey = iota
+
+type jaegerTraceContextPropagator struct {
+	tracer *Tracer
+}
+
+func (p *jaegerTraceContextPropagator) Inject(
+	ctx SpanContext,
+	abstractCarrier interface{},
+) error {
+	carrier, ok := abstractCarrier.(*SpanContext)
+	if !ok {
+		return opentracing.ErrInvalidCarrier
+	}
+
+	carrier.CopyFrom(&ctx)
+	return nil
+}
+
+func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+	carrier, ok := abstractCarrier.(*SpanContext)
+	if !ok {
+		return emptyContext, opentracing.ErrInvalidCarrier
+	}
+	ctx := new(SpanContext)
+	ctx.CopyFrom(carrier)
+	return *ctx, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
new file mode 100644
index 0000000..868b2a5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/jaeger_tag.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"fmt"
+
+	"github.com/opentracing/opentracing-go/log"
+
+	j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+)
+
+type tags []*j.Tag
+
+// ConvertLogsToJaegerTags converts log Fields into jaeger tags.
+func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag {
+	fields := tags(make([]*j.Tag, 0, len(logFields)))
+	for _, field := range logFields {
+		field.Marshal(&fields)
+	}
+	return fields
+}
+
+func (t *tags) EmitString(key, value string) {
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value})
+}
+
+func (t *tags) EmitBool(key string, value bool) {
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value})
+}
+
+func (t *tags) EmitInt(key string, value int) {
+	vLong := int64(value)
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitInt32(key string, value int32) {
+	vLong := int64(value)
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitInt64(key string, value int64) {
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value})
+}
+
+func (t *tags) EmitUint32(key string, value uint32) {
+	vLong := int64(value)
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitUint64(key string, value uint64) {
+	vLong := int64(value)
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong})
+}
+
+func (t *tags) EmitFloat32(key string, value float32) {
+	vDouble := float64(value)
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble})
+}
+
+func (t *tags) EmitFloat64(key string, value float64) {
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value})
+}
+
+func (t *tags) EmitObject(key string, value interface{}) {
+	vStr := fmt.Sprintf("%+v", value)
+	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr})
+}
+
+func (t *tags) EmitLazyLogger(value log.LazyLogger) {
+	value(t)
+}
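A small sketch of ConvertLogsToJaegerTags from outside the package, showing how typed log fields map onto thrift tag types (strings to STRING, integers to LONG, booleans to BOOL):

```
package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	tags := jaeger.ConvertLogsToJaegerTags([]log.Field{
		log.String("event", "retry"),
		log.Int64("backoff_ms", 250),
		log.Bool("final", false),
	})
	for _, t := range tags {
		// Each entry is a *thrift Tag with Key and VType populated.
		fmt.Println(t.Key, t.VType)
	}
}
```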
diff --git a/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
new file mode 100644
index 0000000..3ac2f8f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go
@@ -0,0 +1,181 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+
+	j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+	"github.com/uber/jaeger-client-go/utils"
+)
+
+// BuildJaegerThrift builds jaeger span based on internal span.
+// TODO: (breaking change) move to internal package.
+func BuildJaegerThrift(span *Span) *j.Span {
+	span.Lock()
+	defer span.Unlock()
+	startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
+	duration := span.duration.Nanoseconds() / int64(time.Microsecond)
+	jaegerSpan := &j.Span{
+		TraceIdLow:    int64(span.context.traceID.Low),
+		TraceIdHigh:   int64(span.context.traceID.High),
+		SpanId:        int64(span.context.spanID),
+		ParentSpanId:  int64(span.context.parentID),
+		OperationName: span.operationName,
+		Flags:         int32(span.context.samplingState.flags()),
+		StartTime:     startTime,
+		Duration:      duration,
+		Tags:          buildTags(span.tags, span.tracer.options.maxTagValueLength),
+		Logs:          buildLogs(span.logs),
+		References:    buildReferences(span.references),
+	}
+	return jaegerSpan
+}
+
+// BuildJaegerProcessThrift creates a thrift Process type.
+// TODO: (breaking change) move to internal package.
+func BuildJaegerProcessThrift(span *Span) *j.Process {
+	span.Lock()
+	defer span.Unlock()
+	return buildJaegerProcessThrift(span.tracer)
+}
+
+func buildJaegerProcessThrift(tracer *Tracer) *j.Process {
+	process := &j.Process{
+		ServiceName: tracer.serviceName,
+		Tags:        buildTags(tracer.tags, tracer.options.maxTagValueLength),
+	}
+	if tracer.process.UUID != "" {
+		process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING})
+	}
+	return process
+}
+
+func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag {
+	jTags := make([]*j.Tag, 0, len(tags))
+	for _, tag := range tags {
+		jTag := buildTag(&tag, maxTagValueLength)
+		jTags = append(jTags, jTag)
+	}
+	return jTags
+}
+
+func buildLogs(logs []opentracing.LogRecord) []*j.Log {
+	jLogs := make([]*j.Log, 0, len(logs))
+	for _, log := range logs {
+		jLog := &j.Log{
+			Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
+			Fields:    ConvertLogsToJaegerTags(log.Fields),
+		}
+		jLogs = append(jLogs, jLog)
+	}
+	return jLogs
+}
+
+func buildTag(tag *Tag, maxTagValueLength int) *j.Tag {
+	jTag := &j.Tag{Key: tag.key}
+	switch value := tag.value.(type) {
+	case string:
+		vStr := truncateString(value, maxTagValueLength)
+		jTag.VStr = &vStr
+		jTag.VType = j.TagType_STRING
+	case []byte:
+		if len(value) > maxTagValueLength {
+			value = value[:maxTagValueLength]
+		}
+		jTag.VBinary = value
+		jTag.VType = j.TagType_BINARY
+	case int:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case uint:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case int8:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case uint8:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case int16:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case uint16:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case int32:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case uint32:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case int64:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case uint64:
+		vLong := int64(value)
+		jTag.VLong = &vLong
+		jTag.VType = j.TagType_LONG
+	case float32:
+		vDouble := float64(value)
+		jTag.VDouble = &vDouble
+		jTag.VType = j.TagType_DOUBLE
+	case float64:
+		vDouble := float64(value)
+		jTag.VDouble = &vDouble
+		jTag.VType = j.TagType_DOUBLE
+	case bool:
+		vBool := value
+		jTag.VBool = &vBool
+		jTag.VType = j.TagType_BOOL
+	default:
+		vStr := truncateString(stringify(value), maxTagValueLength)
+		jTag.VStr = &vStr
+		jTag.VType = j.TagType_STRING
+	}
+	return jTag
+}
+
+func buildReferences(references []Reference) []*j.SpanRef {
+	retMe := make([]*j.SpanRef, 0, len(references))
+	for _, ref := range references {
+		if ref.Type == opentracing.ChildOfRef {
+			retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF))
+		} else if ref.Type == opentracing.FollowsFromRef {
+			retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM))
+		}
+	}
+	return retMe
+}
+
+func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef {
+	return &j.SpanRef{
+		RefType:     refType,
+		TraceIdLow:  int64(ctx.traceID.Low),
+		TraceIdHigh: int64(ctx.traceID.High),
+		SpanId:      int64(ctx.spanID),
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/log/logger.go b/vendor/github.com/uber/jaeger-client-go/log/logger.go
new file mode 100644
index 0000000..ced6e0c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/log/logger.go
@@ -0,0 +1,141 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package log
+
+import (
+	"bytes"
+	"fmt"
+	"log"
+	"sync"
+)
+
+// Logger provides an abstract interface for logging from Reporters.
+// Applications can provide their own implementation of this interface to adapt
+// reporters logging to whatever logging library they prefer (stdlib log,
+// logrus, go-logging, etc).
+type Logger interface {
+	// Error logs a message at error priority
+	Error(msg string)
+
+	// Infof logs a message at info priority
+	Infof(msg string, args ...interface{})
+}
+
+// StdLogger is an implementation of the Logger interface that delegates to the default `log` package
+var StdLogger = &stdLogger{}
+
+type stdLogger struct{}
+
+func (l *stdLogger) Error(msg string) {
+	log.Printf("ERROR: %s", msg)
+}
+
+// Infof logs a message at info priority
+func (l *stdLogger) Infof(msg string, args ...interface{}) {
+	log.Printf(msg, args...)
+}
+
+// Debugf logs a message at debug priority
+func (l *stdLogger) Debugf(msg string, args ...interface{}) {
+	log.Printf(fmt.Sprintf("DEBUG: %s", msg), args...)
+}
+
+// NullLogger is an implementation of the Logger interface that is a no-op
+var NullLogger = &nullLogger{}
+
+type nullLogger struct{}
+
+func (l *nullLogger) Error(msg string)                       {}
+func (l *nullLogger) Infof(msg string, args ...interface{})  {}
+func (l *nullLogger) Debugf(msg string, args ...interface{}) {}
+
+// BytesBufferLogger implements Logger backed by a bytes.Buffer.
+type BytesBufferLogger struct {
+	mux sync.Mutex
+	buf bytes.Buffer
+}
+
+// Error implements Logger.
+func (l *BytesBufferLogger) Error(msg string) {
+	l.mux.Lock()
+	l.buf.WriteString(fmt.Sprintf("ERROR: %s\n", msg))
+	l.mux.Unlock()
+}
+
+// Infof implements Logger.
+func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) {
+	l.mux.Lock()
+	l.buf.WriteString("INFO: " + fmt.Sprintf(msg, args...) + "\n")
+	l.mux.Unlock()
+}
+
+// Debugf implements Logger.
+func (l *BytesBufferLogger) Debugf(msg string, args ...interface{}) {
+	l.mux.Lock()
+	l.buf.WriteString("DEBUG: " + fmt.Sprintf(msg, args...) + "\n")
+	l.mux.Unlock()
+}
+
+// String returns string representation of the underlying buffer.
+func (l *BytesBufferLogger) String() string {
+	l.mux.Lock()
+	defer l.mux.Unlock()
+	return l.buf.String()
+}
+
+// Flush empties the underlying buffer.
+func (l *BytesBufferLogger) Flush() {
+	l.mux.Lock()
+	defer l.mux.Unlock()
+	l.buf.Reset()
+}
+
+// DebugLogger is an interface which adds a debug logging level
+type DebugLogger interface {
+	Logger
+
+	// Debugf logs a message at debug priority
+	Debugf(msg string, args ...interface{})
+}
+
+// DebugLogAdapter is a log adapter that converts a Logger into a DebugLogger
+// If the provided Logger doesn't satisfy the interface, a logger with debug
+// disabled is returned
+func DebugLogAdapter(logger Logger) DebugLogger {
+	if logger == nil {
+		return nil
+	}
+	if debugLogger, ok := logger.(DebugLogger); ok {
+		return debugLogger
+	}
+	logger.Infof("debug logging disabled")
+	return debugDisabledLogAdapter{logger: logger}
+}
+
+type debugDisabledLogAdapter struct {
+	logger Logger
+}
+
+func (d debugDisabledLogAdapter) Error(msg string) {
+	d.logger.Error(msg)
+}
+
+func (d debugDisabledLogAdapter) Infof(msg string, args ...interface{}) {
+	d.logger.Infof(msg, args...)
+}
+
+// Debugf is a nop
+func (d debugDisabledLogAdapter) Debugf(msg string, args ...interface{}) {
+}
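A sketch of plugging a custom Logger into DebugLogAdapter, alongside the loggers shipped in this package; printfLogger is a hypothetical adapter that satisfies only the basic Logger interface (Error/Infof), not DebugLogger:

```
package main

import (
	stdlog "log"

	jlog "github.com/uber/jaeger-client-go/log"
)

// printfLogger implements only Error and Infof.
type printfLogger struct{}

func (printfLogger) Error(msg string)                      { stdlog.Println("ERROR:", msg) }
func (printfLogger) Infof(msg string, args ...interface{}) { stdlog.Printf(msg, args...) }

func main() {
	// Because printfLogger has no Debugf, DebugLogAdapter logs one
	// informational line and returns an adapter whose Debugf is a no-op.
	dl := jlog.DebugLogAdapter(printfLogger{})
	dl.Debugf("this is dropped: %d", 42)

	// StdLogger and BytesBufferLogger already satisfy DebugLogger directly.
	var buf jlog.BytesBufferLogger
	buf.Infof("hello %s", "world")
	_ = buf.String() // "INFO: hello world\n"
}
```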
diff --git a/vendor/github.com/uber/jaeger-client-go/logger.go b/vendor/github.com/uber/jaeger-client-go/logger.go
new file mode 100644
index 0000000..d4f0b50
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/logger.go
@@ -0,0 +1,53 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import "log"
+
+// NB This will be deprecated in 3.0.0, please use jaeger-client-go/log/logger instead.
+
+// Logger provides an abstract interface for logging from Reporters.
+// Applications can provide their own implementation of this interface to adapt
+// reporters logging to whatever logging library they prefer (stdlib log,
+// logrus, go-logging, etc).
+type Logger interface {
+	// Error logs a message at error priority
+	Error(msg string)
+
+	// Infof logs a message at info priority
+	Infof(msg string, args ...interface{})
+}
+
+// StdLogger is an implementation of the Logger interface that delegates to the default `log` package
+var StdLogger = &stdLogger{}
+
+type stdLogger struct{}
+
+func (l *stdLogger) Error(msg string) {
+	log.Printf("ERROR: %s", msg)
+}
+
+// Infof logs a message at info priority
+func (l *stdLogger) Infof(msg string, args ...interface{}) {
+	log.Printf(msg, args...)
+}
+
+// NullLogger is an implementation of the Logger interface that discards all messages (a no-op)
+var NullLogger = &nullLogger{}
+
+type nullLogger struct{}
+
+func (l *nullLogger) Error(msg string)                      {}
+func (l *nullLogger) Infof(msg string, args ...interface{}) {}
diff --git a/vendor/github.com/uber/jaeger-client-go/metrics.go b/vendor/github.com/uber/jaeger-client-go/metrics.go
new file mode 100644
index 0000000..50e4e22
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/metrics.go
@@ -0,0 +1,119 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"github.com/uber/jaeger-lib/metrics"
+)
+
+// Metrics is a container of all stats emitted by Jaeger tracer.
+type Metrics struct {
+	// Number of traces started by this tracer as sampled
+	TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y" help:"Number of traces started by this tracer as sampled"`
+
+	// Number of traces started by this tracer as not sampled
+	TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer as not sampled"`
+
+	// Number of traces started by this tracer with delayed sampling
+	TracesStartedDelayedSampling metrics.Counter `metric:"traces" tags:"state=started,sampled=n" help:"Number of traces started by this tracer with delayed sampling"`
+
+	// Number of externally started sampled traces this tracer joined
+	TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y" help:"Number of externally started sampled traces this tracer joined"`
+
+	// Number of externally started not-sampled traces this tracer joined
+	TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n" help:"Number of externally started not-sampled traces this tracer joined"`
+
+	// Number of sampled spans started by this tracer
+	SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y" help:"Number of spans started by this tracer as sampled"`
+
+	// Number of not sampled spans started by this tracer
+	SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n" help:"Number of spans started by this tracer as not sampled"`
+
+	// Number of spans with delayed sampling started by this tracer
+	SpansStartedDelayedSampling metrics.Counter `metric:"started_spans" tags:"sampled=delayed" help:"Number of spans started by this tracer with delayed sampling"`
+
+	// Number of spans finished by this tracer
+	SpansFinishedSampled metrics.Counter `metric:"finished_spans" tags:"sampled=y" help:"Number of sampled spans finished by this tracer"`
+
+	// Number of spans finished by this tracer
+	SpansFinishedNotSampled metrics.Counter `metric:"finished_spans" tags:"sampled=n" help:"Number of not-sampled spans finished by this tracer"`
+
+	// Number of spans finished by this tracer
+	SpansFinishedDelayedSampling metrics.Counter `metric:"finished_spans" tags:"sampled=delayed" help:"Number of spans with delayed sampling finished by this tracer"`
+
+	// Number of errors decoding tracing context
+	DecodingErrors metrics.Counter `metric:"span_context_decoding_errors" help:"Number of errors decoding tracing context"`
+
+	// Number of spans successfully reported
+	ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok" help:"Number of spans successfully reported"`
+
+	// Number of spans not reported due to a Sender failure
+	ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err" help:"Number of spans not reported due to a Sender failure"`
+
+	// Number of spans dropped due to internal queue overflow
+	ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped" help:"Number of spans dropped due to internal queue overflow"`
+
+	// Current number of spans in the reporter queue
+	ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length" help:"Current number of spans in the reporter queue"`
+
+	// Number of times the Sampler succeeded to retrieve sampling strategy
+	SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve sampling strategy"`
+
+	// Number of times the Sampler failed to retrieve sampling strategy
+	SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err" help:"Number of times the Sampler failed to retrieve sampling strategy"`
+
+	// Number of times the Sampler succeeded to retrieve and update sampling strategy
+	SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok" help:"Number of times the Sampler succeeded to retrieve and update sampling strategy"`
+
+	// Number of times the Sampler failed to update sampling strategy
+	SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err" help:"Number of times the Sampler failed to update sampling strategy"`
+
+	// Number of times baggage was successfully written or updated on spans.
+	BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok" help:"Number of times baggage was successfully written or updated on spans"`
+
+	// Number of times baggage failed to write or update on spans.
+	BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err" help:"Number of times baggage failed to write or update on spans"`
+
+	// Number of times baggage was truncated as per baggage restrictions.
+	BaggageTruncate metrics.Counter `metric:"baggage_truncations" help:"Number of times baggage was truncated as per baggage restrictions"`
+
+	// Number of times baggage restrictions were successfully updated.
+	BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok" help:"Number of times baggage restrictions were successfully updated"`
+
+	// Number of times baggage restrictions failed to update.
+	BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err" help:"Number of times baggage restrictions failed to update"`
+
+	// Number of times debug spans were throttled.
+	ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans" help:"Number of times debug spans were throttled"`
+
+	// Number of times throttler successfully updated.
+	ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok" help:"Number of times throttler successfully updated"`
+
+	// Number of times throttler failed to update.
+	ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err" help:"Number of times throttler failed to update"`
+}
+
+// NewMetrics creates a new Metrics struct and initializes it.
+func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics {
+	m := &Metrics{}
+	// TODO the namespace "jaeger" should be configurable
+	metrics.MustInit(m, factory.Namespace(metrics.NSOptions{Name: "jaeger"}).Namespace(metrics.NSOptions{Name: "tracer"}), globalTags)
+	return m
+}
+
+// NewNullMetrics creates a new Metrics struct that won't report any metrics.
+func NewNullMetrics() *Metrics {
+	return NewMetrics(metrics.NullFactory, nil)
+}
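A sketch of constructing the Metrics container. With a real factory (for example the Prometheus factory from jaeger-lib) the counters are registered under the "jaeger.tracer" namespace; NullFactory is used here so the sketch has no external dependencies:

```
package main

import (
	"github.com/uber/jaeger-lib/metrics"

	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// globalTags are attached to every metric emitted by this container.
	m := jaeger.NewMetrics(metrics.NullFactory, map[string]string{"host": "demo-host"})

	m.TracesStartedSampled.Inc(1)
	m.ReporterQueueLength.Update(0)
}
```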
diff --git a/vendor/github.com/uber/jaeger-client-go/observer.go b/vendor/github.com/uber/jaeger-client-go/observer.go
new file mode 100644
index 0000000..7bbd028
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/observer.go
@@ -0,0 +1,88 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import opentracing "github.com/opentracing/opentracing-go"
+
+// Observer can be registered with the Tracer to receive notifications about
+// new Spans.
+//
+// Deprecated: use jaeger.ContribObserver instead.
+type Observer interface {
+	OnStartSpan(operationName string, options opentracing.StartSpanOptions) SpanObserver
+}
+
+// SpanObserver is created by the Observer and receives notifications about
+// other Span events.
+//
+// Deprecated: use jaeger.ContribSpanObserver instead.
+type SpanObserver interface {
+	OnSetOperationName(operationName string)
+	OnSetTag(key string, value interface{})
+	OnFinish(options opentracing.FinishOptions)
+}
+
+// compositeObserver is a dispatcher to other observers
+type compositeObserver struct {
+	observers []ContribObserver
+}
+
+// compositeSpanObserver is a dispatcher to other span observers
+type compositeSpanObserver struct {
+	observers []ContribSpanObserver
+}
+
+// noopSpanObserver is used when there are no observers registered
+// on the Tracer or none of them returns span observers from OnStartSpan.
+var noopSpanObserver = &compositeSpanObserver{}
+
+func (o *compositeObserver) append(contribObserver ContribObserver) {
+	o.observers = append(o.observers, contribObserver)
+}
+
+func (o *compositeObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) ContribSpanObserver {
+	var spanObservers []ContribSpanObserver
+	for _, obs := range o.observers {
+		spanObs, ok := obs.OnStartSpan(sp, operationName, options)
+		if ok {
+			if spanObservers == nil {
+				spanObservers = make([]ContribSpanObserver, 0, len(o.observers))
+			}
+			spanObservers = append(spanObservers, spanObs)
+		}
+	}
+	if len(spanObservers) == 0 {
+		return noopSpanObserver
+	}
+	return &compositeSpanObserver{observers: spanObservers}
+}
+
+func (o *compositeSpanObserver) OnSetOperationName(operationName string) {
+	for _, obs := range o.observers {
+		obs.OnSetOperationName(operationName)
+	}
+}
+
+func (o *compositeSpanObserver) OnSetTag(key string, value interface{}) {
+	for _, obs := range o.observers {
+		obs.OnSetTag(key, value)
+	}
+}
+
+func (o *compositeSpanObserver) OnFinish(options opentracing.FinishOptions) {
+	for _, obs := range o.observers {
+		obs.OnFinish(options)
+	}
+}
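A sketch of a contrib observer that the composite dispatcher above could fan out to. It assumes the ContribObserver and ContribSpanObserver interfaces declared elsewhere in this package (OnStartSpan returning (ContribSpanObserver, bool), plus the three span callbacks dispatched by compositeSpanObserver), and registration via a tracer option such as TracerOptions.ContribObserver is assumed rather than shown:

```
package main

import (
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

// durationObserver is a hypothetical observer that tracks span operation names.
type durationObserver struct{}

type durationSpanObserver struct{ operation string }

// OnStartSpan returns a span observer for every started span.
func (durationObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (jaeger.ContribSpanObserver, bool) {
	return &durationSpanObserver{operation: operationName}, true
}

func (o *durationSpanObserver) OnSetOperationName(operationName string) { o.operation = operationName }
func (o *durationSpanObserver) OnSetTag(key string, value interface{})  {}
func (o *durationSpanObserver) OnFinish(options opentracing.FinishOptions) {
	fmt.Println("finished:", o.operation)
}

func main() {
	// Registration would normally happen when constructing the tracer.
	_ = durationObserver{}
}
```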
diff --git a/vendor/github.com/uber/jaeger-client-go/process.go b/vendor/github.com/uber/jaeger-client-go/process.go
new file mode 100644
index 0000000..30cbf99
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/process.go
@@ -0,0 +1,29 @@
+// Copyright (c) 2018 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// Process holds process specific metadata that's relevant to this client.
+type Process struct {
+	Service string
+	UUID    string
+	Tags    []Tag
+}
+
+// ProcessSetter sets a process. This can be used by any class that requires
+// the process to be set as part of initialization.
+// See internal/throttler/remote/throttler.go for an example.
+type ProcessSetter interface {
+	SetProcess(process Process)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/propagation.go b/vendor/github.com/uber/jaeger-client-go/propagation.go
new file mode 100644
index 0000000..e06459b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/propagation.go
@@ -0,0 +1,325 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"log"
+	"net/url"
+	"strings"
+	"sync"
+
+	opentracing "github.com/opentracing/opentracing-go"
+)
+
+// Injector is responsible for injecting SpanContext instances in a manner suitable
+// for propagation via a format-specific "carrier" object. Typically the
+// injection will take place across an RPC boundary, but message queues and
+// other IPC mechanisms are also reasonable places to use an Injector.
+type Injector interface {
+	// Inject takes `SpanContext` and injects it into `carrier`. The actual type
+	// of `carrier` depends on the `format` passed to `Tracer.Inject()`.
+	//
+	// Implementations may return opentracing.ErrInvalidCarrier or any other
+	// implementation-specific error if injection fails.
+	Inject(ctx SpanContext, carrier interface{}) error
+}
+
+// Extractor is responsible for extracting SpanContext instances from a
+// format-specific "carrier" object. Typically the extraction will take place
+// on the server side of an RPC boundary, but message queues and other IPC
+// mechanisms are also reasonable places to use an Extractor.
+type Extractor interface {
+	// Extract decodes a SpanContext instance from the given `carrier`,
+	// or (nil, opentracing.ErrSpanContextNotFound) if no context could
+	// be found in the `carrier`.
+	Extract(carrier interface{}) (SpanContext, error)
+}
+
+// TextMapPropagator is a combined Injector and Extractor for TextMap format
+type TextMapPropagator struct {
+	headerKeys  *HeadersConfig
+	metrics     Metrics
+	encodeValue func(string) string
+	decodeValue func(string) string
+}
+
+// NewTextMapPropagator creates a combined Injector and Extractor for TextMap format
+func NewTextMapPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
+	return &TextMapPropagator{
+		headerKeys: headerKeys,
+		metrics:    metrics,
+		encodeValue: func(val string) string {
+			return val
+		},
+		decodeValue: func(val string) string {
+			return val
+		},
+	}
+}
+
+// NewHTTPHeaderPropagator creates a combined Injector and Extractor for HTTPHeaders format
+func NewHTTPHeaderPropagator(headerKeys *HeadersConfig, metrics Metrics) *TextMapPropagator {
+	return &TextMapPropagator{
+		headerKeys: headerKeys,
+		metrics:    metrics,
+		encodeValue: func(val string) string {
+			return url.QueryEscape(val)
+		},
+		decodeValue: func(val string) string {
+			// ignore decoding errors, cannot do anything about them
+			if v, err := url.QueryUnescape(val); err == nil {
+				return v
+			}
+			return val
+		},
+	}
+}
+
+// BinaryPropagator is a combined Injector and Extractor for Binary format
+type BinaryPropagator struct {
+	tracer  *Tracer
+	buffers sync.Pool
+}
+
+// NewBinaryPropagator creates a combined Injector and Extractor for Binary format
+func NewBinaryPropagator(tracer *Tracer) *BinaryPropagator {
+	return &BinaryPropagator{
+		tracer:  tracer,
+		buffers: sync.Pool{New: func() interface{} { return &bytes.Buffer{} }},
+	}
+}
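+
+// The binary carrier layout produced by Inject and consumed by Extract is, in
+// big-endian order: trace ID high and low parts (two uint64 values), span ID
+// (uint64), parent span ID (uint64), one flags byte, an int32 count of baggage
+// items, and then for each item an int32 key length, the key bytes, an int32
+// value length, and the value bytes.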
+
+// Inject implements Injector of TextMapPropagator
+func (p *TextMapPropagator) Inject(
+	sc SpanContext,
+	abstractCarrier interface{},
+) error {
+	textMapWriter, ok := abstractCarrier.(opentracing.TextMapWriter)
+	if !ok {
+		return opentracing.ErrInvalidCarrier
+	}
+
+	// Do not encode the string with trace context to avoid accidental double-encoding
+	// if people are using opentracing < 0.10.0. Our colon-separated representation
+	// of the trace context is already safe for HTTP headers.
+	textMapWriter.Set(p.headerKeys.TraceContextHeaderName, sc.String())
+	for k, v := range sc.baggage {
+		safeKey := p.addBaggageKeyPrefix(k)
+		safeVal := p.encodeValue(v)
+		textMapWriter.Set(safeKey, safeVal)
+	}
+	return nil
+}
+
+// Extract implements Extractor of TextMapPropagator
+func (p *TextMapPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+	textMapReader, ok := abstractCarrier.(opentracing.TextMapReader)
+	if !ok {
+		return emptyContext, opentracing.ErrInvalidCarrier
+	}
+	var ctx SpanContext
+	var baggage map[string]string
+	err := textMapReader.ForeachKey(func(rawKey, value string) error {
+		key := strings.ToLower(rawKey) // TODO not necessary for plain TextMap
+		if key == p.headerKeys.TraceContextHeaderName {
+			var err error
+			safeVal := p.decodeValue(value)
+			if ctx, err = ContextFromString(safeVal); err != nil {
+				return err
+			}
+		} else if key == p.headerKeys.JaegerDebugHeader {
+			ctx.debugID = p.decodeValue(value)
+		} else if key == p.headerKeys.JaegerBaggageHeader {
+			if baggage == nil {
+				baggage = make(map[string]string)
+			}
+			for k, v := range p.parseCommaSeparatedMap(value) {
+				baggage[k] = v
+			}
+		} else if strings.HasPrefix(key, p.headerKeys.TraceBaggageHeaderPrefix) {
+			if baggage == nil {
+				baggage = make(map[string]string)
+			}
+			safeKey := p.removeBaggageKeyPrefix(key)
+			safeVal := p.decodeValue(value)
+			baggage[safeKey] = safeVal
+		}
+		return nil
+	})
+	if err != nil {
+		p.metrics.DecodingErrors.Inc(1)
+		return emptyContext, err
+	}
+	if !ctx.traceID.IsValid() && ctx.debugID == "" && len(baggage) == 0 {
+		return emptyContext, opentracing.ErrSpanContextNotFound
+	}
+	ctx.baggage = baggage
+	return ctx, nil
+}
+
+// Inject implements Injector of BinaryPropagator
+func (p *BinaryPropagator) Inject(
+	sc SpanContext,
+	abstractCarrier interface{},
+) error {
+	carrier, ok := abstractCarrier.(io.Writer)
+	if !ok {
+		return opentracing.ErrInvalidCarrier
+	}
+
+	// Handle the tracer context
+	if err := binary.Write(carrier, binary.BigEndian, sc.traceID); err != nil {
+		return err
+	}
+	if err := binary.Write(carrier, binary.BigEndian, sc.spanID); err != nil {
+		return err
+	}
+	if err := binary.Write(carrier, binary.BigEndian, sc.parentID); err != nil {
+		return err
+	}
+	if err := binary.Write(carrier, binary.BigEndian, sc.samplingState.flags()); err != nil {
+		return err
+	}
+
+	// Handle the baggage items
+	if err := binary.Write(carrier, binary.BigEndian, int32(len(sc.baggage))); err != nil {
+		return err
+	}
+	for k, v := range sc.baggage {
+		if err := binary.Write(carrier, binary.BigEndian, int32(len(k))); err != nil {
+			return err
+		}
+		io.WriteString(carrier, k)
+		if err := binary.Write(carrier, binary.BigEndian, int32(len(v))); err != nil {
+			return err
+		}
+		io.WriteString(carrier, v)
+	}
+
+	return nil
+}
+
+// W3C limits https://github.com/w3c/baggage/blob/master/baggage/HTTP_HEADER_FORMAT.md#limits
+const (
+	maxBinaryBaggage      = 180
+	maxBinaryNameValueLen = 4096
+)
+
+// Extract implements Extractor of BinaryPropagator
+func (p *BinaryPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+	carrier, ok := abstractCarrier.(io.Reader)
+	if !ok {
+		return emptyContext, opentracing.ErrInvalidCarrier
+	}
+	var ctx SpanContext
+	ctx.samplingState = &samplingState{}
+
+	if err := binary.Read(carrier, binary.BigEndian, &ctx.traceID); err != nil {
+		return emptyContext, opentracing.ErrSpanContextCorrupted
+	}
+	if err := binary.Read(carrier, binary.BigEndian, &ctx.spanID); err != nil {
+		return emptyContext, opentracing.ErrSpanContextCorrupted
+	}
+	if err := binary.Read(carrier, binary.BigEndian, &ctx.parentID); err != nil {
+		return emptyContext, opentracing.ErrSpanContextCorrupted
+	}
+
+	var flags byte
+	if err := binary.Read(carrier, binary.BigEndian, &flags); err != nil {
+		return emptyContext, opentracing.ErrSpanContextCorrupted
+	}
+	ctx.samplingState.setFlags(flags)
+
+	// Handle the baggage items
+	var numBaggage int32
+	if err := binary.Read(carrier, binary.BigEndian, &numBaggage); err != nil {
+		return emptyContext, opentracing.ErrSpanContextCorrupted
+	}
+	if numBaggage > maxBinaryBaggage {
+		return emptyContext, opentracing.ErrSpanContextCorrupted
+	}
+	if iNumBaggage := int(numBaggage); iNumBaggage > 0 {
+		ctx.baggage = make(map[string]string, iNumBaggage)
+		buf := p.buffers.Get().(*bytes.Buffer)
+		defer p.buffers.Put(buf)
+
+		var keyLen, valLen int32
+		for i := 0; i < iNumBaggage; i++ {
+			if err := binary.Read(carrier, binary.BigEndian, &keyLen); err != nil {
+				return emptyContext, opentracing.ErrSpanContextCorrupted
+			}
+			buf.Reset()
+			buf.Grow(int(keyLen))
+			if n, err := io.CopyN(buf, carrier, int64(keyLen)); err != nil || int32(n) != keyLen {
+				return emptyContext, opentracing.ErrSpanContextCorrupted
+			}
+			key := buf.String()
+
+			if err := binary.Read(carrier, binary.BigEndian, &valLen); err != nil {
+				return emptyContext, opentracing.ErrSpanContextCorrupted
+			}
+			if keyLen+valLen > maxBinaryNameValueLen {
+				return emptyContext, opentracing.ErrSpanContextCorrupted
+			}
+			buf.Reset()
+			buf.Grow(int(valLen))
+			if n, err := io.CopyN(buf, carrier, int64(valLen)); err != nil || int32(n) != valLen {
+				return emptyContext, opentracing.ErrSpanContextCorrupted
+			}
+			ctx.baggage[key] = buf.String()
+		}
+	}
+
+	return ctx, nil
+}
+
+// parseCommaSeparatedMap converts a comma-separated list of key=value pairs into a map,
+// e.g. key1=value1, key2=value2, key3 = value3
+// is converted to map[string]string { "key1" : "value1",
+//                                     "key2" : "value2",
+//                                     "key3" : "value3" }
+func (p *TextMapPropagator) parseCommaSeparatedMap(value string) map[string]string {
+	baggage := make(map[string]string)
+	value, err := url.QueryUnescape(value)
+	if err != nil {
+		log.Printf("Unable to unescape %s, %v", value, err)
+		return baggage
+	}
+	for _, kvpair := range strings.Split(value, ",") {
+		kv := strings.Split(strings.TrimSpace(kvpair), "=")
+		if len(kv) == 2 {
+			baggage[strings.TrimSpace(kv[0])] = kv[1]
+		} else {
+			log.Printf("Malformed value passed in for %s", p.headerKeys.JaegerBaggageHeader)
+		}
+	}
+	return baggage
+}
+
+// addBaggageKeyPrefix converts a baggage item key into HTTP header format
+// by prepending TraceBaggageHeaderPrefix and encoding the key string.
+func (p *TextMapPropagator) addBaggageKeyPrefix(key string) string {
+	// TODO encodeBaggageKeyAsHeader add caching and escaping
+	return fmt.Sprintf("%v%v", p.headerKeys.TraceBaggageHeaderPrefix, key)
+}
+
+func (p *TextMapPropagator) removeBaggageKeyPrefix(key string) string {
+	// TODO decodeBaggageHeaderKey add caching and escaping
+	return key[len(p.headerKeys.TraceBaggageHeaderPrefix):]
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/reference.go b/vendor/github.com/uber/jaeger-client-go/reference.go
new file mode 100644
index 0000000..5646e78
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reference.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import "github.com/opentracing/opentracing-go"
+
+// Reference represents a causal reference to other Spans (via their SpanContext).
+type Reference struct {
+	Type    opentracing.SpanReferenceType
+	Context SpanContext
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter.go b/vendor/github.com/uber/jaeger-client-go/reporter.go
new file mode 100644
index 0000000..a71a92c
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reporter.go
@@ -0,0 +1,322 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"fmt"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+
+	"github.com/uber/jaeger-client-go/internal/reporterstats"
+	"github.com/uber/jaeger-client-go/log"
+)
+
+// Reporter is called by the tracer when a span is completed to report the span to the tracing collector.
+type Reporter interface {
+	// Report submits a new span to collectors, possibly asynchronously and/or with buffering.
+	// If the reporter is processing Spans asynchronously then it needs to Retain() the span,
+	// and then Release() it when no longer needed, to avoid span data corruption.
+	Report(span *Span)
+
+	// Close does a clean shutdown of the reporter, flushing any traces that may be buffered in memory.
+	Close()
+}
+
+// ------------------------------
+
+type nullReporter struct{}
+
+// NewNullReporter creates a no-op reporter that ignores all reported spans.
+func NewNullReporter() Reporter {
+	return &nullReporter{}
+}
+
+// Report implements Report() method of Reporter by doing nothing.
+func (r *nullReporter) Report(span *Span) {
+	// no-op
+}
+
+// Close implements Close() method of Reporter by doing nothing.
+func (r *nullReporter) Close() {
+	// no-op
+}
+
+// ------------------------------
+
+type loggingReporter struct {
+	logger Logger
+}
+
+// NewLoggingReporter creates a reporter that logs all reported spans to the provided logger.
+func NewLoggingReporter(logger Logger) Reporter {
+	return &loggingReporter{logger}
+}
+
+// Report implements Report() method of Reporter by logging the span to the logger.
+func (r *loggingReporter) Report(span *Span) {
+	r.logger.Infof("Reporting span %+v", span)
+}
+
+// Close implements Close() method of Reporter by doing nothing.
+func (r *loggingReporter) Close() {
+	// no-op
+}
+
+// ------------------------------
+
+// InMemoryReporter is used for testing, and simply collects spans in memory.
+type InMemoryReporter struct {
+	spans []opentracing.Span
+	lock  sync.Mutex
+}
+
+// NewInMemoryReporter creates a reporter that stores spans in memory.
+// NOTE: the Tracer should be created with options.PoolSpans = false.
+func NewInMemoryReporter() *InMemoryReporter {
+	return &InMemoryReporter{
+		spans: make([]opentracing.Span, 0, 10),
+	}
+}
+
+// Report implements Report() method of Reporter by storing the span in the buffer.
+func (r *InMemoryReporter) Report(span *Span) {
+	r.lock.Lock()
+	// Need to retain the span otherwise it will be released
+	r.spans = append(r.spans, span.Retain())
+	r.lock.Unlock()
+}
+
+// Close implements Close() method of Reporter
+func (r *InMemoryReporter) Close() {
+	r.Reset()
+}
+
+// SpansSubmitted returns the number of spans accumulated in the buffer.
+func (r *InMemoryReporter) SpansSubmitted() int {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	return len(r.spans)
+}
+
+// GetSpans returns accumulated spans as a copy of the buffer.
+func (r *InMemoryReporter) GetSpans() []opentracing.Span {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+	copied := make([]opentracing.Span, len(r.spans))
+	copy(copied, r.spans)
+	return copied
+}
+
+// Reset clears all accumulated spans.
+func (r *InMemoryReporter) Reset() {
+	r.lock.Lock()
+	defer r.lock.Unlock()
+
+	// Before resetting the collection we need to release the Span memory
+	for _, span := range r.spans {
+		span.(*Span).Release()
+	}
+	r.spans = r.spans[:0]
+}
+
+// ------------------------------
+
+type compositeReporter struct {
+	reporters []Reporter
+}
+
+// NewCompositeReporter creates a reporter that forwards all reported spans to each of the given reporters.
+func NewCompositeReporter(reporters ...Reporter) Reporter {
+	return &compositeReporter{reporters: reporters}
+}
+
+// Report implements Report() method of Reporter by delegating to each underlying reporter.
+func (r *compositeReporter) Report(span *Span) {
+	for _, reporter := range r.reporters {
+		reporter.Report(span)
+	}
+}
+
+// Close implements Close() method of Reporter by closing each underlying reporter.
+func (r *compositeReporter) Close() {
+	for _, reporter := range r.reporters {
+		reporter.Close()
+	}
+}
+
+// ------------- REMOTE REPORTER -----------------
+
+type reporterQueueItemType int
+
+const (
+	defaultQueueSize           = 100
+	defaultBufferFlushInterval = 1 * time.Second
+
+	reporterQueueItemSpan reporterQueueItemType = iota
+	reporterQueueItemClose
+)
+
+type reporterQueueItem struct {
+	itemType reporterQueueItemType
+	span     *Span
+	close    *sync.WaitGroup
+}
+
+// reporterStats implements reporterstats.ReporterStats.
+type reporterStats struct {
+	droppedCount int64 // provided to Transports to report data loss to the backend
+}
+
+// SpansDroppedFromQueue implements reporterstats.ReporterStats.
+func (r *reporterStats) SpansDroppedFromQueue() int64 {
+	return atomic.LoadInt64(&r.droppedCount)
+}
+
+func (r *reporterStats) incDroppedCount() {
+	atomic.AddInt64(&r.droppedCount, 1)
+}
+
+type remoteReporter struct {
+	// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
+	// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
+	queueLength int64 // used to update metrics.Gauge
+	closed      int64 // 0 - not closed, 1 - closed
+
+	reporterOptions
+
+	sender        Transport
+	queue         chan reporterQueueItem
+	reporterStats *reporterStats
+}
+
+// NewRemoteReporter creates a new reporter that sends spans out of process by means of Sender.
+// Calls to Report(Span) return immediately (side effect: if internal buffer is full the span is dropped).
+// Periodically the transport buffer is flushed even if it hasn't reached max packet size.
+// Calls to Close() block until all spans reported prior to the call to Close are flushed.
+func NewRemoteReporter(sender Transport, opts ...ReporterOption) Reporter {
+	options := reporterOptions{}
+	for _, option := range opts {
+		option(&options)
+	}
+	if options.bufferFlushInterval <= 0 {
+		options.bufferFlushInterval = defaultBufferFlushInterval
+	}
+	if options.logger == nil {
+		options.logger = log.NullLogger
+	}
+	if options.metrics == nil {
+		options.metrics = NewNullMetrics()
+	}
+	if options.queueSize <= 0 {
+		options.queueSize = defaultQueueSize
+	}
+	reporter := &remoteReporter{
+		reporterOptions: options,
+		sender:          sender,
+		queue:           make(chan reporterQueueItem, options.queueSize),
+		reporterStats:   new(reporterStats),
+	}
+	if receiver, ok := sender.(reporterstats.Receiver); ok {
+		receiver.SetReporterStats(reporter.reporterStats)
+	}
+	go reporter.processQueue()
+	return reporter
+}
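+
+// An illustrative construction (a sketch only; the UDP agent address and the
+// option values here are assumptions, not defaults of this package):
+//
+//	sender, _ := jaeger.NewUDPTransport("localhost:6831", 0)
+//	reporter := jaeger.NewRemoteReporter(
+//		sender,
+//		jaeger.ReporterOptions.QueueSize(1000),
+//		jaeger.ReporterOptions.BufferFlushInterval(time.Second),
+//	)
+//	defer reporter.Close()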
+
+// Report implements Report() method of Reporter.
+// It passes the span to a background go-routine for submission to Jaeger backend.
+// If the internal queue is full, the span is dropped and metrics.ReporterDropped counter is incremented.
+// If Report() is called after the reporter has been Close()-ed, the additional spans will not be
+// sent to the backend, but the metrics.ReporterDropped counter may not reflect them correctly,
+// because some of them may still be successfully added to the queue.
+func (r *remoteReporter) Report(span *Span) {
+	select {
+	// Need to retain the span otherwise it will be released
+	case r.queue <- reporterQueueItem{itemType: reporterQueueItemSpan, span: span.Retain()}:
+		atomic.AddInt64(&r.queueLength, 1)
+	default:
+		r.metrics.ReporterDropped.Inc(1)
+		r.reporterStats.incDroppedCount()
+	}
+}
+
+// Close implements Close() method of Reporter by waiting for the queue to be drained.
+func (r *remoteReporter) Close() {
+	r.logger.Debugf("closing reporter")
+	if swapped := atomic.CompareAndSwapInt64(&r.closed, 0, 1); !swapped {
+		r.logger.Error("Repeated attempt to close the reporter is ignored")
+		return
+	}
+	r.sendCloseEvent()
+	_ = r.sender.Close()
+}
+
+func (r *remoteReporter) sendCloseEvent() {
+	wg := &sync.WaitGroup{}
+	wg.Add(1)
+	item := reporterQueueItem{itemType: reporterQueueItemClose, close: wg}
+
+	r.queue <- item // if the queue is full we will block until there is space
+	atomic.AddInt64(&r.queueLength, 1)
+	wg.Wait()
+}
+
+// processQueue reads spans from the queue and appends them to the sender, which converts them to
+// Thrift and stores them in an internal buffer. When that buffer fills up, it is flushed by
+// submitting the accumulated spans to Jaeger. The buffer is also flushed automatically every
+// bufferFlushInterval, in case the tracer stopped reporting new spans.
+func (r *remoteReporter) processQueue() {
+	// flush causes the Sender to flush its accumulated spans and clear the buffer
+	flush := func() {
+		if flushed, err := r.sender.Flush(); err != nil {
+			r.metrics.ReporterFailure.Inc(int64(flushed))
+			r.logger.Error(fmt.Sprintf("failed to flush Jaeger spans to server: %s", err.Error()))
+		} else if flushed > 0 {
+			r.metrics.ReporterSuccess.Inc(int64(flushed))
+		}
+	}
+
+	timer := time.NewTicker(r.bufferFlushInterval)
+	for {
+		select {
+		case <-timer.C:
+			flush()
+		case item := <-r.queue:
+			atomic.AddInt64(&r.queueLength, -1)
+			switch item.itemType {
+			case reporterQueueItemSpan:
+				span := item.span
+				if flushed, err := r.sender.Append(span); err != nil {
+					r.metrics.ReporterFailure.Inc(int64(flushed))
+					r.logger.Error(fmt.Sprintf("error reporting Jaeger span %q: %s", span.OperationName(), err.Error()))
+				} else if flushed > 0 {
+					r.metrics.ReporterSuccess.Inc(int64(flushed))
+					// to reduce the number of gauge stats, we only emit queue length on flush
+					r.metrics.ReporterQueueLength.Update(atomic.LoadInt64(&r.queueLength))
+					r.logger.Debugf("flushed %d spans", flushed)
+				}
+				span.Release()
+			case reporterQueueItemClose:
+				timer.Stop()
+				flush()
+				item.close.Done()
+				return
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/reporter_options.go b/vendor/github.com/uber/jaeger-client-go/reporter_options.go
new file mode 100644
index 0000000..2fc0305
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/reporter_options.go
@@ -0,0 +1,71 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"time"
+
+	"github.com/uber/jaeger-client-go/log"
+)
+
+// ReporterOption is a function that sets some option on the reporter.
+type ReporterOption func(c *reporterOptions)
+
+// ReporterOptions is a factory for all available ReporterOption values.
+var ReporterOptions reporterOptions
+
+// reporterOptions control behavior of the reporter.
+type reporterOptions struct {
+	// queueSize is the size of internal queue where reported spans are stored before they are processed in the background
+	queueSize int
+	// bufferFlushInterval is how often the buffer is force-flushed, even if it's not full
+	bufferFlushInterval time.Duration
+	// logger is used to log errors of span submissions
+	logger log.DebugLogger
+	// metrics is used to record runtime stats
+	metrics *Metrics
+}
+
+// QueueSize creates a ReporterOption that sets the size of the internal queue where
+// spans are stored before they are processed.
+func (reporterOptions) QueueSize(queueSize int) ReporterOption {
+	return func(r *reporterOptions) {
+		r.queueSize = queueSize
+	}
+}
+
+// Metrics creates a ReporterOption that initializes Metrics in the reporter,
+// which is used to record runtime statistics.
+func (reporterOptions) Metrics(metrics *Metrics) ReporterOption {
+	return func(r *reporterOptions) {
+		r.metrics = metrics
+	}
+}
+
+// BufferFlushInterval creates a ReporterOption that sets how often the queue
+// is force-flushed.
+func (reporterOptions) BufferFlushInterval(bufferFlushInterval time.Duration) ReporterOption {
+	return func(r *reporterOptions) {
+		r.bufferFlushInterval = bufferFlushInterval
+	}
+}
+
+// Logger creates a ReporterOption that initializes the logger used to log
+// errors of span submissions.
+func (reporterOptions) Logger(logger Logger) ReporterOption {
+	return func(r *reporterOptions) {
+		r.logger = log.DebugLogAdapter(logger)
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md
new file mode 100644
index 0000000..879948e
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/README.md
@@ -0,0 +1,5 @@
+An Observer that can be used to emit RPC metrics
+================================================
+
+It can be attached to the tracer during tracer construction.
+See `ExampleObserver` function in [observer_test.go](./observer_test.go).
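+
+A minimal sketch of attaching it (the metrics factory, sampler, and reporter
+shown here stand in for whatever the application already uses):
+
+```
+observer := rpcmetrics.NewObserver(metricsFactory, rpcmetrics.DefaultNameNormalizer)
+tracer, closer := jaeger.NewTracer(
+    "my-service",
+    jaeger.NewConstSampler(true),
+    jaeger.NewNullReporter(),
+    jaeger.TracerOptions.Observer(observer),
+)
+defer closer.Close()
+opentracing.SetGlobalTracer(tracer)
+```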
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go
new file mode 100644
index 0000000..51aa11b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/doc.go
@@ -0,0 +1,16 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package rpcmetrics implements an Observer that can be used to emit RPC metrics.
+package rpcmetrics
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go
new file mode 100644
index 0000000..3055524
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/endpoints.go
@@ -0,0 +1,63 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+import "sync"
+
+// normalizedEndpoints is a cache for endpointName -> safeName mappings.
+type normalizedEndpoints struct {
+	names       map[string]string
+	maxSize     int
+	defaultName string
+	normalizer  NameNormalizer
+	mux         sync.RWMutex
+}
+
+func newNormalizedEndpoints(maxSize int, normalizer NameNormalizer) *normalizedEndpoints {
+	return &normalizedEndpoints{
+		maxSize:    maxSize,
+		normalizer: normalizer,
+		names:      make(map[string]string, maxSize),
+	}
+}
+
+// normalize looks up the name in the cache; if not found, it uses the normalizer
+// to convert the name to a safe name. If called with more than maxSize unique
+// names, it returns "" for all names beyond those already cached.
+func (n *normalizedEndpoints) normalize(name string) string {
+	n.mux.RLock()
+	norm, ok := n.names[name]
+	l := len(n.names)
+	n.mux.RUnlock()
+	if ok {
+		return norm
+	}
+	if l >= n.maxSize {
+		return ""
+	}
+	return n.normalizeWithLock(name)
+}
+
+func (n *normalizedEndpoints) normalizeWithLock(name string) string {
+	norm := n.normalizer.Normalize(name)
+	n.mux.Lock()
+	defer n.mux.Unlock()
+	// cache may have grown while we were not holding the lock
+	if len(n.names) >= n.maxSize {
+		return ""
+	}
+	n.names[name] = norm
+	return norm
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go
new file mode 100644
index 0000000..a8cec2f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/metrics.go
@@ -0,0 +1,124 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+import (
+	"sync"
+
+	"github.com/uber/jaeger-lib/metrics"
+)
+
+const (
+	otherEndpointsPlaceholder = "other"
+	endpointNameMetricTag     = "endpoint"
+)
+
+// Metrics is a collection of metrics for an endpoint describing
+// throughput, success, errors, and performance.
+type Metrics struct {
+	// RequestCountSuccess is a counter of the total number of successes.
+	RequestCountSuccess metrics.Counter `metric:"requests" tags:"error=false"`
+
+	// RequestCountFailures is a counter of the number of times any failure has been observed.
+	RequestCountFailures metrics.Counter `metric:"requests" tags:"error=true"`
+
+	// RequestLatencySuccess is a latency histogram of successful requests.
+	RequestLatencySuccess metrics.Timer `metric:"request_latency" tags:"error=false"`
+
+	// RequestLatencyFailures is a latency histogram of failed requests.
+	RequestLatencyFailures metrics.Timer `metric:"request_latency" tags:"error=true"`
+
+	// HTTPStatusCode2xx is a counter of the total number of requests with HTTP status code 200-299
+	HTTPStatusCode2xx metrics.Counter `metric:"http_requests" tags:"status_code=2xx"`
+
+	// HTTPStatusCode3xx is a counter of the total number of requests with HTTP status code 300-399
+	HTTPStatusCode3xx metrics.Counter `metric:"http_requests" tags:"status_code=3xx"`
+
+	// HTTPStatusCode4xx is a counter of the total number of requests with HTTP status code 400-499
+	HTTPStatusCode4xx metrics.Counter `metric:"http_requests" tags:"status_code=4xx"`
+
+	// HTTPStatusCode5xx is a counter of the total number of requests with HTTP status code 500-599
+	HTTPStatusCode5xx metrics.Counter `metric:"http_requests" tags:"status_code=5xx"`
+}
+
+func (m *Metrics) recordHTTPStatusCode(statusCode uint16) {
+	if statusCode >= 200 && statusCode < 300 {
+		m.HTTPStatusCode2xx.Inc(1)
+	} else if statusCode >= 300 && statusCode < 400 {
+		m.HTTPStatusCode3xx.Inc(1)
+	} else if statusCode >= 400 && statusCode < 500 {
+		m.HTTPStatusCode4xx.Inc(1)
+	} else if statusCode >= 500 && statusCode < 600 {
+		m.HTTPStatusCode5xx.Inc(1)
+	}
+}
+
+// MetricsByEndpoint is a registry/cache of metrics for each unique endpoint name.
+// Only maxNumberOfEndpoints Metrics are stored, all other endpoint names are mapped
+// to a generic endpoint name "other".
+type MetricsByEndpoint struct {
+	metricsFactory    metrics.Factory
+	endpoints         *normalizedEndpoints
+	metricsByEndpoint map[string]*Metrics
+	mux               sync.RWMutex
+}
+
+func newMetricsByEndpoint(
+	metricsFactory metrics.Factory,
+	normalizer NameNormalizer,
+	maxNumberOfEndpoints int,
+) *MetricsByEndpoint {
+	return &MetricsByEndpoint{
+		metricsFactory:    metricsFactory,
+		endpoints:         newNormalizedEndpoints(maxNumberOfEndpoints, normalizer),
+		metricsByEndpoint: make(map[string]*Metrics, maxNumberOfEndpoints+1), // +1 for "other"
+	}
+}
+
+func (m *MetricsByEndpoint) get(endpoint string) *Metrics {
+	safeName := m.endpoints.normalize(endpoint)
+	if safeName == "" {
+		safeName = otherEndpointsPlaceholder
+	}
+	m.mux.RLock()
+	met := m.metricsByEndpoint[safeName]
+	m.mux.RUnlock()
+	if met != nil {
+		return met
+	}
+
+	return m.getWithWriteLock(safeName)
+}
+
+// split out to make it easier to test
+func (m *MetricsByEndpoint) getWithWriteLock(safeName string) *Metrics {
+	m.mux.Lock()
+	defer m.mux.Unlock()
+
+	// it is possible that the name has already been registered after we released
+	// the read lock and before we grabbed the write lock, so check for that.
+	if met, ok := m.metricsByEndpoint[safeName]; ok {
+		return met
+	}
+
+	// it would be nice to create the struct before locking, since Init() is somewhat
+	// expensive, however some metrics backends (e.g. expvar) may not like duplicate metrics.
+	met := &Metrics{}
+	tags := map[string]string{endpointNameMetricTag: safeName}
+	metrics.Init(met, m.metricsFactory, tags)
+
+	m.metricsByEndpoint[safeName] = met
+	return met
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go
new file mode 100644
index 0000000..148d84b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/normalizer.go
@@ -0,0 +1,101 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+// NameNormalizer is used to convert the endpoint names to strings
+// that can be safely used as tags in the metrics.
+type NameNormalizer interface {
+	Normalize(name string) string
+}
+
+// DefaultNameNormalizer converts endpoint names so that they contain only characters
+// from the safe charset [a-zA-Z0-9-./_]. All other characters are replaced with '-'.
+var DefaultNameNormalizer = &SimpleNameNormalizer{
+	SafeSets: []SafeCharacterSet{
+		&Range{From: 'a', To: 'z'},
+		&Range{From: 'A', To: 'Z'},
+		&Range{From: '0', To: '9'},
+		&Char{'-'},
+		&Char{'_'},
+		&Char{'/'},
+		&Char{'.'},
+	},
+	Replacement: '-',
+}
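+
+// As an illustration, DefaultNameNormalizer.Normalize("get user/123!") returns
+// "get-user/123-": the space and '!' fall outside the safe sets and are
+// replaced with '-', while letters, digits, '/', '.', '-', and '_' are kept.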
+
+// SimpleNameNormalizer uses a set of safe character sets.
+type SimpleNameNormalizer struct {
+	SafeSets    []SafeCharacterSet
+	Replacement byte
+}
+
+// SafeCharacterSet determines if the given character is "safe"
+type SafeCharacterSet interface {
+	IsSafe(c byte) bool
+}
+
+// Range implements SafeCharacterSet
+type Range struct {
+	From, To byte
+}
+
+// IsSafe implements SafeCharacterSet
+func (r *Range) IsSafe(c byte) bool {
+	return c >= r.From && c <= r.To
+}
+
+// Char implements SafeCharacterSet
+type Char struct {
+	Val byte
+}
+
+// IsSafe implements SafeCharacterSet
+func (ch *Char) IsSafe(c byte) bool {
+	return c == ch.Val
+}
+
+// Normalize checks each character in the string against SafeSets,
+// and if it's not safe substitutes it with Replacement.
+func (n *SimpleNameNormalizer) Normalize(name string) string {
+	var retMe []byte
+	nameBytes := []byte(name)
+	for i, b := range nameBytes {
+		if n.safeByte(b) {
+			if retMe != nil {
+				retMe[i] = b
+			}
+		} else {
+			if retMe == nil {
+				retMe = make([]byte, len(nameBytes))
+				copy(retMe[0:i], nameBytes[0:i])
+			}
+			retMe[i] = n.Replacement
+		}
+	}
+	if retMe == nil {
+		return name
+	}
+	return string(retMe)
+}
+
+// safeByte checks b against all safe charsets.
+func (n *SimpleNameNormalizer) safeByte(b byte) bool {
+	for i := range n.SafeSets {
+		if n.SafeSets[i].IsSafe(b) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go
new file mode 100644
index 0000000..eca5ff6
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/rpcmetrics/observer.go
@@ -0,0 +1,171 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package rpcmetrics
+
+import (
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/uber/jaeger-lib/metrics"
+
+	jaeger "github.com/uber/jaeger-client-go"
+)
+
+const defaultMaxNumberOfEndpoints = 200
+
+// Observer is an observer that can emit RPC metrics.
+type Observer struct {
+	metricsByEndpoint *MetricsByEndpoint
+}
+
+// NewObserver creates a new observer that can emit RPC metrics.
+func NewObserver(metricsFactory metrics.Factory, normalizer NameNormalizer) *Observer {
+	return &Observer{
+		metricsByEndpoint: newMetricsByEndpoint(
+			metricsFactory,
+			normalizer,
+			defaultMaxNumberOfEndpoints,
+		),
+	}
+}
+
+// OnStartSpan creates a new Observer for the span.
+func (o *Observer) OnStartSpan(
+	operationName string,
+	options opentracing.StartSpanOptions,
+) jaeger.SpanObserver {
+	return NewSpanObserver(o.metricsByEndpoint, operationName, options)
+}
+
+// SpanKind identifies the span as inbound, outbound, or internal
+type SpanKind int
+
+const (
+	// Local span kind
+	Local SpanKind = iota
+	// Inbound span kind
+	Inbound
+	// Outbound span kind
+	Outbound
+)
+
+// SpanObserver collects RPC metrics
+type SpanObserver struct {
+	metricsByEndpoint *MetricsByEndpoint
+	operationName     string
+	startTime         time.Time
+	mux               sync.Mutex
+	kind              SpanKind
+	httpStatusCode    uint16
+	err               bool
+}
+
+// NewSpanObserver creates a new SpanObserver that can emit RPC metrics.
+func NewSpanObserver(
+	metricsByEndpoint *MetricsByEndpoint,
+	operationName string,
+	options opentracing.StartSpanOptions,
+) *SpanObserver {
+	so := &SpanObserver{
+		metricsByEndpoint: metricsByEndpoint,
+		operationName:     operationName,
+		startTime:         options.StartTime,
+	}
+	for k, v := range options.Tags {
+		so.handleTagInLock(k, v)
+	}
+	return so
+}
+
+// handleTagInLock watches for special tags:
+// - SpanKind
+// - HttpStatusCode
+// - Error
+func (so *SpanObserver) handleTagInLock(key string, value interface{}) {
+	if key == string(ext.SpanKind) {
+		if v, ok := value.(ext.SpanKindEnum); ok {
+			value = string(v)
+		}
+		if v, ok := value.(string); ok {
+			if v == string(ext.SpanKindRPCClientEnum) {
+				so.kind = Outbound
+			} else if v == string(ext.SpanKindRPCServerEnum) {
+				so.kind = Inbound
+			}
+		}
+		return
+	}
+	if key == string(ext.HTTPStatusCode) {
+		if v, ok := value.(uint16); ok {
+			so.httpStatusCode = v
+		} else if v, ok := value.(int); ok {
+			so.httpStatusCode = uint16(v)
+		} else if v, ok := value.(string); ok {
+			if vv, err := strconv.Atoi(v); err == nil {
+				so.httpStatusCode = uint16(vv)
+			}
+		}
+		return
+	}
+	if key == string(ext.Error) {
+		if v, ok := value.(bool); ok {
+			so.err = v
+		} else if v, ok := value.(string); ok {
+			if vv, err := strconv.ParseBool(v); err == nil {
+				so.err = vv
+			}
+		}
+		return
+	}
+}
+
+// OnFinish emits the RPC metrics. It only has an effect when the operation name
+// is not blank and the span kind is an inbound RPC (server).
+func (so *SpanObserver) OnFinish(options opentracing.FinishOptions) {
+	so.mux.Lock()
+	defer so.mux.Unlock()
+
+	if so.operationName == "" || so.kind != Inbound {
+		return
+	}
+
+	mets := so.metricsByEndpoint.get(so.operationName)
+	latency := options.FinishTime.Sub(so.startTime)
+	if so.err {
+		mets.RequestCountFailures.Inc(1)
+		mets.RequestLatencyFailures.Record(latency)
+	} else {
+		mets.RequestCountSuccess.Inc(1)
+		mets.RequestLatencySuccess.Record(latency)
+	}
+	mets.recordHTTPStatusCode(so.httpStatusCode)
+}
+
+// OnSetOperationName records new operation name.
+func (so *SpanObserver) OnSetOperationName(operationName string) {
+	so.mux.Lock()
+	so.operationName = operationName
+	so.mux.Unlock()
+}
+
+// OnSetTag implements SpanObserver
+func (so *SpanObserver) OnSetTag(key string, value interface{}) {
+	so.mux.Lock()
+	so.handleTagInLock(key, value)
+	so.mux.Unlock()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler.go b/vendor/github.com/uber/jaeger-client-go/sampler.go
new file mode 100644
index 0000000..d0be8ad
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler.go
@@ -0,0 +1,516 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"fmt"
+	"math"
+	"strings"
+	"sync"
+
+	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
+	"github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+	defaultMaxOperations = 2000
+)
+
+// Sampler decides whether a new trace should be sampled or not.
+type Sampler interface {
+	// IsSampled decides whether a trace with given `id` and `operation`
+	// should be sampled. This function will also return the tags that
+	// can be used to identify the type of sampling that was applied to
+	// the root span. Most simple samplers would return two tags,
+	// sampler.type and sampler.param, similar to those used in the Configuration
+	IsSampled(id TraceID, operation string) (sampled bool, tags []Tag)
+
+	// Close does a clean shutdown of the sampler, stopping any background
+	// go-routines it may have started.
+	Close()
+
+	// Equal checks if the `other` sampler is functionally equivalent
+	// to this sampler.
+	// TODO (breaking change) remove this function. See PerOperationSampler.Equals for explanation.
+	Equal(other Sampler) bool
+}
+
+// -----------------------
+
+// ConstSampler is a sampler that always makes the same decision.
+type ConstSampler struct {
+	legacySamplerV1Base
+	Decision bool
+	tags     []Tag
+}
+
+// NewConstSampler creates a ConstSampler.
+func NewConstSampler(sample bool) *ConstSampler {
+	tags := []Tag{
+		{key: SamplerTypeTagKey, value: SamplerTypeConst},
+		{key: SamplerParamTagKey, value: sample},
+	}
+	s := &ConstSampler{
+		Decision: sample,
+		tags:     tags,
+	}
+	s.delegate = s.IsSampled
+	return s
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *ConstSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+	return s.Decision, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *ConstSampler) Close() {
+	// nothing to do
+}
+
+// Equal implements Equal() of Sampler.
+func (s *ConstSampler) Equal(other Sampler) bool {
+	if o, ok := other.(*ConstSampler); ok {
+		return s.Decision == o.Decision
+	}
+	return false
+}
+
+// String is used to log sampler details.
+func (s *ConstSampler) String() string {
+	return fmt.Sprintf("ConstSampler(decision=%t)", s.Decision)
+}
+
+// -----------------------
+
+// ProbabilisticSampler is a sampler that randomly samples a certain percentage
+// of traces.
+type ProbabilisticSampler struct {
+	legacySamplerV1Base
+	samplingRate     float64
+	samplingBoundary uint64
+	tags             []Tag
+}
+
+const maxRandomNumber = ^(uint64(1) << 63) // i.e. 0x7fffffffffffffff
+
+// NewProbabilisticSampler creates a sampler that randomly samples a certain percentage of traces specified by the
+// samplingRate, in the range between 0.0 and 1.0.
+//
+// It relies on the fact that new trace IDs are 63bit random numbers themselves, thus making the sampling decision
+// without generating a new random number, but simply calculating if traceID < (samplingRate * 2^63).
+// TODO remove the error from this function for next major release
+func NewProbabilisticSampler(samplingRate float64) (*ProbabilisticSampler, error) {
+	if samplingRate < 0.0 || samplingRate > 1.0 {
+		return nil, fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
+	}
+	return newProbabilisticSampler(samplingRate), nil
+}
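+
+// As an illustration, samplingRate=0.25 yields samplingBoundary ~= 0.25 * (2^63 - 1),
+// so roughly one in four uniformly random 63-bit trace IDs satisfies
+// id.Low&maxRandomNumber <= samplingBoundary and is therefore sampled.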
+
+func newProbabilisticSampler(samplingRate float64) *ProbabilisticSampler {
+	s := new(ProbabilisticSampler)
+	s.delegate = s.IsSampled
+	return s.init(samplingRate)
+}
+
+func (s *ProbabilisticSampler) init(samplingRate float64) *ProbabilisticSampler {
+	s.samplingRate = math.Max(0.0, math.Min(samplingRate, 1.0))
+	s.samplingBoundary = uint64(float64(maxRandomNumber) * s.samplingRate)
+	s.tags = []Tag{
+		{key: SamplerTypeTagKey, value: SamplerTypeProbabilistic},
+		{key: SamplerParamTagKey, value: s.samplingRate},
+	}
+	return s
+}
+
+// SamplingRate returns the sampling probability this sampler was constructed with.
+func (s *ProbabilisticSampler) SamplingRate() float64 {
+	return s.samplingRate
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *ProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+	return s.samplingBoundary >= id.Low&maxRandomNumber, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *ProbabilisticSampler) Close() {
+	// nothing to do
+}
+
+// Equal implements Equal() of Sampler.
+func (s *ProbabilisticSampler) Equal(other Sampler) bool {
+	if o, ok := other.(*ProbabilisticSampler); ok {
+		return s.samplingBoundary == o.samplingBoundary
+	}
+	return false
+}
+
+// Update modifies in-place the sampling rate. Locking must be done externally.
+func (s *ProbabilisticSampler) Update(samplingRate float64) error {
+	if samplingRate < 0.0 || samplingRate > 1.0 {
+		return fmt.Errorf("Sampling Rate must be between 0.0 and 1.0, received %f", samplingRate)
+	}
+	s.init(samplingRate)
+	return nil
+}
+
+// String is used to log sampler details.
+func (s *ProbabilisticSampler) String() string {
+	return fmt.Sprintf("ProbabilisticSampler(samplingRate=%v)", s.samplingRate)
+}
+
+// -----------------------
+
+// RateLimitingSampler samples at most maxTracesPerSecond. The distribution of sampled traces follows
+// burstiness of the service, i.e. a service with uniformly distributed requests will have those
+// requests sampled uniformly as well, but if requests are bursty, especially sub-second, then a
+// number of sequential requests can be sampled each second.
+type RateLimitingSampler struct {
+	legacySamplerV1Base
+	maxTracesPerSecond float64
+	rateLimiter        *utils.ReconfigurableRateLimiter
+	tags               []Tag
+}
+
+// NewRateLimitingSampler creates new RateLimitingSampler.
+func NewRateLimitingSampler(maxTracesPerSecond float64) *RateLimitingSampler {
+	s := new(RateLimitingSampler)
+	s.delegate = s.IsSampled
+	return s.init(maxTracesPerSecond)
+}
+
+func (s *RateLimitingSampler) init(maxTracesPerSecond float64) *RateLimitingSampler {
+	if s.rateLimiter == nil {
+		s.rateLimiter = utils.NewRateLimiter(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
+	} else {
+		s.rateLimiter.Update(maxTracesPerSecond, math.Max(maxTracesPerSecond, 1.0))
+	}
+	s.maxTracesPerSecond = maxTracesPerSecond
+	s.tags = []Tag{
+		{key: SamplerTypeTagKey, value: SamplerTypeRateLimiting},
+		{key: SamplerParamTagKey, value: maxTracesPerSecond},
+	}
+	return s
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *RateLimitingSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+	return s.rateLimiter.CheckCredit(1.0), s.tags
+}
+
+// Update reconfigures the rate limiter, while preserving its accumulated balance.
+// Locking must be done externally.
+func (s *RateLimitingSampler) Update(maxTracesPerSecond float64) {
+	if s.maxTracesPerSecond != maxTracesPerSecond {
+		s.init(maxTracesPerSecond)
+	}
+}
+
+// Close does nothing.
+func (s *RateLimitingSampler) Close() {
+	// nothing to do
+}
+
+// Equal compares with another sampler.
+func (s *RateLimitingSampler) Equal(other Sampler) bool {
+	if o, ok := other.(*RateLimitingSampler); ok {
+		return s.maxTracesPerSecond == o.maxTracesPerSecond
+	}
+	return false
+}
+
+// String is used to log sampler details.
+func (s *RateLimitingSampler) String() string {
+	return fmt.Sprintf("RateLimitingSampler(maxTracesPerSecond=%v)", s.maxTracesPerSecond)
+}
+
+// -----------------------
+
+// GuaranteedThroughputProbabilisticSampler is a sampler that leverages both ProbabilisticSampler and
+// RateLimitingSampler. The RateLimitingSampler is used as a guaranteed lower bound sampler such that
+// every operation is sampled at least once in a time interval defined by the lowerBound, i.e. a lowerBound
+// of 1.0 / (60 * 10) will sample an operation at least once every 10 minutes.
+//
+// The ProbabilisticSampler is given higher priority when tags are emitted, i.e. if IsSampled() for both
+// samplers returns true, the tags for the ProbabilisticSampler will be used.
+type GuaranteedThroughputProbabilisticSampler struct {
+	probabilisticSampler *ProbabilisticSampler
+	lowerBoundSampler    *RateLimitingSampler
+	tags                 []Tag
+	samplingRate         float64
+	lowerBound           float64
+}
+
+// NewGuaranteedThroughputProbabilisticSampler returns a delegating sampler that applies both
+// ProbabilisticSampler and RateLimitingSampler.
+func NewGuaranteedThroughputProbabilisticSampler(
+	lowerBound, samplingRate float64,
+) (*GuaranteedThroughputProbabilisticSampler, error) {
+	return newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate), nil
+}
+
+func newGuaranteedThroughputProbabilisticSampler(lowerBound, samplingRate float64) *GuaranteedThroughputProbabilisticSampler {
+	s := &GuaranteedThroughputProbabilisticSampler{
+		lowerBoundSampler: NewRateLimitingSampler(lowerBound),
+		lowerBound:        lowerBound,
+	}
+	s.setProbabilisticSampler(samplingRate)
+	return s
+}
+
+func (s *GuaranteedThroughputProbabilisticSampler) setProbabilisticSampler(samplingRate float64) {
+	if s.probabilisticSampler == nil {
+		s.probabilisticSampler = newProbabilisticSampler(samplingRate)
+	} else if s.samplingRate != samplingRate {
+		s.probabilisticSampler.init(samplingRate)
+	}
+	// since we don't validate samplingRate, sampler may have clamped it to [0, 1] interval
+	samplingRate = s.probabilisticSampler.SamplingRate()
+	if s.samplingRate != samplingRate || s.tags == nil {
+		s.samplingRate = s.probabilisticSampler.SamplingRate()
+		s.tags = []Tag{
+			{key: SamplerTypeTagKey, value: SamplerTypeLowerBound},
+			{key: SamplerParamTagKey, value: s.samplingRate},
+		}
+	}
+}
+
+// IsSampled implements IsSampled() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+	if sampled, tags := s.probabilisticSampler.IsSampled(id, operation); sampled {
+		s.lowerBoundSampler.IsSampled(id, operation)
+		return true, tags
+	}
+	sampled, _ := s.lowerBoundSampler.IsSampled(id, operation)
+	return sampled, s.tags
+}
+
+// Close implements Close() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) Close() {
+	s.probabilisticSampler.Close()
+	s.lowerBoundSampler.Close()
+}
+
+// Equal implements Equal() of Sampler.
+func (s *GuaranteedThroughputProbabilisticSampler) Equal(other Sampler) bool {
+	// NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
+	// more information.
+	return false
+}
+
+// this function should only be called while holding a Write lock
+func (s *GuaranteedThroughputProbabilisticSampler) update(lowerBound, samplingRate float64) {
+	s.setProbabilisticSampler(samplingRate)
+	if s.lowerBound != lowerBound {
+		s.lowerBoundSampler.Update(lowerBound)
+		s.lowerBound = lowerBound
+	}
+}
+
+func (s GuaranteedThroughputProbabilisticSampler) String() string {
+	return fmt.Sprintf("GuaranteedThroughputProbabilisticSampler(lowerBound=%f, samplingRate=%f)", s.lowerBound, s.samplingRate)
+}
+
+// -----------------------
+
+// PerOperationSampler is a delegating sampler that applies GuaranteedThroughputProbabilisticSampler
+// on a per-operation basis.
+type PerOperationSampler struct {
+	sync.RWMutex
+
+	samplers       map[string]*GuaranteedThroughputProbabilisticSampler
+	defaultSampler *ProbabilisticSampler
+	lowerBound     float64
+	maxOperations  int
+
+	// see description in PerOperationSamplerParams
+	operationNameLateBinding bool
+}
+
+// NewAdaptiveSampler returns a new PerOperationSampler.
+// Deprecated: please use NewPerOperationSampler.
+func NewAdaptiveSampler(strategies *sampling.PerOperationSamplingStrategies, maxOperations int) (*PerOperationSampler, error) {
+	return NewPerOperationSampler(PerOperationSamplerParams{
+		MaxOperations: maxOperations,
+		Strategies:    strategies,
+	}), nil
+}
+
+// PerOperationSamplerParams defines parameters when creating PerOperationSampler.
+type PerOperationSamplerParams struct {
+	// Max number of operations that will be tracked. Other operations will be given the default strategy.
+	MaxOperations int
+
+	// Opt-in feature for applications that require late binding of span name via explicit call to SetOperationName.
+	// When this feature is enabled, the sampler will return retryable=true from OnCreateSpan(), thus leaving
+	// the sampling decision as non-final (and the span as writeable). This may lead to degraded performance
+	// in applications that always provide the correct span name on trace creation.
+	//
+	// For backwards compatibility this option is off by default.
+	OperationNameLateBinding bool
+
+	// Initial configuration of the sampling strategies (usually retrieved from the backend by Remote Sampler).
+	Strategies *sampling.PerOperationSamplingStrategies
+}
+
+// NewPerOperationSampler returns a new PerOperationSampler.
+func NewPerOperationSampler(params PerOperationSamplerParams) *PerOperationSampler {
+	if params.MaxOperations <= 0 {
+		params.MaxOperations = defaultMaxOperations
+	}
+	samplers := make(map[string]*GuaranteedThroughputProbabilisticSampler)
+	for _, strategy := range params.Strategies.PerOperationStrategies {
+		sampler := newGuaranteedThroughputProbabilisticSampler(
+			params.Strategies.DefaultLowerBoundTracesPerSecond,
+			strategy.ProbabilisticSampling.SamplingRate,
+		)
+		samplers[strategy.Operation] = sampler
+	}
+	return &PerOperationSampler{
+		samplers:                 samplers,
+		defaultSampler:           newProbabilisticSampler(params.Strategies.DefaultSamplingProbability),
+		lowerBound:               params.Strategies.DefaultLowerBoundTracesPerSecond,
+		maxOperations:            params.MaxOperations,
+		operationNameLateBinding: params.OperationNameLateBinding,
+	}
+}
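+
+// An illustrative construction (the operation name and rates are made-up values,
+// and the thrift-gen strategy structs are sketched as typically generated):
+//
+//	sampler := jaeger.NewPerOperationSampler(jaeger.PerOperationSamplerParams{
+//		MaxOperations: 500,
+//		Strategies: &sampling.PerOperationSamplingStrategies{
+//			DefaultSamplingProbability:       0.001,
+//			DefaultLowerBoundTracesPerSecond: 1.0 / 60,
+//			PerOperationStrategies: []*sampling.OperationSamplingStrategy{{
+//				Operation:             "GET:/health",
+//				ProbabilisticSampling: &sampling.ProbabilisticSamplingStrategy{SamplingRate: 0.0001},
+//			}},
+//		},
+//	})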
+
+// IsSampled is not used and only exists to match Sampler V1 API.
+// TODO (breaking change) remove when upgrading everything to SamplerV2
+func (s *PerOperationSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+	return false, nil
+}
+
+func (s *PerOperationSampler) trySampling(span *Span, operationName string) (bool, []Tag) {
+	samplerV1 := s.getSamplerForOperation(operationName)
+	var sampled bool
+	var tags []Tag
+	if span.context.samplingState.isLocalRootSpan(span.context.spanID) {
+		sampled, tags = samplerV1.IsSampled(span.context.TraceID(), operationName)
+	}
+	return sampled, tags
+}
+
+// OnCreateSpan implements OnCreateSpan of SamplerV2.
+func (s *PerOperationSampler) OnCreateSpan(span *Span) SamplingDecision {
+	sampled, tags := s.trySampling(span, span.OperationName())
+	return SamplingDecision{Sample: sampled, Retryable: s.operationNameLateBinding, Tags: tags}
+}
+
+// OnSetOperationName implements OnSetOperationName of SamplerV2.
+func (s *PerOperationSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+	sampled, tags := s.trySampling(span, operationName)
+	return SamplingDecision{Sample: sampled, Retryable: false, Tags: tags}
+}
+
+// OnSetTag implements OnSetTag of SamplerV2.
+func (s *PerOperationSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+	return SamplingDecision{Sample: false, Retryable: true}
+}
+
+// OnFinishSpan implements OnFinishSpan of SamplerV2.
+func (s *PerOperationSampler) OnFinishSpan(span *Span) SamplingDecision {
+	return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *PerOperationSampler) getSamplerForOperation(operation string) Sampler {
+	s.RLock()
+	sampler, ok := s.samplers[operation]
+	if ok {
+		defer s.RUnlock()
+		return sampler
+	}
+	s.RUnlock()
+	s.Lock()
+	defer s.Unlock()
+
+	// Check if sampler has already been created
+	sampler, ok = s.samplers[operation]
+	if ok {
+		return sampler
+	}
+	// Store only up to maxOperations of unique ops.
+	if len(s.samplers) >= s.maxOperations {
+		return s.defaultSampler
+	}
+	newSampler := newGuaranteedThroughputProbabilisticSampler(s.lowerBound, s.defaultSampler.SamplingRate())
+	s.samplers[operation] = newSampler
+	return newSampler
+}
+
+// Close invokes Close on all underlying samplers.
+func (s *PerOperationSampler) Close() {
+	s.Lock()
+	defer s.Unlock()
+	for _, sampler := range s.samplers {
+		sampler.Close()
+	}
+	s.defaultSampler.Close()
+}
+
+func (s *PerOperationSampler) String() string {
+	var sb strings.Builder
+
+	fmt.Fprintf(&sb, "PerOperationSampler(defaultSampler=%v, ", s.defaultSampler)
+	fmt.Fprintf(&sb, "lowerBound=%f, ", s.lowerBound)
+	fmt.Fprintf(&sb, "maxOperations=%d, ", s.maxOperations)
+	fmt.Fprintf(&sb, "operationNameLateBinding=%t, ", s.operationNameLateBinding)
+	fmt.Fprintf(&sb, "numOperations=%d,\n", len(s.samplers))
+	fmt.Fprintf(&sb, "samplers=[")
+	for operationName, sampler := range s.samplers {
+		fmt.Fprintf(&sb, "\n(operationName=%s, sampler=%v)", operationName, sampler)
+	}
+	fmt.Fprintf(&sb, "])")
+
+	return sb.String()
+}
+
+// Equal is not used.
+// TODO (breaking change) remove this in the future
+func (s *PerOperationSampler) Equal(other Sampler) bool {
+	// NB The Equal() function is overly expensive for PerOperationSampler since it's composed of multiple
+	// samplers which all need to be initialized before this function can be called for a comparison.
+	// Therefore, PerOperationSampler uses the update() function to only alter the samplers that need
+	// changing. Hence this function always returns false so that the update function can be called.
+	// Once the Equal() function is removed from the Sampler API, this will no longer be needed.
+	return false
+}
+
+func (s *PerOperationSampler) update(strategies *sampling.PerOperationSamplingStrategies) {
+	s.Lock()
+	defer s.Unlock()
+	newSamplers := map[string]*GuaranteedThroughputProbabilisticSampler{}
+	for _, strategy := range strategies.PerOperationStrategies {
+		operation := strategy.Operation
+		samplingRate := strategy.ProbabilisticSampling.SamplingRate
+		lowerBound := strategies.DefaultLowerBoundTracesPerSecond
+		if sampler, ok := s.samplers[operation]; ok {
+			sampler.update(lowerBound, samplingRate)
+			newSamplers[operation] = sampler
+		} else {
+			sampler := newGuaranteedThroughputProbabilisticSampler(
+				lowerBound,
+				samplingRate,
+			)
+			newSamplers[operation] = sampler
+		}
+	}
+	s.lowerBound = strategies.DefaultLowerBoundTracesPerSecond
+	if s.defaultSampler.SamplingRate() != strategies.DefaultSamplingProbability {
+		s.defaultSampler = newProbabilisticSampler(strategies.DefaultSamplingProbability)
+	}
+	s.samplers = newSamplers
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
new file mode 100644
index 0000000..119f0a1
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote.go
@@ -0,0 +1,358 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"encoding/json"
+	"fmt"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/uber/jaeger-client-go/log"
+	"github.com/uber/jaeger-client-go/thrift-gen/sampling"
+)
+
+const (
+	defaultRemoteSamplingTimeout   = 10 * time.Second
+	defaultSamplingRefreshInterval = time.Minute
+)
+
+// SamplingStrategyFetcher is used to fetch sampling strategy updates from remote server.
+type SamplingStrategyFetcher interface {
+	Fetch(service string) ([]byte, error)
+}
+
+// SamplingStrategyParser is used to parse sampling strategy updates. The output object
+// should be of the type that is recognized by the SamplerUpdaters.
+type SamplingStrategyParser interface {
+	Parse(response []byte) (interface{}, error)
+}
+
+// SamplerUpdater is used by RemotelyControlledSampler to apply sampling strategies,
+// retrieved from the remote config server, to the current sampler. The updater can modify
+// the sampler in place if the sampler supports it, or create a new one.
+//
+// If the strategy does not contain configuration for the sampler in question,
+// updater must return modifiedSampler=nil to give other updaters a chance to inspect
+// the sampling strategy response.
+//
+// RemotelyControlledSampler invokes the updaters while holding a lock on the main sampler.
+type SamplerUpdater interface {
+	Update(sampler SamplerV2, strategy interface{}) (modified SamplerV2, err error)
+}
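+
+// A minimal sketch of a custom updater (myStrategy is a hypothetical type used
+// purely for illustration). Returning nil lets the remaining updaters inspect
+// strategies this updater does not recognize:
+//
+//     type myUpdater struct{}
+//
+//     func (myUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+//         if s, ok := strategy.(*myStrategy); ok {
+//             return newProbabilisticSampler(s.SamplingRate), nil
+//         }
+//         return nil, nil // not our strategy; let other updaters handle it
+//     }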
+
+// RemotelyControlledSampler is a delegating sampler that polls a remote server
+// for the appropriate sampling strategy, constructs a corresponding sampler and
+// delegates to it for sampling decisions.
+type RemotelyControlledSampler struct {
+	// These fields must be first in the struct because `sync/atomic` expects 64-bit alignment.
+	// Cf. https://github.com/uber/jaeger-client-go/issues/155, https://goo.gl/zW7dgq
+	closed int64 // 0 - not closed, 1 - closed
+
+	sync.RWMutex // used to serialize access to samplerOptions.sampler
+	samplerOptions
+
+	serviceName string
+	doneChan    chan *sync.WaitGroup
+}
+
+// NewRemotelyControlledSampler creates a sampler that periodically pulls
+// the sampling strategy from an HTTP sampling server (e.g. jaeger-agent).
+func NewRemotelyControlledSampler(
+	serviceName string,
+	opts ...SamplerOption,
+) *RemotelyControlledSampler {
+	options := new(samplerOptions).applyOptionsAndDefaults(opts...)
+	sampler := &RemotelyControlledSampler{
+		samplerOptions: *options,
+		serviceName:    serviceName,
+		doneChan:       make(chan *sync.WaitGroup),
+	}
+	go sampler.pollController()
+	return sampler
+}
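+
+// A typical construction sketch; the option helpers are defined in
+// sampler_remote_options.go, and the URL shown is an assumed local agent endpoint:
+//
+//     sampler := NewRemotelyControlledSampler(
+//         "my-service",
+//         SamplerOptions.SamplingServerURL("http://localhost:5778/sampling"),
+//         SamplerOptions.SamplingRefreshInterval(time.Minute),
+//     )
+//     defer sampler.Close()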
+
+// IsSampled implements IsSampled() of Sampler.
+// TODO (breaking change) remove when Sampler V1 is removed
+func (s *RemotelyControlledSampler) IsSampled(id TraceID, operation string) (bool, []Tag) {
+	return false, nil
+}
+
+// OnCreateSpan implements OnCreateSpan of SamplerV2.
+func (s *RemotelyControlledSampler) OnCreateSpan(span *Span) SamplingDecision {
+	s.RLock()
+	defer s.RUnlock()
+	return s.sampler.OnCreateSpan(span)
+}
+
+// OnSetOperationName implements OnSetOperationName of SamplerV2.
+func (s *RemotelyControlledSampler) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+	s.RLock()
+	defer s.RUnlock()
+	return s.sampler.OnSetOperationName(span, operationName)
+}
+
+// OnSetTag implements OnSetTag of SamplerV2.
+func (s *RemotelyControlledSampler) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+	s.RLock()
+	defer s.RUnlock()
+	return s.sampler.OnSetTag(span, key, value)
+}
+
+// OnFinishSpan implements OnFinishSpan of SamplerV2.
+func (s *RemotelyControlledSampler) OnFinishSpan(span *Span) SamplingDecision {
+	s.RLock()
+	defer s.RUnlock()
+	return s.sampler.OnFinishSpan(span)
+}
+
+// Close implements Close() of Sampler.
+func (s *RemotelyControlledSampler) Close() {
+	if swapped := atomic.CompareAndSwapInt64(&s.closed, 0, 1); !swapped {
+		s.logger.Error("Repeated attempt to close the sampler is ignored")
+		return
+	}
+
+	var wg sync.WaitGroup
+	wg.Add(1)
+	s.doneChan <- &wg
+	wg.Wait()
+}
+
+// Equal implements Equal() of Sampler.
+func (s *RemotelyControlledSampler) Equal(other Sampler) bool {
+	// NB The Equal() function is expensive and will be removed. See PerOperationSampler.Equal() for
+	// more information.
+	return false
+}
+
+func (s *RemotelyControlledSampler) pollController() {
+	ticker := time.NewTicker(s.samplingRefreshInterval)
+	defer ticker.Stop()
+	s.pollControllerWithTicker(ticker)
+}
+
+func (s *RemotelyControlledSampler) pollControllerWithTicker(ticker *time.Ticker) {
+	for {
+		select {
+		case <-ticker.C:
+			s.UpdateSampler()
+		case wg := <-s.doneChan:
+			wg.Done()
+			return
+		}
+	}
+}
+
+// Sampler returns the currently active sampler.
+func (s *RemotelyControlledSampler) Sampler() SamplerV2 {
+	s.RLock()
+	defer s.RUnlock()
+	return s.sampler
+}
+func (s *RemotelyControlledSampler) setSampler(sampler SamplerV2) {
+	s.Lock()
+	defer s.Unlock()
+	s.sampler = sampler
+}
+
+// UpdateSampler forces the sampler to fetch the sampling strategy from the backend server.
+// This function is called automatically on a timer, but can also be safely called manually, e.g. from tests.
+func (s *RemotelyControlledSampler) UpdateSampler() {
+	res, err := s.samplingFetcher.Fetch(s.serviceName)
+	if err != nil {
+		s.metrics.SamplerQueryFailure.Inc(1)
+		s.logger.Infof("failed to fetch sampling strategy: %v", err)
+		return
+	}
+	strategy, err := s.samplingParser.Parse(res)
+	if err != nil {
+		s.metrics.SamplerUpdateFailure.Inc(1)
+		s.logger.Infof("failed to parse sampling strategy response: %v", err)
+		return
+	}
+
+	s.Lock()
+	defer s.Unlock()
+
+	s.metrics.SamplerRetrieved.Inc(1)
+	if err := s.updateSamplerViaUpdaters(strategy); err != nil {
+		s.metrics.SamplerUpdateFailure.Inc(1)
+		s.logger.Infof("failed to handle sampling strategy response %+v. Got error: %v", res, err)
+		return
+	}
+	s.metrics.SamplerUpdated.Inc(1)
+}
+
+// NB: this function should only be called while holding a Write lock
+func (s *RemotelyControlledSampler) updateSamplerViaUpdaters(strategy interface{}) error {
+	for _, updater := range s.updaters {
+		sampler, err := updater.Update(s.sampler, strategy)
+		if err != nil {
+			return err
+		}
+		if sampler != nil {
+			s.logger.Debugf("sampler updated: %+v", sampler)
+			s.sampler = sampler
+			return nil
+		}
+	}
+	return fmt.Errorf("unsupported sampling strategy %+v", strategy)
+}
+
+// -----------------------
+
+// ProbabilisticSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+type ProbabilisticSamplerUpdater struct{}
+
+// Update implements Update of SamplerUpdater.
+func (u *ProbabilisticSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+	type response interface {
+		GetProbabilisticSampling() *sampling.ProbabilisticSamplingStrategy
+	}
+	var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+	if resp, ok := strategy.(response); ok {
+		if probabilistic := resp.GetProbabilisticSampling(); probabilistic != nil {
+			if ps, ok := sampler.(*ProbabilisticSampler); ok {
+				if err := ps.Update(probabilistic.SamplingRate); err != nil {
+					return nil, err
+				}
+				return sampler, nil
+			}
+			return newProbabilisticSampler(probabilistic.SamplingRate), nil
+		}
+	}
+	return nil, nil
+}
+
+// -----------------------
+
+// RateLimitingSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+type RateLimitingSamplerUpdater struct{}
+
+// Update implements Update of SamplerUpdater.
+func (u *RateLimitingSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+	type response interface {
+		GetRateLimitingSampling() *sampling.RateLimitingSamplingStrategy
+	}
+	var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+	if resp, ok := strategy.(response); ok {
+		if rateLimiting := resp.GetRateLimitingSampling(); rateLimiting != nil {
+			rateLimit := float64(rateLimiting.MaxTracesPerSecond)
+			if rl, ok := sampler.(*RateLimitingSampler); ok {
+				rl.Update(rateLimit)
+				return rl, nil
+			}
+			return NewRateLimitingSampler(rateLimit), nil
+		}
+	}
+	return nil, nil
+}
+
+// -----------------------
+
+// AdaptiveSamplerUpdater is used by RemotelyControlledSampler to parse sampling configuration.
+// Fields have the same meaning as in PerOperationSamplerParams.
+type AdaptiveSamplerUpdater struct {
+	MaxOperations            int
+	OperationNameLateBinding bool
+}
+
+// Update implements Update of SamplerUpdater.
+func (u *AdaptiveSamplerUpdater) Update(sampler SamplerV2, strategy interface{}) (SamplerV2, error) {
+	type response interface {
+		GetOperationSampling() *sampling.PerOperationSamplingStrategies
+	}
+	var _ response = new(sampling.SamplingStrategyResponse) // sanity signature check
+	if p, ok := strategy.(response); ok {
+		if operations := p.GetOperationSampling(); operations != nil {
+			if as, ok := sampler.(*PerOperationSampler); ok {
+				as.update(operations)
+				return as, nil
+			}
+			return NewPerOperationSampler(PerOperationSamplerParams{
+				MaxOperations:            u.MaxOperations,
+				OperationNameLateBinding: u.OperationNameLateBinding,
+				Strategies:               operations,
+			}), nil
+		}
+	}
+	return nil, nil
+}
+
+// -----------------------
+
+type httpSamplingStrategyFetcher struct {
+	serverURL  string
+	logger     log.DebugLogger
+	httpClient http.Client
+}
+
+func newHTTPSamplingStrategyFetcher(serverURL string, logger log.DebugLogger) *httpSamplingStrategyFetcher {
+	customTransport := http.DefaultTransport.(*http.Transport).Clone()
+	customTransport.ResponseHeaderTimeout = defaultRemoteSamplingTimeout
+
+	return &httpSamplingStrategyFetcher{
+		serverURL: serverURL,
+		logger:    logger,
+		httpClient: http.Client{
+			Transport: customTransport,
+		},
+	}
+}
+
+func (f *httpSamplingStrategyFetcher) Fetch(serviceName string) ([]byte, error) {
+	v := url.Values{}
+	v.Set("service", serviceName)
+	uri := f.serverURL + "?" + v.Encode()
+
+	resp, err := f.httpClient.Get(uri)
+	if err != nil {
+		return nil, err
+	}
+
+	defer func() {
+		if err := resp.Body.Close(); err != nil {
+			f.logger.Error(fmt.Sprintf("failed to close HTTP response body: %+v", err))
+		}
+	}()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		return nil, err
+	}
+
+	if resp.StatusCode >= 400 {
+		return nil, fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
+	}
+
+	return body, nil
+}
+
+// -----------------------
+
+type samplingStrategyParser struct{}
+
+func (p *samplingStrategyParser) Parse(response []byte) (interface{}, error) {
+	strategy := new(sampling.SamplingStrategyResponse)
+	if err := json.Unmarshal(response, strategy); err != nil {
+		return nil, err
+	}
+	return strategy, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
new file mode 100644
index 0000000..64b028b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_remote_options.go
@@ -0,0 +1,159 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"time"
+
+	"github.com/uber/jaeger-client-go/log"
+)
+
+// SamplerOption is a function that sets some option on the sampler
+type SamplerOption func(options *samplerOptions)
+
+// SamplerOptions is a factory for all available SamplerOption's.
+var SamplerOptions SamplerOptionsFactory
+
+// SamplerOptionsFactory is a factory for all available SamplerOption's.
+// The type acts as a namespace for factory functions. It is public to
+// make the functions discoverable via godoc. It is recommended to use them
+// via the global SamplerOptions variable.
+type SamplerOptionsFactory struct{}
+
+type samplerOptions struct {
+	metrics                 *Metrics
+	sampler                 SamplerV2
+	logger                  log.DebugLogger
+	samplingServerURL       string
+	samplingRefreshInterval time.Duration
+	samplingFetcher         SamplingStrategyFetcher
+	samplingParser          SamplingStrategyParser
+	updaters                []SamplerUpdater
+	posParams               PerOperationSamplerParams
+}
+
+// Metrics creates a SamplerOption that initializes Metrics on the sampler,
+// which is used to emit statistics.
+func (SamplerOptionsFactory) Metrics(m *Metrics) SamplerOption {
+	return func(o *samplerOptions) {
+		o.metrics = m
+	}
+}
+
+// MaxOperations creates a SamplerOption that sets the maximum number of
+// operations the sampler will keep track of.
+func (SamplerOptionsFactory) MaxOperations(maxOperations int) SamplerOption {
+	return func(o *samplerOptions) {
+		o.posParams.MaxOperations = maxOperations
+	}
+}
+
+// OperationNameLateBinding creates a SamplerOption that sets the respective
+// field in the PerOperationSamplerParams.
+func (SamplerOptionsFactory) OperationNameLateBinding(enable bool) SamplerOption {
+	return func(o *samplerOptions) {
+		o.posParams.OperationNameLateBinding = enable
+	}
+}
+
+// InitialSampler creates a SamplerOption that sets the initial sampler
+// to use before a remote sampler is created and used.
+func (SamplerOptionsFactory) InitialSampler(sampler Sampler) SamplerOption {
+	return func(o *samplerOptions) {
+		o.sampler = samplerV1toV2(sampler)
+	}
+}
+
+// Logger creates a SamplerOption that sets the logger used by the sampler.
+func (SamplerOptionsFactory) Logger(logger Logger) SamplerOption {
+	return func(o *samplerOptions) {
+		o.logger = log.DebugLogAdapter(logger)
+	}
+}
+
+// SamplingServerURL creates a SamplerOption that sets the sampling server URL
+// of the local agent that contains the sampling strategies.
+func (SamplerOptionsFactory) SamplingServerURL(samplingServerURL string) SamplerOption {
+	return func(o *samplerOptions) {
+		o.samplingServerURL = samplingServerURL
+	}
+}
+
+// SamplingRefreshInterval creates a SamplerOption that sets how often the
+// sampler will poll the local agent for the appropriate sampling strategy.
+func (SamplerOptionsFactory) SamplingRefreshInterval(samplingRefreshInterval time.Duration) SamplerOption {
+	return func(o *samplerOptions) {
+		o.samplingRefreshInterval = samplingRefreshInterval
+	}
+}
+
+// SamplingStrategyFetcher creates a SamplerOption that initializes sampling strategy fetcher.
+func (SamplerOptionsFactory) SamplingStrategyFetcher(fetcher SamplingStrategyFetcher) SamplerOption {
+	return func(o *samplerOptions) {
+		o.samplingFetcher = fetcher
+	}
+}
+
+// SamplingStrategyParser creates a SamplerOption that initializes sampling strategy parser.
+func (SamplerOptionsFactory) SamplingStrategyParser(parser SamplingStrategyParser) SamplerOption {
+	return func(o *samplerOptions) {
+		o.samplingParser = parser
+	}
+}
+
+// Updaters creates a SamplerOption that initializes sampler updaters.
+func (SamplerOptionsFactory) Updaters(updaters ...SamplerUpdater) SamplerOption {
+	return func(o *samplerOptions) {
+		o.updaters = updaters
+	}
+}
+
+func (o *samplerOptions) applyOptionsAndDefaults(opts ...SamplerOption) *samplerOptions {
+	for _, option := range opts {
+		option(o)
+	}
+	if o.sampler == nil {
+		o.sampler = newProbabilisticSampler(0.001)
+	}
+	if o.logger == nil {
+		o.logger = log.NullLogger
+	}
+	if o.samplingServerURL == "" {
+		o.samplingServerURL = DefaultSamplingServerURL
+	}
+	if o.metrics == nil {
+		o.metrics = NewNullMetrics()
+	}
+	if o.samplingRefreshInterval <= 0 {
+		o.samplingRefreshInterval = defaultSamplingRefreshInterval
+	}
+	if o.samplingFetcher == nil {
+		o.samplingFetcher = newHTTPSamplingStrategyFetcher(o.samplingServerURL, o.logger)
+	}
+	if o.samplingParser == nil {
+		o.samplingParser = new(samplingStrategyParser)
+	}
+	if o.updaters == nil {
+		o.updaters = []SamplerUpdater{
+			&AdaptiveSamplerUpdater{
+				MaxOperations:            o.posParams.MaxOperations,
+				OperationNameLateBinding: o.posParams.OperationNameLateBinding,
+			},
+			new(ProbabilisticSamplerUpdater),
+			new(RateLimitingSamplerUpdater),
+		}
+	}
+	return o
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/sampler_v2.go b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
new file mode 100644
index 0000000..a50671a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/sampler_v2.go
@@ -0,0 +1,93 @@
+// Copyright (c) 2019 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+// SamplingDecision is returned by the V2 samplers.
+type SamplingDecision struct {
+	Sample    bool
+	Retryable bool
+	Tags      []Tag
+}
+
+// SamplerV2 is an extension of the V1 samplers that allows sampling decisions
+// to be made at different points of the span lifecycle.
+type SamplerV2 interface {
+	OnCreateSpan(span *Span) SamplingDecision
+	OnSetOperationName(span *Span, operationName string) SamplingDecision
+	OnSetTag(span *Span, key string, value interface{}) SamplingDecision
+	OnFinishSpan(span *Span) SamplingDecision
+
+	// Close does a clean shutdown of the sampler, stopping any background
+	// go-routines it may have started.
+	Close()
+}
+
+// samplerV1toV2 wraps a legacy V1 sampler in an adapter that makes it look like a V2 sampler.
+func samplerV1toV2(s Sampler) SamplerV2 {
+	if s2, ok := s.(SamplerV2); ok {
+		return s2
+	}
+	type legacySamplerV1toV2Adapter struct {
+		legacySamplerV1Base
+	}
+	return &legacySamplerV1toV2Adapter{
+		legacySamplerV1Base: legacySamplerV1Base{
+			delegate: s.IsSampled,
+		},
+	}
+}
+
+// SamplerV2Base can be used by V2 samplers to implement dummy V1 methods.
+// Supporting V1 API is required because Tracer configuration only accepts V1 Sampler
+// for backwards compatibility reasons.
+// TODO (breaking change) remove this in the next major release
+type SamplerV2Base struct{}
+
+// IsSampled implements IsSampled of Sampler.
+func (SamplerV2Base) IsSampled(id TraceID, operation string) (sampled bool, tags []Tag) {
+	return false, nil
+}
+
+// Close implements Close of Sampler.
+func (SamplerV2Base) Close() {}
+
+// Equal implements Equal of Sampler.
+func (SamplerV2Base) Equal(other Sampler) bool { return false }
+
+// legacySamplerV1Base is used as a base for simple samplers that only implement
+// the legacy isSampled() function that is not sensitive to its arguments.
+type legacySamplerV1Base struct {
+	delegate func(id TraceID, operation string) (sampled bool, tags []Tag)
+}
+
+func (s *legacySamplerV1Base) OnCreateSpan(span *Span) SamplingDecision {
+	isSampled, tags := s.delegate(span.context.traceID, span.operationName)
+	return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
+}
+
+func (s *legacySamplerV1Base) OnSetOperationName(span *Span, operationName string) SamplingDecision {
+	isSampled, tags := s.delegate(span.context.traceID, span.operationName)
+	return SamplingDecision{Sample: isSampled, Retryable: false, Tags: tags}
+}
+
+func (s *legacySamplerV1Base) OnSetTag(span *Span, key string, value interface{}) SamplingDecision {
+	return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *legacySamplerV1Base) OnFinishSpan(span *Span) SamplingDecision {
+	return SamplingDecision{Sample: false, Retryable: true}
+}
+
+func (s *legacySamplerV1Base) Close() {}
diff --git a/vendor/github.com/uber/jaeger-client-go/span.go b/vendor/github.com/uber/jaeger-client-go/span.go
new file mode 100644
index 0000000..997cffd
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/span.go
@@ -0,0 +1,503 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+	"github.com/opentracing/opentracing-go/log"
+)
+
+// Span implements opentracing.Span
+type Span struct {
+	// referenceCounter is used to extend the lifetime of
+	// the object before it is returned to the pool.
+	referenceCounter int32
+
+	sync.RWMutex
+
+	tracer *Tracer
+
+	// TODO: (breaking change) change to use a pointer
+	context SpanContext
+
+	// The name of the "operation" this span is an instance of.
+	// Known as a "span name" in some implementations.
+	operationName string
+
+	// firstInProcess, if true, indicates that this span is the root of the (sub)tree
+	// of spans in the current process. In other words, it is true for root spans
+	// and for ingress spans when the process joins an existing trace.
+	firstInProcess bool
+
+	// startTime is the timestamp indicating when the span began, with microsecond precision.
+	startTime time.Time
+
+	// duration is the duration of the span, with microsecond precision.
+	// A zero value means the duration is unknown.
+	duration time.Duration
+
+	// tags attached to this span
+	tags []Tag
+
+	// The span's "micro-log"
+	logs []opentracing.LogRecord
+
+	// The number of logs dropped because of MaxLogsPerSpan.
+	numDroppedLogs int
+
+	// references for this span
+	references []Reference
+
+	observer ContribSpanObserver
+}
+
+// Tag is a simple key value wrapper.
+// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
+type Tag struct {
+	key   string
+	value interface{}
+}
+
+// NewTag creates a new Tag.
+// TODO (breaking change) deprecate in the next major release, use opentracing.Tag instead.
+func NewTag(key string, value interface{}) Tag {
+	return Tag{key: key, value: value}
+}
+
+// SetOperationName sets or changes the operation name.
+func (s *Span) SetOperationName(operationName string) opentracing.Span {
+	s.Lock()
+	s.operationName = operationName
+	ctx := s.context
+	s.Unlock()
+	if !ctx.isSamplingFinalized() {
+		decision := s.tracer.sampler.OnSetOperationName(s, operationName)
+		s.applySamplingDecision(decision, true)
+	}
+	s.observer.OnSetOperationName(operationName)
+	return s
+}
+
+// SetTag implements SetTag() of opentracing.Span
+func (s *Span) SetTag(key string, value interface{}) opentracing.Span {
+	return s.setTagInternal(key, value, true)
+}
+
+func (s *Span) setTagInternal(key string, value interface{}, lock bool) opentracing.Span {
+	var ctx SpanContext
+	var operationName string
+	if lock {
+		ctx = s.SpanContext()
+		operationName = s.OperationName()
+	} else {
+		ctx = s.context
+		operationName = s.operationName
+	}
+
+	s.observer.OnSetTag(key, value)
+	if key == string(ext.SamplingPriority) && !setSamplingPriority(ctx.samplingState, operationName, s.tracer, value) {
+		return s
+	}
+	if !ctx.isSamplingFinalized() {
+		decision := s.tracer.sampler.OnSetTag(s, key, value)
+		s.applySamplingDecision(decision, lock)
+	}
+	if ctx.isWriteable() {
+		if lock {
+			s.Lock()
+			defer s.Unlock()
+		}
+		s.appendTagNoLocking(key, value)
+	}
+	return s
+}
+
+// SpanContext returns span context
+func (s *Span) SpanContext() SpanContext {
+	s.Lock()
+	defer s.Unlock()
+	return s.context
+}
+
+// StartTime returns span start time
+func (s *Span) StartTime() time.Time {
+	s.Lock()
+	defer s.Unlock()
+	return s.startTime
+}
+
+// Duration returns span duration
+func (s *Span) Duration() time.Duration {
+	s.Lock()
+	defer s.Unlock()
+	return s.duration
+}
+
+// Tags returns tags for span
+func (s *Span) Tags() opentracing.Tags {
+	s.Lock()
+	defer s.Unlock()
+	var result = make(opentracing.Tags, len(s.tags))
+	for _, tag := range s.tags {
+		result[tag.key] = tag.value
+	}
+	return result
+}
+
+// Logs returns micro logs for span
+func (s *Span) Logs() []opentracing.LogRecord {
+	s.Lock()
+	defer s.Unlock()
+
+	logs := append([]opentracing.LogRecord(nil), s.logs...)
+	if s.numDroppedLogs != 0 {
+		fixLogs(logs, s.numDroppedLogs)
+	}
+
+	return logs
+}
+
+// References returns references for this span
+func (s *Span) References() []opentracing.SpanReference {
+	s.Lock()
+	defer s.Unlock()
+
+	if len(s.references) == 0 {
+		return nil
+	}
+
+	result := make([]opentracing.SpanReference, len(s.references))
+	for i, r := range s.references {
+		result[i] = opentracing.SpanReference{Type: r.Type, ReferencedContext: r.Context}
+	}
+	return result
+}
+
+func (s *Span) appendTagNoLocking(key string, value interface{}) {
+	s.tags = append(s.tags, Tag{key: key, value: value})
+}
+
+// LogFields implements opentracing.Span API
+func (s *Span) LogFields(fields ...log.Field) {
+	s.Lock()
+	defer s.Unlock()
+	if !s.context.IsSampled() {
+		return
+	}
+	s.logFieldsNoLocking(fields...)
+}
+
+// this function should only be called while holding a Write lock
+func (s *Span) logFieldsNoLocking(fields ...log.Field) {
+	lr := opentracing.LogRecord{
+		Fields:    fields,
+		Timestamp: time.Now(),
+	}
+	s.appendLogNoLocking(lr)
+}
+
+// LogKV implements opentracing.Span API
+func (s *Span) LogKV(alternatingKeyValues ...interface{}) {
+	s.RLock()
+	sampled := s.context.IsSampled()
+	s.RUnlock()
+	if !sampled {
+		return
+	}
+	fields, err := log.InterleavedKVToFields(alternatingKeyValues...)
+	if err != nil {
+		s.LogFields(log.Error(err), log.String("function", "LogKV"))
+		return
+	}
+	s.LogFields(fields...)
+}
+
+// LogEvent implements opentracing.Span API
+func (s *Span) LogEvent(event string) {
+	s.Log(opentracing.LogData{Event: event})
+}
+
+// LogEventWithPayload implements opentracing.Span API
+func (s *Span) LogEventWithPayload(event string, payload interface{}) {
+	s.Log(opentracing.LogData{Event: event, Payload: payload})
+}
+
+// Log implements opentracing.Span API
+func (s *Span) Log(ld opentracing.LogData) {
+	s.Lock()
+	defer s.Unlock()
+	if s.context.IsSampled() {
+		if ld.Timestamp.IsZero() {
+			ld.Timestamp = s.tracer.timeNow()
+		}
+		s.appendLogNoLocking(ld.ToLogRecord())
+	}
+}
+
+// this function should only be called while holding a Write lock
+func (s *Span) appendLogNoLocking(lr opentracing.LogRecord) {
+	maxLogs := s.tracer.options.maxLogsPerSpan
+	if maxLogs == 0 || len(s.logs) < maxLogs {
+		s.logs = append(s.logs, lr)
+		return
+	}
+
+	// We have too many logs. We don't touch the first numOld logs; we treat the
+	// rest as a circular buffer and overwrite the oldest log among those.
+	numOld := (maxLogs - 1) / 2
+	numNew := maxLogs - numOld
+	s.logs[numOld+s.numDroppedLogs%numNew] = lr
+	s.numDroppedLogs++
+}
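+
+// Worked example of the arithmetic above: with maxLogs = 5, numOld is 2 and
+// numNew is 3, so logs[0] and logs[1] are never overwritten, and each
+// overflowing record lands at index 2 + numDroppedLogs%3, cycling through
+// indices 2, 3 and 4.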
+
+// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move at
+// the end (i.e. pos circular left shifts).
+func rotateLogBuffer(buf []opentracing.LogRecord, pos int) {
+	// This algorithm is described in:
+	//    http://www.cplusplus.com/reference/algorithm/rotate
+	for first, middle, next := 0, pos, pos; first != middle; {
+		buf[first], buf[next] = buf[next], buf[first]
+		first++
+		next++
+		if next == len(buf) {
+			next = middle
+		} else if first == middle {
+			middle = next
+		}
+	}
+}
+
+func fixLogs(logs []opentracing.LogRecord, numDroppedLogs int) {
+	// We dropped some log events, which means that we used part of Logs as a
+	// circular buffer (see appendLog). De-circularize it.
+	numOld := (len(logs) - 1) / 2
+	numNew := len(logs) - numOld
+	rotateLogBuffer(logs[numOld:], numDroppedLogs%numNew)
+
+	// Replace the log in the middle (the oldest "new" log) with information
+	// about the dropped logs. This means that we are effectively dropping one
+	// more "new" log.
+	numDropped := numDroppedLogs + 1
+	logs[numOld] = opentracing.LogRecord{
+		// Keep the timestamp of the last dropped event.
+		Timestamp: logs[numOld].Timestamp,
+		Fields: []log.Field{
+			log.String("event", "dropped Span logs"),
+			log.Int("dropped_log_count", numDropped),
+			log.String("component", "jaeger-client"),
+		},
+	}
+}
+
+func (s *Span) fixLogsIfDropped() {
+	if s.numDroppedLogs == 0 {
+		return
+	}
+	fixLogs(s.logs, s.numDroppedLogs)
+	s.numDroppedLogs = 0
+}
+
+// SetBaggageItem implements SetBaggageItem() of opentracing.SpanContext.
+// The call is proxied via tracer.baggageSetter to allow policies to be applied
+// before allowing to set/replace baggage keys.
+// The setter eventually stores a new SpanContext with extended baggage:
+//
+//     span.context = span.context.WithBaggageItem(key, value)
+//
+//  See SpanContext.WithBaggageItem() for explanation why it's done this way.
+func (s *Span) SetBaggageItem(key, value string) opentracing.Span {
+	s.Lock()
+	defer s.Unlock()
+	s.tracer.setBaggage(s, key, value)
+	return s
+}
+
+// BaggageItem implements BaggageItem() of opentracing.SpanContext
+func (s *Span) BaggageItem(key string) string {
+	s.RLock()
+	defer s.RUnlock()
+	return s.context.baggage[key]
+}
+
+// Finish implements opentracing.Span API
+// After Finish() is called the Span object is returned to the allocator unless the reporter retains it,
+// so the Span object must not be used afterwards because it may no longer be valid.
+func (s *Span) Finish() {
+	s.FinishWithOptions(opentracing.FinishOptions{})
+}
+
+// FinishWithOptions implements opentracing.Span API
+func (s *Span) FinishWithOptions(options opentracing.FinishOptions) {
+	if options.FinishTime.IsZero() {
+		options.FinishTime = s.tracer.timeNow()
+	}
+	s.observer.OnFinish(options)
+	s.Lock()
+	s.duration = options.FinishTime.Sub(s.startTime)
+	ctx := s.context
+	s.Unlock()
+	if !ctx.isSamplingFinalized() {
+		decision := s.tracer.sampler.OnFinishSpan(s)
+		s.applySamplingDecision(decision, true)
+	}
+	if ctx.IsSampled() {
+		s.Lock()
+		s.fixLogsIfDropped()
+		if len(options.LogRecords) > 0 || len(options.BulkLogData) > 0 {
+			// Note: bulk logs are not subject to maxLogsPerSpan limit
+			if options.LogRecords != nil {
+				s.logs = append(s.logs, options.LogRecords...)
+			}
+			for _, ld := range options.BulkLogData {
+				s.logs = append(s.logs, ld.ToLogRecord())
+			}
+		}
+		s.Unlock()
+	}
+	// call reportSpan even for non-sampled traces, to return span to the pool
+	// and update metrics counter
+	s.tracer.reportSpan(s)
+}
+
+// Context implements opentracing.Span API
+func (s *Span) Context() opentracing.SpanContext {
+	s.Lock()
+	defer s.Unlock()
+	return s.context
+}
+
+// Tracer implements opentracing.Span API
+func (s *Span) Tracer() opentracing.Tracer {
+	return s.tracer
+}
+
+func (s *Span) String() string {
+	s.RLock()
+	defer s.RUnlock()
+	return s.context.String()
+}
+
+// OperationName allows retrieving current operation name.
+func (s *Span) OperationName() string {
+	s.RLock()
+	defer s.RUnlock()
+	return s.operationName
+}
+
+// Retain increases object counter to increase the lifetime of the object
+func (s *Span) Retain() *Span {
+	atomic.AddInt32(&s.referenceCounter, 1)
+	return s
+}
+
+// Release decrements the reference counter and returns the span to the
+// allocator when the counter drops below zero.
+func (s *Span) Release() {
+	if atomic.AddInt32(&s.referenceCounter, -1) == -1 {
+		s.tracer.spanAllocator.Put(s)
+	}
+}
+
+// reset span state and release unused data
+func (s *Span) reset() {
+	s.firstInProcess = false
+	s.context = emptyContext
+	s.operationName = ""
+	s.tracer = nil
+	s.startTime = time.Time{}
+	s.duration = 0
+	s.observer = nil
+	atomic.StoreInt32(&s.referenceCounter, 0)
+
+	// Note: To reuse memory we can save the pointers on the heap
+	s.tags = s.tags[:0]
+	s.logs = s.logs[:0]
+	s.numDroppedLogs = 0
+	s.references = s.references[:0]
+}
+
+func (s *Span) serviceName() string {
+	return s.tracer.serviceName
+}
+
+func (s *Span) applySamplingDecision(decision SamplingDecision, lock bool) {
+	var ctx SpanContext
+	if lock {
+		ctx = s.SpanContext()
+	} else {
+		ctx = s.context
+	}
+
+	if !decision.Retryable {
+		ctx.samplingState.setFinal()
+	}
+	if decision.Sample {
+		ctx.samplingState.setSampled()
+		if len(decision.Tags) > 0 {
+			if lock {
+				s.Lock()
+				defer s.Unlock()
+			}
+			for _, tag := range decision.Tags {
+				s.appendTagNoLocking(tag.key, tag.value)
+			}
+		}
+	}
+}
+
+// setSamplingPriority returns true if the flag was updated successfully, false otherwise.
+// The behavior of setSamplingPriority is surprising
+// If noDebugFlagOnForcedSampling is set
+//     setSamplingPriority(..., 1) always sets only flagSampled
+// If noDebugFlagOnForcedSampling is unset, and isDebugAllowed passes
+//     setSamplingPriority(..., 1) sets both flagSampled and flagDebug
+// However,
+//     setSamplingPriority(..., 0) always only resets flagSampled
+//
+// This means that doing a setSamplingPriority(..., 1) followed by setSamplingPriority(..., 0) can
+// leave flagDebug set
+func setSamplingPriority(state *samplingState, operationName string, tracer *Tracer, value interface{}) bool {
+	val, ok := value.(uint16)
+	if !ok {
+		return false
+	}
+	if val == 0 {
+		state.unsetSampled()
+		state.setFinal()
+		return true
+	}
+	if tracer.options.noDebugFlagOnForcedSampling {
+		state.setSampled()
+		state.setFinal()
+		return true
+	} else if tracer.isDebugAllowed(operationName) {
+		state.setDebugAndSampled()
+		state.setFinal()
+		return true
+	}
+	return false
+}
+
+// EnableFirehose enables firehose flag on the span context
+func EnableFirehose(s *Span) {
+	s.Lock()
+	defer s.Unlock()
+	s.context.samplingState.setFirehose()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/span_allocator.go b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
new file mode 100644
index 0000000..6fe0cd0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/span_allocator.go
@@ -0,0 +1,56 @@
+// Copyright (c) 2019 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import "sync"
+
+// SpanAllocator is an abstraction for managing span allocations.
+type SpanAllocator interface {
+	Get() *Span
+	Put(*Span)
+}
+
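+// syncPollSpanAllocator recycles Span objects through a sync.Pool. Spans are
+// reset in Put so that no state leaks between reuses.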
+type syncPollSpanAllocator struct {
+	spanPool sync.Pool
+}
+
+func newSyncPollSpanAllocator() SpanAllocator {
+	return &syncPollSpanAllocator{
+		spanPool: sync.Pool{New: func() interface{} {
+			return &Span{}
+		}},
+	}
+}
+
+func (pool *syncPollSpanAllocator) Get() *Span {
+	return pool.spanPool.Get().(*Span)
+}
+
+func (pool *syncPollSpanAllocator) Put(span *Span) {
+	span.reset()
+	pool.spanPool.Put(span)
+}
+
+type simpleSpanAllocator struct{}
+
+func (pool simpleSpanAllocator) Get() *Span {
+	return &Span{}
+}
+
+func (pool simpleSpanAllocator) Put(span *Span) {
+	// @comment https://github.com/jaegertracing/jaeger-client-go/pull/381#issuecomment-475904351
+	// since finished spans are not reused, no need to reset them
+	// span.reset()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/span_context.go b/vendor/github.com/uber/jaeger-client-go/span_context.go
new file mode 100644
index 0000000..5b2307b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/span_context.go
@@ -0,0 +1,418 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"sync"
+
+	"go.uber.org/atomic"
+)
+
+const (
+	flagSampled  = 1
+	flagDebug    = 2
+	flagFirehose = 8
+)
+
+var (
+	errEmptyTracerStateString     = errors.New("Cannot convert empty string to tracer state")
+	errMalformedTracerStateString = errors.New("String does not match tracer state format")
+
+	emptyContext = SpanContext{}
+)
+
+// TraceID represents unique 128bit identifier of a trace
+type TraceID struct {
+	High, Low uint64
+}
+
+// SpanID represents unique 64bit identifier of a span
+type SpanID uint64
+
+// SpanContext represents propagated span identity and state
+type SpanContext struct {
+	// traceID represents globally unique ID of the trace.
+	// Usually generated as a random number.
+	traceID TraceID
+
+	// spanID represents span ID that must be unique within its trace,
+	// but does not have to be globally unique.
+	spanID SpanID
+
+	// parentID refers to the ID of the parent span.
+	// Should be 0 if the current span is a root span.
+	parentID SpanID
+
+	// Distributed Context baggage. This is a snapshot in time.
+	baggage map[string]string
+
+	// debugID can be set to some correlation ID when the context is being
+	// extracted from a TextMap carrier.
+	//
+	// See JaegerDebugHeader in constants.go
+	debugID string
+
+	// samplingState is shared across all spans
+	samplingState *samplingState
+
+	// remote indicates that span context represents a remote parent
+	remote bool
+}
+
+type samplingState struct {
+	// Span context's state flags that are propagated across processes. Only lower 8 bits are used.
+	// We use an int32 instead of byte to be able to use CAS operations.
+	stateFlags atomic.Int32
+
+	// When state is not final, sampling will be retried on other span write operations,
+	// like SetOperationName / SetTag, and the spans will remain writable.
+	final atomic.Bool
+
+	// localRootSpan stores the SpanID of the first span created in this process for a given trace.
+	localRootSpan SpanID
+
+	// extendedState allows samplers to keep intermediate state.
+	// The keys and values in this map are completely opaque: interface{} -> interface{}.
+	extendedState sync.Map
+}
+
+func (s *samplingState) isLocalRootSpan(id SpanID) bool {
+	return id == s.localRootSpan
+}
+
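+// setFlag ORs newFlag into the state flags using a compare-and-swap retry loop,
+// so concurrent updates do not lose bits set by other goroutines.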
+func (s *samplingState) setFlag(newFlag int32) {
+	swapped := false
+	for !swapped {
+		old := s.stateFlags.Load()
+		swapped = s.stateFlags.CAS(old, old|newFlag)
+	}
+}
+
+func (s *samplingState) unsetFlag(newFlag int32) {
+	swapped := false
+	for !swapped {
+		old := s.stateFlags.Load()
+		swapped = s.stateFlags.CAS(old, old&^newFlag)
+	}
+}
+
+func (s *samplingState) setSampled() {
+	s.setFlag(flagSampled)
+}
+
+func (s *samplingState) unsetSampled() {
+	s.unsetFlag(flagSampled)
+}
+
+func (s *samplingState) setDebugAndSampled() {
+	s.setFlag(flagDebug | flagSampled)
+}
+
+func (s *samplingState) setFirehose() {
+	s.setFlag(flagFirehose)
+}
+
+func (s *samplingState) setFlags(flags byte) {
+	s.stateFlags.Store(int32(flags))
+}
+
+func (s *samplingState) setFinal() {
+	s.final.Store(true)
+}
+
+func (s *samplingState) flags() byte {
+	return byte(s.stateFlags.Load())
+}
+
+func (s *samplingState) isSampled() bool {
+	return s.stateFlags.Load()&flagSampled == flagSampled
+}
+
+func (s *samplingState) isDebug() bool {
+	return s.stateFlags.Load()&flagDebug == flagDebug
+}
+
+func (s *samplingState) isFirehose() bool {
+	return s.stateFlags.Load()&flagFirehose == flagFirehose
+}
+
+func (s *samplingState) isFinal() bool {
+	return s.final.Load()
+}
+
+func (s *samplingState) extendedStateForKey(key interface{}, initValue func() interface{}) interface{} {
+	if value, ok := s.extendedState.Load(key); ok {
+		return value
+	}
+	value := initValue()
+	value, _ = s.extendedState.LoadOrStore(key, value)
+	return value
+}
+
+// ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext
+func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
+	for k, v := range c.baggage {
+		if !handler(k, v) {
+			break
+		}
+	}
+}
+
+// IsSampled returns whether this trace was chosen for permanent storage
+// by the sampling mechanism of the tracer.
+func (c SpanContext) IsSampled() bool {
+	return c.samplingState.isSampled()
+}
+
+// IsDebug indicates whether sampling was explicitly requested by the service.
+func (c SpanContext) IsDebug() bool {
+	return c.samplingState.isDebug()
+}
+
+// IsSamplingFinalized indicates whether the sampling decision has been finalized.
+func (c SpanContext) IsSamplingFinalized() bool {
+	return c.samplingState.isFinal()
+}
+
+// IsFirehose indicates whether the firehose flag was set
+func (c SpanContext) IsFirehose() bool {
+	return c.samplingState.isFirehose()
+}
+
+// ExtendedSamplingState returns the custom state object for a given key. If the value for this key does not exist,
+// it is initialized via the initValue function. This state can be used by samplers (e.g. x.PrioritySampler).
+func (c SpanContext) ExtendedSamplingState(key interface{}, initValue func() interface{}) interface{} {
+	return c.samplingState.extendedStateForKey(key, initValue)
+}
+
+// IsValid indicates whether this context actually represents a valid trace.
+func (c SpanContext) IsValid() bool {
+	return c.traceID.IsValid() && c.spanID != 0
+}
+
+// SetFirehose enables firehose mode for this trace.
+func (c SpanContext) SetFirehose() {
+	c.samplingState.setFirehose()
+}
+
+func (c SpanContext) String() string {
+	var flags int32
+	if c.samplingState != nil {
+		flags = c.samplingState.stateFlags.Load()
+	}
+	if c.traceID.High == 0 {
+		return fmt.Sprintf("%016x:%016x:%016x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
+	}
+	return fmt.Sprintf("%016x%016x:%016x:%016x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), flags)
+}
+
+// ContextFromString reconstructs the Context encoded in a string
+func ContextFromString(value string) (SpanContext, error) {
+	var context SpanContext
+	if value == "" {
+		return emptyContext, errEmptyTracerStateString
+	}
+	parts := strings.Split(value, ":")
+	if len(parts) != 4 {
+		return emptyContext, errMalformedTracerStateString
+	}
+	var err error
+	if context.traceID, err = TraceIDFromString(parts[0]); err != nil {
+		return emptyContext, err
+	}
+	if context.spanID, err = SpanIDFromString(parts[1]); err != nil {
+		return emptyContext, err
+	}
+	if context.parentID, err = SpanIDFromString(parts[2]); err != nil {
+		return emptyContext, err
+	}
+	flags, err := strconv.ParseUint(parts[3], 10, 8)
+	if err != nil {
+		return emptyContext, err
+	}
+	context.samplingState = &samplingState{}
+	context.samplingState.setFlags(byte(flags))
+	return context, nil
+}
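+
+// Example of the accepted format: trace ID, span ID, parent ID and flags,
+// colon-separated, with the IDs in hex and the flags in decimal (the IDs below
+// are illustrative only):
+//
+//     ctx, err := ContextFromString("6995c5ac3efc5966:5dd4d9f4e0bcb95f:0:1")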
+
+// TraceID returns the trace ID of this span context
+func (c SpanContext) TraceID() TraceID {
+	return c.traceID
+}
+
+// SpanID returns the span ID of this span context
+func (c SpanContext) SpanID() SpanID {
+	return c.spanID
+}
+
+// ParentID returns the parent span ID of this span context
+func (c SpanContext) ParentID() SpanID {
+	return c.parentID
+}
+
+// Flags returns the bitmap containing such bits as 'sampled' and 'debug'.
+func (c SpanContext) Flags() byte {
+	return c.samplingState.flags()
+}
+
+// Span can be written to if it is sampled or the sampling decision has not been finalized.
+func (c SpanContext) isWriteable() bool {
+	state := c.samplingState
+	return !state.isFinal() || state.isSampled()
+}
+
+func (c SpanContext) isSamplingFinalized() bool {
+	return c.samplingState.isFinal()
+}
+
+// NewSpanContext creates a new instance of SpanContext
+func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext {
+	samplingState := &samplingState{}
+	if sampled {
+		samplingState.setSampled()
+	}
+
+	return SpanContext{
+		traceID:       traceID,
+		spanID:        spanID,
+		parentID:      parentID,
+		samplingState: samplingState,
+		baggage:       baggage}
+}
+
+// CopyFrom copies data from ctx into this context, including span identity and baggage.
+// TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing.
+func (c *SpanContext) CopyFrom(ctx *SpanContext) {
+	c.traceID = ctx.traceID
+	c.spanID = ctx.spanID
+	c.parentID = ctx.parentID
+	c.samplingState = ctx.samplingState
+	if l := len(ctx.baggage); l > 0 {
+		c.baggage = make(map[string]string, l)
+		for k, v := range ctx.baggage {
+			c.baggage[k] = v
+		}
+	} else {
+		c.baggage = nil
+	}
+}
+
+// WithBaggageItem creates a new context with an extra baggage item.
+// The baggage item is deleted if a blank value is provided.
+//
+// The SpanContext is designed to be immutable and passed by value. As such,
+// it cannot contain any locks, and should only hold immutable data, including baggage.
+// Another reason for why baggage is immutable is when the span context is passed
+// as a parent when starting a new span. The new span's baggage cannot affect the parent
+// span's baggage, so the child span either needs to take a copy of the parent baggage
+// (which is expensive and unnecessary since baggage rarely changes in the life span of
+// a trace), or it needs to do a copy-on-write, which is the approach taken here.
+func (c SpanContext) WithBaggageItem(key, value string) SpanContext {
+	var newBaggage map[string]string
+	// unset baggage item
+	if value == "" {
+		if _, ok := c.baggage[key]; !ok {
+			return c
+		}
+		newBaggage = make(map[string]string, len(c.baggage))
+		for k, v := range c.baggage {
+			newBaggage[k] = v
+		}
+		delete(newBaggage, key)
+		return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
+	}
+	if c.baggage == nil {
+		newBaggage = map[string]string{key: value}
+	} else {
+		newBaggage = make(map[string]string, len(c.baggage)+1)
+		for k, v := range c.baggage {
+			newBaggage[k] = v
+		}
+		newBaggage[key] = value
+	}
+	// Use positional parameters so the compiler will help catch new fields.
+	return SpanContext{c.traceID, c.spanID, c.parentID, newBaggage, "", c.samplingState, c.remote}
+}
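+
+// Usage sketch (copy-on-write: the receiver is never modified):
+//
+//     child := parent.WithBaggageItem("user-id", "42") // add or replace an item
+//     child = child.WithBaggageItem("user-id", "")     // delete the item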
+
+// isDebugIDContainerOnly returns true when the instance of the context is only
+// used to return the debug/correlation ID from extract() method. This happens
+// in the situation when "jaeger-debug-id" header is passed in the carrier to
+// the extract() method, but the request otherwise has no span context in it.
+// Previously this would've returned opentracing.ErrSpanContextNotFound from the
+// extract method, but now it returns a dummy context with only debugID filled in.
+//
+// See JaegerDebugHeader in constants.go
+// See TextMapPropagator#Extract
+func (c *SpanContext) isDebugIDContainerOnly() bool {
+	return !c.traceID.IsValid() && c.debugID != ""
+}
+
+// ------- TraceID -------
+
+func (t TraceID) String() string {
+	if t.High == 0 {
+		return fmt.Sprintf("%016x", t.Low)
+	}
+	return fmt.Sprintf("%016x%016x", t.High, t.Low)
+}
+
+// TraceIDFromString creates a TraceID from a hexadecimal string
+func TraceIDFromString(s string) (TraceID, error) {
+	var hi, lo uint64
+	var err error
+	if len(s) > 32 {
+		return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s)
+	} else if len(s) > 16 {
+		hiLen := len(s) - 16
+		if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil {
+			return TraceID{}, err
+		}
+		if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil {
+			return TraceID{}, err
+		}
+	} else {
+		if lo, err = strconv.ParseUint(s, 16, 64); err != nil {
+			return TraceID{}, err
+		}
+	}
+	return TraceID{High: hi, Low: lo}, nil
+}
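+
+// For example, "00000000000000010000000000000002" parses to
+// TraceID{High: 1, Low: 2}, while "2" parses to TraceID{High: 0, Low: 2}.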
+
+// IsValid checks if the trace ID is valid, i.e. not zero.
+func (t TraceID) IsValid() bool {
+	return t.High != 0 || t.Low != 0
+}
+
+// ------- SpanID -------
+
+func (s SpanID) String() string {
+	return fmt.Sprintf("%016x", uint64(s))
+}
+
+// SpanIDFromString creates a SpanID from a hexadecimal string
+func SpanIDFromString(s string) (SpanID, error) {
+	if len(s) > 16 {
+		return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s)
+	}
+	id, err := strconv.ParseUint(s, 16, 64)
+	if err != nil {
+		return SpanID(0), err
+	}
+	return SpanID(id), nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go
new file mode 100644
index 0000000..54cd3b0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package agent
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go
new file mode 100644
index 0000000..a0df507
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent-consts.go
@@ -0,0 +1,28 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package agent
+
+import(
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+
+func init() {
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
new file mode 100644
index 0000000..6472e84
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/agent/agent.go
@@ -0,0 +1,396 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package agent
+
+import(
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+var _ = jaeger.GoUnusedProtection__
+var _ = zipkincore.GoUnusedProtection__
+type Agent interface {
+  // Parameters:
+  //  - Spans
+  EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error)
+  // Parameters:
+  //  - Batch
+  EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error)
+}
+
+type AgentClient struct {
+  c thrift.TClient
+  meta thrift.ResponseMeta
+}
+
+func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient {
+  return &AgentClient{
+    c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+  }
+}
+
+func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient {
+  return &AgentClient{
+    c: thrift.NewTStandardClient(iprot, oprot),
+  }
+}
+
+func NewAgentClient(c thrift.TClient) *AgentClient {
+  return &AgentClient{
+    c: c,
+  }
+}
+
+func (p *AgentClient) Client_() thrift.TClient {
+  return p.c
+}
+
+func (p *AgentClient) LastResponseMeta_() thrift.ResponseMeta {
+  return p.meta
+}
+
+func (p *AgentClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+  p.meta = meta
+}
+
+// Parameters:
+//  - Spans
+func (p *AgentClient) EmitZipkinBatch(ctx context.Context, spans []*zipkincore.Span) (_err error) {
+  var _args0 AgentEmitZipkinBatchArgs
+  _args0.Spans = spans
+  p.SetLastResponseMeta_(thrift.ResponseMeta{})
+  if _, err := p.Client_().Call(ctx, "emitZipkinBatch", &_args0, nil); err != nil {
+    return err
+  }
+  return nil
+}
+
+// Parameters:
+//  - Batch
+func (p *AgentClient) EmitBatch(ctx context.Context, batch *jaeger.Batch) (_err error) {
+  var _args1 AgentEmitBatchArgs
+  _args1.Batch = batch
+  p.SetLastResponseMeta_(thrift.ResponseMeta{})
+  if _, err := p.Client_().Call(ctx, "emitBatch", &_args1, nil); err != nil {
+    return err
+  }
+  return nil
+}
+
+type AgentProcessor struct {
+  processorMap map[string]thrift.TProcessorFunction
+  handler Agent
+}
+
+func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+  p.processorMap[key] = processor
+}
+
+func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+  processor, ok = p.processorMap[key]
+  return processor, ok
+}
+
+func (p *AgentProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+  return p.processorMap
+}
+
+func NewAgentProcessor(handler Agent) *AgentProcessor {
+
+  self2 := &AgentProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
+  self2.processorMap["emitZipkinBatch"] = &agentProcessorEmitZipkinBatch{handler:handler}
+  self2.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler:handler}
+return self2
+}
+
+func (p *AgentProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+  if err2 != nil { return false, thrift.WrapTException(err2) }
+  if processor, ok := p.GetProcessorFunction(name); ok {
+    return processor.Process(ctx, seqId, iprot, oprot)
+  }
+  iprot.Skip(ctx, thrift.STRUCT)
+  iprot.ReadMessageEnd(ctx)
+  x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+  oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+  x3.Write(ctx, oprot)
+  oprot.WriteMessageEnd(ctx)
+  oprot.Flush(ctx)
+  return false, x3
+
+}
+
+type agentProcessorEmitZipkinBatch struct {
+  handler Agent
+}
+
+func (p *agentProcessorEmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  args := AgentEmitZipkinBatchArgs{}
+  var err2 error
+  if err2 = args.Read(ctx, iprot); err2 != nil {
+    iprot.ReadMessageEnd(ctx)
+    return false, thrift.WrapTException(err2)
+  }
+  iprot.ReadMessageEnd(ctx)
+
+  tickerCancel := func() {}
+  _ = tickerCancel
+
+  if err2 = p.handler.EmitZipkinBatch(ctx, args.Spans); err2 != nil {
+    tickerCancel()
+    return true, thrift.WrapTException(err2)
+  }
+  tickerCancel()
+  return true, nil
+}
+
+type agentProcessorEmitBatch struct {
+  handler Agent
+}
+
+func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  args := AgentEmitBatchArgs{}
+  var err2 error
+  if err2 = args.Read(ctx, iprot); err2 != nil {
+    iprot.ReadMessageEnd(ctx)
+    return false, thrift.WrapTException(err2)
+  }
+  iprot.ReadMessageEnd(ctx)
+
+  tickerCancel := func() {}
+  _ = tickerCancel
+
+  if err2 = p.handler.EmitBatch(ctx, args.Batch); err2 != nil {
+    tickerCancel()
+    return true, thrift.WrapTException(err2)
+  }
+  tickerCancel()
+  return true, nil
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - Spans
+type AgentEmitZipkinBatchArgs struct {
+  Spans []*zipkincore.Span `thrift:"spans,1" db:"spans" json:"spans"`
+}
+
+func NewAgentEmitZipkinBatchArgs() *AgentEmitZipkinBatchArgs {
+  return &AgentEmitZipkinBatchArgs{}
+}
+
+
+func (p *AgentEmitZipkinBatchArgs) GetSpans() []*zipkincore.Span {
+  return p.Spans
+}
+func (p *AgentEmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*zipkincore.Span, 0, size)
+  p.Spans =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem4 := &zipkincore.Span{}
+    if err := _elem4.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+    }
+    p.Spans = append(p.Spans, _elem4)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "emitZipkinBatch_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *AgentEmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Spans {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) }
+  return err
+}
+
+func (p *AgentEmitZipkinBatchArgs) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("AgentEmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Batch
+type AgentEmitBatchArgs struct {
+  Batch *jaeger.Batch `thrift:"batch,1" db:"batch" json:"batch"`
+}
+
+func NewAgentEmitBatchArgs() *AgentEmitBatchArgs {
+  return &AgentEmitBatchArgs{}
+}
+
+var AgentEmitBatchArgs_Batch_DEFAULT *jaeger.Batch
+func (p *AgentEmitBatchArgs) GetBatch() *jaeger.Batch {
+  if !p.IsSetBatch() {
+    return AgentEmitBatchArgs_Batch_DEFAULT
+  }
+return p.Batch
+}
+func (p *AgentEmitBatchArgs) IsSetBatch() bool {
+  return p.Batch != nil
+}
+
+func (p *AgentEmitBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *AgentEmitBatchArgs)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Batch = &jaeger.Batch{}
+  if err := p.Batch.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err)
+  }
+  return nil
+}
+
+func (p *AgentEmitBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "emitBatch_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *AgentEmitBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "batch", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) }
+  if err := p.Batch.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) }
+  return err
+}
+
+func (p *AgentEmitBatchArgs) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p)
+}
+
+
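The generated `agent.go` above exposes the one-way reporting API (`EmitZipkinBatch`, `EmitBatch`) through `AgentClient`. As a minimal sketch only, not part of the vendored file, the following shows how that client could be driven, assuming the caller already holds a connected `thrift.TClient` and a populated `*jaeger.Batch`; the `emitOnce` helper and the package name are illustrative, not from this commit.

```go
package agentexample

import (
	"context"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/agent"
	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
)

// emitOnce wraps an existing thrift.TClient in the generated AgentClient
// and sends a single span batch via the one-way emitBatch call.
// (Hypothetical helper; the transport/client setup is assumed to exist.)
func emitOnce(ctx context.Context, c thrift.TClient, batch *jaeger.Batch) error {
	client := agent.NewAgentClient(c)
	// The generated method returns only an error; there is no reply payload.
	return client.EmitBatch(ctx, batch)
}
```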
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go
new file mode 100644
index 0000000..712b6a9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package baggage
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go
new file mode 100644
index 0000000..39b5a7e
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage-consts.go
@@ -0,0 +1,23 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package baggage
+
+import(
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+
+func init() {
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go
new file mode 100644
index 0000000..e4d89d5
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/baggage/baggage.go
@@ -0,0 +1,565 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package baggage
+
+import(
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+// Attributes:
+//  - BaggageKey
+//  - MaxValueLength
+type BaggageRestriction struct {
+  BaggageKey string `thrift:"baggageKey,1,required" db:"baggageKey" json:"baggageKey"`
+  MaxValueLength int32 `thrift:"maxValueLength,2,required" db:"maxValueLength" json:"maxValueLength"`
+}
+
+func NewBaggageRestriction() *BaggageRestriction {
+  return &BaggageRestriction{}
+}
+
+
+func (p *BaggageRestriction) GetBaggageKey() string {
+  return p.BaggageKey
+}
+
+func (p *BaggageRestriction) GetMaxValueLength() int32 {
+  return p.MaxValueLength
+}
+func (p *BaggageRestriction) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetBaggageKey bool = false;
+  var issetMaxValueLength bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetBaggageKey = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetMaxValueLength = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetBaggageKey{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field BaggageKey is not set"));
+  }
+  if !issetMaxValueLength{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxValueLength is not set"));
+  }
+  return nil
+}
+
+func (p *BaggageRestriction)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.BaggageKey = v
+}
+  return nil
+}
+
+func (p *BaggageRestriction)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.MaxValueLength = v
+}
+  return nil
+}
+
+func (p *BaggageRestriction) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "BaggageRestriction"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *BaggageRestriction) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "baggageKey", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:baggageKey: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.BaggageKey)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.baggageKey (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:baggageKey: ", p), err) }
+  return err
+}
+
+func (p *BaggageRestriction) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "maxValueLength", thrift.I32, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:maxValueLength: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.MaxValueLength)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.maxValueLength (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:maxValueLength: ", p), err) }
+  return err
+}
+
+func (p *BaggageRestriction) Equals(other *BaggageRestriction) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.BaggageKey != other.BaggageKey { return false }
+  if p.MaxValueLength != other.MaxValueLength { return false }
+  return true
+}
+
+func (p *BaggageRestriction) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("BaggageRestriction(%+v)", *p)
+}
+
+type BaggageRestrictionManager interface {
+  // getBaggageRestrictions retrieves the baggage restrictions for a specific service.
+  // Usually, baggageRestrictions apply to all services; however, there may be situations
+  // where a baggageKey might only be allowed to be set by a specific service.
+  // 
+  // Parameters:
+  //  - ServiceName
+  GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error)
+}
+
+type BaggageRestrictionManagerClient struct {
+  c thrift.TClient
+  meta thrift.ResponseMeta
+}
+
+func NewBaggageRestrictionManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *BaggageRestrictionManagerClient {
+  return &BaggageRestrictionManagerClient{
+    c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+  }
+}
+
+func NewBaggageRestrictionManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *BaggageRestrictionManagerClient {
+  return &BaggageRestrictionManagerClient{
+    c: thrift.NewTStandardClient(iprot, oprot),
+  }
+}
+
+func NewBaggageRestrictionManagerClient(c thrift.TClient) *BaggageRestrictionManagerClient {
+  return &BaggageRestrictionManagerClient{
+    c: c,
+  }
+}
+
+func (p *BaggageRestrictionManagerClient) Client_() thrift.TClient {
+  return p.c
+}
+
+func (p *BaggageRestrictionManagerClient) LastResponseMeta_() thrift.ResponseMeta {
+  return p.meta
+}
+
+func (p *BaggageRestrictionManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+  p.meta = meta
+}
+
+// getBaggageRestrictions retrieves the baggage restrictions for a specific service.
+// Usually, baggageRestrictions apply to all services; however, there may be situations
+// where a baggageKey might only be allowed to be set by a specific service.
+// 
+// Parameters:
+//  - ServiceName
+func (p *BaggageRestrictionManagerClient) GetBaggageRestrictions(ctx context.Context, serviceName string) (_r []*BaggageRestriction, _err error) {
+  var _args0 BaggageRestrictionManagerGetBaggageRestrictionsArgs
+  _args0.ServiceName = serviceName
+  var _result2 BaggageRestrictionManagerGetBaggageRestrictionsResult
+  var _meta1 thrift.ResponseMeta
+  _meta1, _err = p.Client_().Call(ctx, "getBaggageRestrictions", &_args0, &_result2)
+  p.SetLastResponseMeta_(_meta1)
+  if _err != nil {
+    return
+  }
+  return _result2.GetSuccess(), nil
+}
+
+type BaggageRestrictionManagerProcessor struct {
+  processorMap map[string]thrift.TProcessorFunction
+  handler BaggageRestrictionManager
+}
+
+func (p *BaggageRestrictionManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+  p.processorMap[key] = processor
+}
+
+func (p *BaggageRestrictionManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+  processor, ok = p.processorMap[key]
+  return processor, ok
+}
+
+func (p *BaggageRestrictionManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+  return p.processorMap
+}
+
+func NewBaggageRestrictionManagerProcessor(handler BaggageRestrictionManager) *BaggageRestrictionManagerProcessor {
+
+  self3 := &BaggageRestrictionManagerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
+  self3.processorMap["getBaggageRestrictions"] = &baggageRestrictionManagerProcessorGetBaggageRestrictions{handler:handler}
+return self3
+}
+
+func (p *BaggageRestrictionManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+  if err2 != nil { return false, thrift.WrapTException(err2) }
+  if processor, ok := p.GetProcessorFunction(name); ok {
+    return processor.Process(ctx, seqId, iprot, oprot)
+  }
+  iprot.Skip(ctx, thrift.STRUCT)
+  iprot.ReadMessageEnd(ctx)
+  x4 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+  oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+  x4.Write(ctx, oprot)
+  oprot.WriteMessageEnd(ctx)
+  oprot.Flush(ctx)
+  return false, x4
+
+}
+
+type baggageRestrictionManagerProcessorGetBaggageRestrictions struct {
+  handler BaggageRestrictionManager
+}
+
+func (p *baggageRestrictionManagerProcessorGetBaggageRestrictions) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  args := BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
+  var err2 error
+  if err2 = args.Read(ctx, iprot); err2 != nil {
+    iprot.ReadMessageEnd(ctx)
+    x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+    oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return false, thrift.WrapTException(err2)
+  }
+  iprot.ReadMessageEnd(ctx)
+
+  tickerCancel := func() {}
+  // Start a goroutine to do server side connectivity check.
+  if thrift.ServerConnectivityCheckInterval > 0 {
+    var cancel context.CancelFunc
+    ctx, cancel = context.WithCancel(ctx)
+    defer cancel()
+    var tickerCtx context.Context
+    tickerCtx, tickerCancel = context.WithCancel(context.Background())
+    defer tickerCancel()
+    go func(ctx context.Context, cancel context.CancelFunc) {
+      ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+      defer ticker.Stop()
+      for {
+        select {
+        case <-ctx.Done():
+          return
+        case <-ticker.C:
+          if !iprot.Transport().IsOpen() {
+            cancel()
+            return
+          }
+        }
+      }
+    }(tickerCtx, cancel)
+  }
+
+  result := BaggageRestrictionManagerGetBaggageRestrictionsResult{}
+  var retval []*BaggageRestriction
+  if retval, err2 = p.handler.GetBaggageRestrictions(ctx, args.ServiceName); err2 != nil {
+    tickerCancel()
+    if err2 == thrift.ErrAbandonRequest {
+      return false, thrift.WrapTException(err2)
+    }
+    x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getBaggageRestrictions: " + err2.Error())
+    oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return true, thrift.WrapTException(err2)
+  } else {
+    result.Success = retval
+  }
+  tickerCancel()
+  if err2 = oprot.WriteMessageBegin(ctx, "getBaggageRestrictions", thrift.REPLY, seqId); err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err != nil {
+    return
+  }
+  return true, err
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - ServiceName
+type BaggageRestrictionManagerGetBaggageRestrictionsArgs struct {
+  ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"`
+}
+
+func NewBaggageRestrictionManagerGetBaggageRestrictionsArgs() *BaggageRestrictionManagerGetBaggageRestrictionsArgs {
+  return &BaggageRestrictionManagerGetBaggageRestrictionsArgs{}
+}
+
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) GetServiceName() string {
+  return p.ServiceName
+}
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.ServiceName = v
+}
+  return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
+  return err
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsArgs) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Success
+type BaggageRestrictionManagerGetBaggageRestrictionsResult struct {
+  Success []*BaggageRestriction `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewBaggageRestrictionManagerGetBaggageRestrictionsResult() *BaggageRestrictionManagerGetBaggageRestrictionsResult {
+  return &BaggageRestrictionManagerGetBaggageRestrictionsResult{}
+}
+
+var BaggageRestrictionManagerGetBaggageRestrictionsResult_Success_DEFAULT []*BaggageRestriction
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) GetSuccess() []*BaggageRestriction {
+  return p.Success
+}
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) IsSetSuccess() bool {
+  return p.Success != nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 0:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField0(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult)  ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*BaggageRestriction, 0, size)
+  p.Success =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem5 := &BaggageRestriction{}
+    if err := _elem5.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err)
+    }
+    p.Success = append(p.Success, _elem5)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "getBaggageRestrictions_result"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField0(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSuccess() {
+    if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Success {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+  }
+  return err
+}
+
+func (p *BaggageRestrictionManagerGetBaggageRestrictionsResult) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("BaggageRestrictionManagerGetBaggageRestrictionsResult(%+v)", *p)
+}
+
+
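The generated `baggage.go` above defines `BaggageRestrictionManager` and its client; per its doc comment, `getBaggageRestrictions` returns the per-service restrictions (baggage key plus maximum value length). Below is a hedged sketch, not part of the vendored code, of how that client could be queried, again assuming a ready `thrift.TClient`; the helper name and package are illustrative only.

```go
package baggageexample

import (
	"context"
	"fmt"

	"github.com/uber/jaeger-client-go/thrift"
	"github.com/uber/jaeger-client-go/thrift-gen/baggage"
)

// printRestrictions fetches the baggage restrictions for one service and
// prints each allowed key with its maximum value length.
// (Hypothetical helper; only the generated API shown in this diff is used.)
func printRestrictions(ctx context.Context, c thrift.TClient, serviceName string) error {
	client := baggage.NewBaggageRestrictionManagerClient(c)
	restrictions, err := client.GetBaggageRestrictions(ctx, serviceName)
	if err != nil {
		return err
	}
	for _, r := range restrictions {
		fmt.Printf("%s: max value length %d\n", r.GetBaggageKey(), r.GetMaxValueLength())
	}
	return nil
}
```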
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go
new file mode 100644
index 0000000..fe45a9f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package jaeger
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go
new file mode 100644
index 0000000..b6ce855
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger-consts.go
@@ -0,0 +1,23 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package jaeger
+
+import(
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+
+func init() {
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go
new file mode 100644
index 0000000..d55cca0
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/jaeger/jaeger.go
@@ -0,0 +1,2698 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package jaeger
+
+import(
+	"bytes"
+	"context"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+type TagType int64
+const (
+  TagType_STRING TagType = 0
+  TagType_DOUBLE TagType = 1
+  TagType_BOOL TagType = 2
+  TagType_LONG TagType = 3
+  TagType_BINARY TagType = 4
+)
+
+func (p TagType) String() string {
+  switch p {
+  case TagType_STRING: return "STRING"
+  case TagType_DOUBLE: return "DOUBLE"
+  case TagType_BOOL: return "BOOL"
+  case TagType_LONG: return "LONG"
+  case TagType_BINARY: return "BINARY"
+  }
+  return "<UNSET>"
+}
+
+func TagTypeFromString(s string) (TagType, error) {
+  switch s {
+  case "STRING": return TagType_STRING, nil 
+  case "DOUBLE": return TagType_DOUBLE, nil 
+  case "BOOL": return TagType_BOOL, nil 
+  case "LONG": return TagType_LONG, nil 
+  case "BINARY": return TagType_BINARY, nil 
+  }
+  return TagType(0), fmt.Errorf("not a valid TagType string")
+}
+
+
+func TagTypePtr(v TagType) *TagType { return &v }
+
+func (p TagType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *TagType) UnmarshalText(text []byte) error {
+q, err := TagTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *TagType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = TagType(v)
+return nil
+}
+
+func (p * TagType) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+return int64(*p), nil
+}
+type SpanRefType int64
+const (
+  SpanRefType_CHILD_OF SpanRefType = 0
+  SpanRefType_FOLLOWS_FROM SpanRefType = 1
+)
+
+func (p SpanRefType) String() string {
+  switch p {
+  case SpanRefType_CHILD_OF: return "CHILD_OF"
+  case SpanRefType_FOLLOWS_FROM: return "FOLLOWS_FROM"
+  }
+  return "<UNSET>"
+}
+
+func SpanRefTypeFromString(s string) (SpanRefType, error) {
+  switch s {
+  case "CHILD_OF": return SpanRefType_CHILD_OF, nil 
+  case "FOLLOWS_FROM": return SpanRefType_FOLLOWS_FROM, nil 
+  }
+  return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string")
+}
+
+
+func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v }
+
+func (p SpanRefType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *SpanRefType) UnmarshalText(text []byte) error {
+q, err := SpanRefTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *SpanRefType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = SpanRefType(v)
+return nil
+}
+
+func (p * SpanRefType) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+return int64(*p), nil
+}
+// Attributes:
+//  - Key
+//  - VType
+//  - VStr
+//  - VDouble
+//  - VBool
+//  - VLong
+//  - VBinary
+type Tag struct {
+  Key string `thrift:"key,1,required" db:"key" json:"key"`
+  VType TagType `thrift:"vType,2,required" db:"vType" json:"vType"`
+  VStr *string `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"`
+  VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"`
+  VBool *bool `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"`
+  VLong *int64 `thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"`
+  VBinary []byte `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"`
+}
+
+func NewTag() *Tag {
+  return &Tag{}
+}
+
+
+func (p *Tag) GetKey() string {
+  return p.Key
+}
+
+func (p *Tag) GetVType() TagType {
+  return p.VType
+}
+var Tag_VStr_DEFAULT string
+func (p *Tag) GetVStr() string {
+  if !p.IsSetVStr() {
+    return Tag_VStr_DEFAULT
+  }
+return *p.VStr
+}
+var Tag_VDouble_DEFAULT float64
+func (p *Tag) GetVDouble() float64 {
+  if !p.IsSetVDouble() {
+    return Tag_VDouble_DEFAULT
+  }
+return *p.VDouble
+}
+var Tag_VBool_DEFAULT bool
+func (p *Tag) GetVBool() bool {
+  if !p.IsSetVBool() {
+    return Tag_VBool_DEFAULT
+  }
+return *p.VBool
+}
+var Tag_VLong_DEFAULT int64
+func (p *Tag) GetVLong() int64 {
+  if !p.IsSetVLong() {
+    return Tag_VLong_DEFAULT
+  }
+return *p.VLong
+}
+var Tag_VBinary_DEFAULT []byte
+
+func (p *Tag) GetVBinary() []byte {
+  return p.VBinary
+}
+func (p *Tag) IsSetVStr() bool {
+  return p.VStr != nil
+}
+
+func (p *Tag) IsSetVDouble() bool {
+  return p.VDouble != nil
+}
+
+func (p *Tag) IsSetVBool() bool {
+  return p.VBool != nil
+}
+
+func (p *Tag) IsSetVLong() bool {
+  return p.VLong != nil
+}
+
+func (p *Tag) IsSetVBinary() bool {
+  return p.VBinary != nil
+}
+
+func (p *Tag) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetKey bool = false;
+  var issetVType bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetKey = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetVType = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 5:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField5(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 6:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField6(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 7:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField7(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetKey{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set"));
+  }
+  if !issetVType{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set"));
+  }
+  return nil
+}
+
+func (p *Tag)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Key = v
+}
+  return nil
+}
+
+func (p *Tag)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  temp := TagType(v)
+  p.VType = temp
+}
+  return nil
+}
+
+func (p *Tag)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.VStr = &v
+}
+  return nil
+}
+
+func (p *Tag)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.VDouble = &v
+}
+  return nil
+}
+
+func (p *Tag)  ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 5: ", err)
+} else {
+  p.VBool = &v
+}
+  return nil
+}
+
+func (p *Tag)  ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 6: ", err)
+} else {
+  p.VLong = &v
+}
+  return nil
+}
+
+func (p *Tag)  ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBinary(ctx); err != nil {
+  return thrift.PrependError("error reading field 7: ", err)
+} else {
+  p.VBinary = v
+}
+  return nil
+}
+
+func (p *Tag) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Tag"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+    if err := p.writeField5(ctx, oprot); err != nil { return err }
+    if err := p.writeField6(ctx, oprot); err != nil { return err }
+    if err := p.writeField7(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Tag) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) }
+  return err
+}
+
+func (p *Tag) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "vType", thrift.I32, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.VType)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) }
+  return err
+}
+
+func (p *Tag) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetVStr() {
+    if err := oprot.WriteFieldBegin(ctx, "vStr", thrift.STRING, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) }
+    if err := oprot.WriteString(ctx, string(*p.VStr)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) }
+  }
+  return err
+}
+
+func (p *Tag) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetVDouble() {
+    if err := oprot.WriteFieldBegin(ctx, "vDouble", thrift.DOUBLE, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:vDouble: ", p), err) }
+    if err := oprot.WriteDouble(ctx, float64(*p.VDouble)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) }
+  }
+  return err
+}
+
+func (p *Tag) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetVBool() {
+    if err := oprot.WriteFieldBegin(ctx, "vBool", thrift.BOOL, 5); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(*p.VBool)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) }
+  }
+  return err
+}
+
+func (p *Tag) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetVLong() {
+    if err := oprot.WriteFieldBegin(ctx, "vLong", thrift.I64, 6); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.VLong)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) }
+  }
+  return err
+}
+
+func (p *Tag) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetVBinary() {
+    if err := oprot.WriteFieldBegin(ctx, "vBinary", thrift.STRING, 7); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) }
+    if err := oprot.WriteBinary(ctx, p.VBinary); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) }
+  }
+  return err
+}
+
+func (p *Tag) Equals(other *Tag) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Key != other.Key { return false }
+  if p.VType != other.VType { return false }
+  if p.VStr != other.VStr {
+    if p.VStr == nil || other.VStr == nil {
+      return false
+    }
+    if (*p.VStr) != (*other.VStr) { return false }
+  }
+  if p.VDouble != other.VDouble {
+    if p.VDouble == nil || other.VDouble == nil {
+      return false
+    }
+    if (*p.VDouble) != (*other.VDouble) { return false }
+  }
+  if p.VBool != other.VBool {
+    if p.VBool == nil || other.VBool == nil {
+      return false
+    }
+    if (*p.VBool) != (*other.VBool) { return false }
+  }
+  if p.VLong != other.VLong {
+    if p.VLong == nil || other.VLong == nil {
+      return false
+    }
+    if (*p.VLong) != (*other.VLong) { return false }
+  }
+  if bytes.Compare(p.VBinary, other.VBinary) != 0 { return false }
+  return true
+}
+
+func (p *Tag) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Tag(%+v)", *p)
+}
+
+// Attributes:
+//  - Timestamp
+//  - Fields
+type Log struct {
+  Timestamp int64 `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"`
+  Fields []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"`
+}
+
+func NewLog() *Log {
+  return &Log{}
+}
+
+
+func (p *Log) GetTimestamp() int64 {
+  return p.Timestamp
+}
+
+func (p *Log) GetFields() []*Tag {
+  return p.Fields
+}
+func (p *Log) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetTimestamp bool = false;
+  var issetFields bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetTimestamp = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetFields = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetTimestamp{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set"));
+  }
+  if !issetFields{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set"));
+  }
+  return nil
+}
+
+func (p *Log)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Timestamp = v
+}
+  return nil
+}
+
+func (p *Log)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Tag, 0, size)
+  p.Fields =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem0 := &Tag{}
+    if err := _elem0.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+    }
+    p.Fields = append(p.Fields, _elem0)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Log) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Log"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Log) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) }
+  return err
+}
+
+func (p *Log) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "fields", thrift.LIST, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Fields)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Fields {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) }
+  return err
+}
+
+func (p *Log) Equals(other *Log) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Timestamp != other.Timestamp { return false }
+  if len(p.Fields) != len(other.Fields) { return false }
+  for i, _tgt := range p.Fields {
+    _src1 := other.Fields[i]
+    if !_tgt.Equals(_src1) { return false }
+  }
+  return true
+}
+
+func (p *Log) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Log(%+v)", *p)
+}
+
+// Attributes:
+//  - RefType
+//  - TraceIdLow
+//  - TraceIdHigh
+//  - SpanId
+type SpanRef struct {
+  RefType SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"`
+  TraceIdLow int64 `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"`
+  TraceIdHigh int64 `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"`
+  SpanId int64 `thrift:"spanId,4,required" db:"spanId" json:"spanId"`
+}
+
+func NewSpanRef() *SpanRef {
+  return &SpanRef{}
+}
+
+
+func (p *SpanRef) GetRefType() SpanRefType {
+  return p.RefType
+}
+
+func (p *SpanRef) GetTraceIdLow() int64 {
+  return p.TraceIdLow
+}
+
+func (p *SpanRef) GetTraceIdHigh() int64 {
+  return p.TraceIdHigh
+}
+
+func (p *SpanRef) GetSpanId() int64 {
+  return p.SpanId
+}
+func (p *SpanRef) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetRefType bool = false;
+  var issetTraceIdLow bool = false;
+  var issetTraceIdHigh bool = false;
+  var issetSpanId bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetRefType = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetTraceIdLow = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+        issetTraceIdHigh = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+        issetSpanId = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetRefType{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set"));
+  }
+  if !issetTraceIdLow{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"));
+  }
+  if !issetTraceIdHigh{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"));
+  }
+  if !issetSpanId{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"));
+  }
+  return nil
+}
+
+func (p *SpanRef)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  temp := SpanRefType(v)
+  p.RefType = temp
+}
+  return nil
+}
+
+func (p *SpanRef)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.TraceIdLow = v
+}
+  return nil
+}
+
+func (p *SpanRef)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.TraceIdHigh = v
+}
+  return nil
+}
+
+func (p *SpanRef)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.SpanId = v
+}
+  return nil
+}
+
+func (p *SpanRef) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "SpanRef"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *SpanRef) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "refType", thrift.I32, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.RefType)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) }
+  return err
+}
+
+func (p *SpanRef) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) }
+  return err
+}
+
+func (p *SpanRef) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) }
+  return err
+}
+
+func (p *SpanRef) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 4); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) }
+  return err
+}
+
+func (p *SpanRef) Equals(other *SpanRef) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.RefType != other.RefType { return false }
+  if p.TraceIdLow != other.TraceIdLow { return false }
+  if p.TraceIdHigh != other.TraceIdHigh { return false }
+  if p.SpanId != other.SpanId { return false }
+  return true
+}
+
+func (p *SpanRef) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("SpanRef(%+v)", *p)
+}
+
+// Attributes:
+//  - TraceIdLow
+//  - TraceIdHigh
+//  - SpanId
+//  - ParentSpanId
+//  - OperationName
+//  - References
+//  - Flags
+//  - StartTime
+//  - Duration
+//  - Tags
+//  - Logs
+type Span struct {
+  TraceIdLow int64 `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"`
+  TraceIdHigh int64 `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"`
+  SpanId int64 `thrift:"spanId,3,required" db:"spanId" json:"spanId"`
+  ParentSpanId int64 `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"`
+  OperationName string `thrift:"operationName,5,required" db:"operationName" json:"operationName"`
+  References []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"`
+  Flags int32 `thrift:"flags,7,required" db:"flags" json:"flags"`
+  StartTime int64 `thrift:"startTime,8,required" db:"startTime" json:"startTime"`
+  Duration int64 `thrift:"duration,9,required" db:"duration" json:"duration"`
+  Tags []*Tag `thrift:"tags,10" db:"tags" json:"tags,omitempty"`
+  Logs []*Log `thrift:"logs,11" db:"logs" json:"logs,omitempty"`
+}
+
+func NewSpan() *Span {
+  return &Span{}
+}
+
+
+func (p *Span) GetTraceIdLow() int64 {
+  return p.TraceIdLow
+}
+
+func (p *Span) GetTraceIdHigh() int64 {
+  return p.TraceIdHigh
+}
+
+func (p *Span) GetSpanId() int64 {
+  return p.SpanId
+}
+
+func (p *Span) GetParentSpanId() int64 {
+  return p.ParentSpanId
+}
+
+func (p *Span) GetOperationName() string {
+  return p.OperationName
+}
+var Span_References_DEFAULT []*SpanRef
+
+func (p *Span) GetReferences() []*SpanRef {
+  return p.References
+}
+
+func (p *Span) GetFlags() int32 {
+  return p.Flags
+}
+
+func (p *Span) GetStartTime() int64 {
+  return p.StartTime
+}
+
+func (p *Span) GetDuration() int64 {
+  return p.Duration
+}
+var Span_Tags_DEFAULT []*Tag
+
+func (p *Span) GetTags() []*Tag {
+  return p.Tags
+}
+var Span_Logs_DEFAULT []*Log
+
+func (p *Span) GetLogs() []*Log {
+  return p.Logs
+}
+func (p *Span) IsSetReferences() bool {
+  return p.References != nil
+}
+
+func (p *Span) IsSetTags() bool {
+  return p.Tags != nil
+}
+
+func (p *Span) IsSetLogs() bool {
+  return p.Logs != nil
+}
+
+func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetTraceIdLow bool = false;
+  var issetTraceIdHigh bool = false;
+  var issetSpanId bool = false;
+  var issetParentSpanId bool = false;
+  var issetOperationName bool = false;
+  var issetFlags bool = false;
+  var issetStartTime bool = false;
+  var issetDuration bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetTraceIdLow = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetTraceIdHigh = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+        issetSpanId = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+        issetParentSpanId = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 5:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField5(ctx, iprot); err != nil {
+          return err
+        }
+        issetOperationName = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 6:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField6(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 7:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField7(ctx, iprot); err != nil {
+          return err
+        }
+        issetFlags = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 8:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField8(ctx, iprot); err != nil {
+          return err
+        }
+        issetStartTime = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 9:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField9(ctx, iprot); err != nil {
+          return err
+        }
+        issetDuration = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 10:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField10(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 11:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField11(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetTraceIdLow{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set"));
+  }
+  if !issetTraceIdHigh{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set"));
+  }
+  if !issetSpanId{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set"));
+  }
+  if !issetParentSpanId{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ParentSpanId is not set"));
+  }
+  if !issetOperationName{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set"));
+  }
+  if !issetFlags{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set"));
+  }
+  if !issetStartTime{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set"));
+  }
+  if !issetDuration{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set"));
+  }
+  return nil
+}
+
+func (p *Span)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.TraceIdLow = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.TraceIdHigh = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.SpanId = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.ParentSpanId = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 5: ", err)
+} else {
+  p.OperationName = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*SpanRef, 0, size)
+  p.References =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem2 := &SpanRef{}
+    if err := _elem2.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err)
+    }
+    p.References = append(p.References, _elem2)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Span)  ReadField7(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 7: ", err)
+} else {
+  p.Flags = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 8: ", err)
+} else {
+  p.StartTime = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 9: ", err)
+} else {
+  p.Duration = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Tag, 0, size)
+  p.Tags =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem3 := &Tag{}
+    if err := _elem3.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), err)
+    }
+    p.Tags = append(p.Tags, _elem3)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Span)  ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Log, 0, size)
+  p.Logs =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem4 := &Log{}
+    if err := _elem4.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+    }
+    p.Logs = append(p.Logs, _elem4)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+    if err := p.writeField5(ctx, oprot); err != nil { return err }
+    if err := p.writeField6(ctx, oprot); err != nil { return err }
+    if err := p.writeField7(ctx, oprot); err != nil { return err }
+    if err := p.writeField8(ctx, oprot); err != nil { return err }
+    if err := p.writeField9(ctx, oprot); err != nil { return err }
+    if err := p.writeField10(ctx, oprot); err != nil { return err }
+    if err := p.writeField11(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "traceIdLow", thrift.I64, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.TraceIdLow)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "traceIdHigh", thrift.I64, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.TraceIdHigh)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "spanId", thrift.I64, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.SpanId)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "parentSpanId", thrift.I64, 4); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.ParentSpanId)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "operationName", thrift.STRING, 5); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.OperationName)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetReferences() {
+    if err := oprot.WriteFieldBegin(ctx, "references", thrift.LIST, 6); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.References)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.References {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField7(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "flags", thrift.I32, 7); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.Flags)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "startTime", thrift.I64, 8); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.StartTime)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 9); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.Duration)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetTags() {
+    if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 10); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Tags {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetLogs() {
+    if err := oprot.WriteFieldBegin(ctx, "logs", thrift.LIST, 11); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Logs)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Logs {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) Equals(other *Span) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.TraceIdLow != other.TraceIdLow { return false }
+  if p.TraceIdHigh != other.TraceIdHigh { return false }
+  if p.SpanId != other.SpanId { return false }
+  if p.ParentSpanId != other.ParentSpanId { return false }
+  if p.OperationName != other.OperationName { return false }
+  if len(p.References) != len(other.References) { return false }
+  for i, _tgt := range p.References {
+    _src5 := other.References[i]
+    if !_tgt.Equals(_src5) { return false }
+  }
+  if p.Flags != other.Flags { return false }
+  if p.StartTime != other.StartTime { return false }
+  if p.Duration != other.Duration { return false }
+  if len(p.Tags) != len(other.Tags) { return false }
+  for i, _tgt := range p.Tags {
+    _src6 := other.Tags[i]
+    if !_tgt.Equals(_src6) { return false }
+  }
+  if len(p.Logs) != len(other.Logs) { return false }
+  for i, _tgt := range p.Logs {
+    _src7 := other.Logs[i]
+    if !_tgt.Equals(_src7) { return false }
+  }
+  return true
+}
+
+func (p *Span) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Span(%+v)", *p)
+}
+
+// Attributes:
+//  - ServiceName
+//  - Tags
+type Process struct {
+  ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"`
+  Tags []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"`
+}
+
+func NewProcess() *Process {
+  return &Process{}
+}
+
+
+func (p *Process) GetServiceName() string {
+  return p.ServiceName
+}
+var Process_Tags_DEFAULT []*Tag
+
+func (p *Process) GetTags() []*Tag {
+  return p.Tags
+}
+func (p *Process) IsSetTags() bool {
+  return p.Tags != nil
+}
+
+func (p *Process) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetServiceName bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetServiceName = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetServiceName{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set"));
+  }
+  return nil
+}
+
+func (p *Process)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.ServiceName = v
+}
+  return nil
+}
+
+func (p *Process)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Tag, 0, size)
+  p.Tags =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem8 := &Tag{}
+    if err := _elem8.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem8), err)
+    }
+    p.Tags = append(p.Tags, _elem8)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Process) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Process"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Process) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
+  return err
+}
+
+func (p *Process) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetTags() {
+    if err := oprot.WriteFieldBegin(ctx, "tags", thrift.LIST, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Tags)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Tags {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) }
+  }
+  return err
+}
+
+func (p *Process) Equals(other *Process) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.ServiceName != other.ServiceName { return false }
+  if len(p.Tags) != len(other.Tags) { return false }
+  for i, _tgt := range p.Tags {
+    _src9 := other.Tags[i]
+    if !_tgt.Equals(_src9) { return false }
+  }
+  return true
+}
+
+func (p *Process) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Process(%+v)", *p)
+}
+
+// Attributes:
+//  - FullQueueDroppedSpans
+//  - TooLargeDroppedSpans
+//  - FailedToEmitSpans
+type ClientStats struct {
+  FullQueueDroppedSpans int64 `thrift:"fullQueueDroppedSpans,1,required" db:"fullQueueDroppedSpans" json:"fullQueueDroppedSpans"`
+  TooLargeDroppedSpans int64 `thrift:"tooLargeDroppedSpans,2,required" db:"tooLargeDroppedSpans" json:"tooLargeDroppedSpans"`
+  FailedToEmitSpans int64 `thrift:"failedToEmitSpans,3,required" db:"failedToEmitSpans" json:"failedToEmitSpans"`
+}
+
+func NewClientStats() *ClientStats {
+  return &ClientStats{}
+}
+
+
+func (p *ClientStats) GetFullQueueDroppedSpans() int64 {
+  return p.FullQueueDroppedSpans
+}
+
+func (p *ClientStats) GetTooLargeDroppedSpans() int64 {
+  return p.TooLargeDroppedSpans
+}
+
+func (p *ClientStats) GetFailedToEmitSpans() int64 {
+  return p.FailedToEmitSpans
+}
+func (p *ClientStats) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetFullQueueDroppedSpans bool = false;
+  var issetTooLargeDroppedSpans bool = false;
+  var issetFailedToEmitSpans bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetFullQueueDroppedSpans = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetTooLargeDroppedSpans = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+        issetFailedToEmitSpans = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetFullQueueDroppedSpans{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FullQueueDroppedSpans is not set"));
+  }
+  if !issetTooLargeDroppedSpans{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TooLargeDroppedSpans is not set"));
+  }
+  if !issetFailedToEmitSpans{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field FailedToEmitSpans is not set"));
+  }
+  return nil
+}
+
+func (p *ClientStats)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.FullQueueDroppedSpans = v
+}
+  return nil
+}
+
+func (p *ClientStats)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.TooLargeDroppedSpans = v
+}
+  return nil
+}
+
+func (p *ClientStats)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.FailedToEmitSpans = v
+}
+  return nil
+}
+
+func (p *ClientStats) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "ClientStats"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *ClientStats) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "fullQueueDroppedSpans", thrift.I64, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:fullQueueDroppedSpans: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.FullQueueDroppedSpans)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.fullQueueDroppedSpans (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:fullQueueDroppedSpans: ", p), err) }
+  return err
+}
+
+func (p *ClientStats) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "tooLargeDroppedSpans", thrift.I64, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tooLargeDroppedSpans: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.TooLargeDroppedSpans)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.tooLargeDroppedSpans (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tooLargeDroppedSpans: ", p), err) }
+  return err
+}
+
+func (p *ClientStats) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "failedToEmitSpans", thrift.I64, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:failedToEmitSpans: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.FailedToEmitSpans)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.failedToEmitSpans (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:failedToEmitSpans: ", p), err) }
+  return err
+}
+
+func (p *ClientStats) Equals(other *ClientStats) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.FullQueueDroppedSpans != other.FullQueueDroppedSpans { return false }
+  if p.TooLargeDroppedSpans != other.TooLargeDroppedSpans { return false }
+  if p.FailedToEmitSpans != other.FailedToEmitSpans { return false }
+  return true
+}
+
+func (p *ClientStats) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("ClientStats(%+v)", *p)
+}
+
+// Attributes:
+//  - Process
+//  - Spans
+//  - SeqNo
+//  - Stats
+type Batch struct {
+  Process *Process `thrift:"process,1,required" db:"process" json:"process"`
+  Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"`
+  SeqNo *int64 `thrift:"seqNo,3" db:"seqNo" json:"seqNo,omitempty"`
+  Stats *ClientStats `thrift:"stats,4" db:"stats" json:"stats,omitempty"`
+}
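+
+// Construction sketch (illustrative only): a Batch pairs one Process with the
+// Spans it emitted; SeqNo and Stats are optional pointer fields and may stay
+// nil. The field values below are placeholders, not meaningful identifiers.
+//
+//	batch := &Batch{
+//		Process: &Process{ServiceName: "example-service"},
+//		Spans: []*Span{{
+//			TraceIdLow:    1,
+//			SpanId:        1,
+//			OperationName: "example-operation",
+//			Flags:         1,
+//		}},
+//	}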
+
+func NewBatch() *Batch {
+  return &Batch{}
+}
+
+var Batch_Process_DEFAULT *Process
+func (p *Batch) GetProcess() *Process {
+  if !p.IsSetProcess() {
+    return Batch_Process_DEFAULT
+  }
+return p.Process
+}
+
+func (p *Batch) GetSpans() []*Span {
+  return p.Spans
+}
+var Batch_SeqNo_DEFAULT int64
+func (p *Batch) GetSeqNo() int64 {
+  if !p.IsSetSeqNo() {
+    return Batch_SeqNo_DEFAULT
+  }
+return *p.SeqNo
+}
+var Batch_Stats_DEFAULT *ClientStats
+func (p *Batch) GetStats() *ClientStats {
+  if !p.IsSetStats() {
+    return Batch_Stats_DEFAULT
+  }
+return p.Stats
+}
+func (p *Batch) IsSetProcess() bool {
+  return p.Process != nil
+}
+
+func (p *Batch) IsSetSeqNo() bool {
+  return p.SeqNo != nil
+}
+
+func (p *Batch) IsSetStats() bool {
+  return p.Stats != nil
+}
+
+func (p *Batch) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetProcess bool = false;
+  var issetSpans bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetProcess = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetSpans = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetProcess{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set"));
+  }
+  if !issetSpans{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set"));
+  }
+  return nil
+}
+
+func (p *Batch)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Process = &Process{}
+  if err := p.Process.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err)
+  }
+  return nil
+}
+
+func (p *Batch)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Span, 0, size)
+  p.Spans =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem10 := &Span{}
+    if err := _elem10.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
+    }
+    p.Spans = append(p.Spans, _elem10)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Batch)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.SeqNo = &v
+}
+  return nil
+}
+
+func (p *Batch)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Stats = &ClientStats{}
+  if err := p.Stats.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Stats), err)
+  }
+  return nil
+}
+
+func (p *Batch) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Batch"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Batch) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "process", thrift.STRUCT, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) }
+  if err := p.Process.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) }
+  return err
+}
+
+func (p *Batch) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Spans {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) }
+  return err
+}
+
+func (p *Batch) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSeqNo() {
+    if err := oprot.WriteFieldBegin(ctx, "seqNo", thrift.I64, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:seqNo: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.SeqNo)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.seqNo (3) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:seqNo: ", p), err) }
+  }
+  return err
+}
+
+func (p *Batch) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetStats() {
+    if err := oprot.WriteFieldBegin(ctx, "stats", thrift.STRUCT, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:stats: ", p), err) }
+    if err := p.Stats.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Stats), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:stats: ", p), err) }
+  }
+  return err
+}
+
+func (p *Batch) Equals(other *Batch) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if !p.Process.Equals(other.Process) { return false }
+  if len(p.Spans) != len(other.Spans) { return false }
+  for i, _tgt := range p.Spans {
+    _src11 := other.Spans[i]
+    if !_tgt.Equals(_src11) { return false }
+  }
+  if p.SeqNo != other.SeqNo {
+    if p.SeqNo == nil || other.SeqNo == nil {
+      return false
+    }
+    if (*p.SeqNo) != (*other.SeqNo) { return false }
+  }
+  if !p.Stats.Equals(other.Stats) { return false }
+  return true
+}
+
+func (p *Batch) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Batch(%+v)", *p)
+}
+
+// Attributes:
+//  - Ok
+type BatchSubmitResponse struct {
+  Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
+}
+
+func NewBatchSubmitResponse() *BatchSubmitResponse {
+  return &BatchSubmitResponse{}
+}
+
+
+func (p *BatchSubmitResponse) GetOk() bool {
+  return p.Ok
+}
+func (p *BatchSubmitResponse) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetOk bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetOk = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetOk{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"));
+  }
+  return nil
+}
+
+func (p *BatchSubmitResponse)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Ok = v
+}
+  return nil
+}
+
+func (p *BatchSubmitResponse) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "BatchSubmitResponse"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *BatchSubmitResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) }
+  if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) }
+  return err
+}
+
+func (p *BatchSubmitResponse) Equals(other *BatchSubmitResponse) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Ok != other.Ok { return false }
+  return true
+}
+
+func (p *BatchSubmitResponse) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("BatchSubmitResponse(%+v)", *p)
+}
+
+type Collector interface {
+  // Parameters:
+  //  - Batches
+  SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error)
+}
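+
+// Client-side sketch (illustrative, not generated): given a prepared
+// thrift.TClient (assumed to be constructed elsewhere), batches are submitted
+// through the CollectorClient defined below.
+//
+//	client := NewCollectorClient(standardClient) // standardClient: an existing thrift.TClient
+//	responses, err := client.SubmitBatches(ctx, []*Batch{batch})
+//	if err != nil {
+//		// handle transport or protocol failure
+//	}
+//	for _, r := range responses {
+//		fmt.Println(r.GetOk())
+//	}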
+
+type CollectorClient struct {
+  c thrift.TClient
+  meta thrift.ResponseMeta
+}
+
+func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient {
+  return &CollectorClient{
+    c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+  }
+}
+
+func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient {
+  return &CollectorClient{
+    c: thrift.NewTStandardClient(iprot, oprot),
+  }
+}
+
+func NewCollectorClient(c thrift.TClient) *CollectorClient {
+  return &CollectorClient{
+    c: c,
+  }
+}
+
+func (p *CollectorClient) Client_() thrift.TClient {
+  return p.c
+}
+
+func (p *CollectorClient) LastResponseMeta_() thrift.ResponseMeta {
+  return p.meta
+}
+
+func (p *CollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+  p.meta = meta
+}
+
+// Parameters:
+//  - Batches
+func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (_r []*BatchSubmitResponse, _err error) {
+  var _args12 CollectorSubmitBatchesArgs
+  _args12.Batches = batches
+  var _result14 CollectorSubmitBatchesResult
+  var _meta13 thrift.ResponseMeta
+  _meta13, _err = p.Client_().Call(ctx, "submitBatches", &_args12, &_result14)
+  p.SetLastResponseMeta_(_meta13)
+  if _err != nil {
+    return
+  }
+  return _result14.GetSuccess(), nil
+}
+
+type CollectorProcessor struct {
+  processorMap map[string]thrift.TProcessorFunction
+  handler Collector
+}
+
+func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+  p.processorMap[key] = processor
+}
+
+func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+  processor, ok = p.processorMap[key]
+  return processor, ok
+}
+
+func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+  return p.processorMap
+}
+
+func NewCollectorProcessor(handler Collector) *CollectorProcessor {
+
+  self15 := &CollectorProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
+  self15.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler:handler}
+return self15
+}
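+
+// Server-side sketch (illustrative, not generated): a value implementing the
+// Collector interface is wrapped by NewCollectorProcessor and served with the
+// usual Apache Thrift Go server types; the constructor names below are
+// assumptions about that library, not part of this file.
+//
+//	processor := NewCollectorProcessor(myHandler) // myHandler implements Collector
+//	socket, err := thrift.NewTServerSocket("127.0.0.1:14267")
+//	if err != nil {
+//		// handle listen error
+//	}
+//	server := thrift.NewTSimpleServer4(processor, socket,
+//		thrift.NewTTransportFactory(), thrift.NewTBinaryProtocolFactoryConf(nil))
+//	_ = server.Serve()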
+
+func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+  if err2 != nil { return false, thrift.WrapTException(err2) }
+  if processor, ok := p.GetProcessorFunction(name); ok {
+    return processor.Process(ctx, seqId, iprot, oprot)
+  }
+  iprot.Skip(ctx, thrift.STRUCT)
+  iprot.ReadMessageEnd(ctx)
+  x16 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+  oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+  x16.Write(ctx, oprot)
+  oprot.WriteMessageEnd(ctx)
+  oprot.Flush(ctx)
+  return false, x16
+
+}
+
+type collectorProcessorSubmitBatches struct {
+  handler Collector
+}
+
+func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  args := CollectorSubmitBatchesArgs{}
+  var err2 error
+  if err2 = args.Read(ctx, iprot); err2 != nil {
+    iprot.ReadMessageEnd(ctx)
+    x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+    oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return false, thrift.WrapTException(err2)
+  }
+  iprot.ReadMessageEnd(ctx)
+
+  tickerCancel := func() {}
+  // Start a goroutine to do server side connectivity check.
+  if thrift.ServerConnectivityCheckInterval > 0 {
+    var cancel context.CancelFunc
+    ctx, cancel = context.WithCancel(ctx)
+    defer cancel()
+    var tickerCtx context.Context
+    tickerCtx, tickerCancel = context.WithCancel(context.Background())
+    defer tickerCancel()
+    go func(ctx context.Context, cancel context.CancelFunc) {
+      ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+      defer ticker.Stop()
+      for {
+        select {
+        case <-ctx.Done():
+          return
+        case <-ticker.C:
+          if !iprot.Transport().IsOpen() {
+            cancel()
+            return
+          }
+        }
+      }
+    }(tickerCtx, cancel)
+  }
+
+  result := CollectorSubmitBatchesResult{}
+  var retval []*BatchSubmitResponse
+  if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil {
+    tickerCancel()
+    if err2 == thrift.ErrAbandonRequest {
+      return false, thrift.WrapTException(err2)
+    }
+    x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: " + err2.Error())
+    oprot.WriteMessageBegin(ctx, "submitBatches", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return true, thrift.WrapTException(err2)
+  } else {
+    result.Success = retval
+  }
+  tickerCancel()
+  if err2 = oprot.WriteMessageBegin(ctx, "submitBatches", thrift.REPLY, seqId); err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err != nil {
+    return
+  }
+  return true, err
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
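+//
+// The *Args and *Result types below mirror the submitBatches_args and
+// submitBatches_result structs that the client and processor serialize on the
+// wire.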
+
+// Attributes:
+//  - Batches
+type CollectorSubmitBatchesArgs struct {
+  Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"`
+}
+
+func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs {
+  return &CollectorSubmitBatchesArgs{}
+}
+
+
+func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch {
+  return p.Batches
+}
+func (p *CollectorSubmitBatchesArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *CollectorSubmitBatchesArgs)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Batch, 0, size)
+  p.Batches =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem17 := &Batch{}
+    if err := _elem17.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem17), err)
+    }
+    p.Batches = append(p.Batches, _elem17)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *CollectorSubmitBatchesArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "submitBatches_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *CollectorSubmitBatchesArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "batches", thrift.LIST, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Batches)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Batches {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err) }
+  return err
+}
+
+func (p *CollectorSubmitBatchesArgs) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Success
+type CollectorSubmitBatchesResult struct {
+  Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult {
+  return &CollectorSubmitBatchesResult{}
+}
+
+var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse
+
+func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse {
+  return p.Success
+}
+func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool {
+  return p.Success != nil
+}
+
+func (p *CollectorSubmitBatchesResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 0:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField0(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *CollectorSubmitBatchesResult)  ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*BatchSubmitResponse, 0, size)
+  p.Success =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem18 := &BatchSubmitResponse{}
+    if err := _elem18.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem18), err)
+    }
+    p.Success = append(p.Success, _elem18)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *CollectorSubmitBatchesResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "submitBatches_result"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField0(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *CollectorSubmitBatchesResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSuccess() {
+    if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Success {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+  }
+  return err
+}
+
+func (p *CollectorSubmitBatchesResult) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p)
+}
+
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go
new file mode 100644
index 0000000..015ad4b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package sampling
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go
new file mode 100644
index 0000000..5cc7628
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling-consts.go
@@ -0,0 +1,23 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package sampling
+
+import(
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+
+func init() {
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go
new file mode 100644
index 0000000..3bffa5b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/sampling/sampling.go
@@ -0,0 +1,1323 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package sampling
+
+import(
+	"bytes"
+	"context"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
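+// SamplingStrategyType enumerates the strategies a sampling server can return:
+// probabilistic (sample a fixed fraction of traces) or rate limiting (cap the
+// number of traces per second).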
+type SamplingStrategyType int64
+const (
+  SamplingStrategyType_PROBABILISTIC SamplingStrategyType = 0
+  SamplingStrategyType_RATE_LIMITING SamplingStrategyType = 1
+)
+
+func (p SamplingStrategyType) String() string {
+  switch p {
+  case SamplingStrategyType_PROBABILISTIC: return "PROBABILISTIC"
+  case SamplingStrategyType_RATE_LIMITING: return "RATE_LIMITING"
+  }
+  return "<UNSET>"
+}
+
+func SamplingStrategyTypeFromString(s string) (SamplingStrategyType, error) {
+  switch s {
+  case "PROBABILISTIC": return SamplingStrategyType_PROBABILISTIC, nil 
+  case "RATE_LIMITING": return SamplingStrategyType_RATE_LIMITING, nil 
+  }
+  return SamplingStrategyType(0), fmt.Errorf("not a valid SamplingStrategyType string")
+}
+
+
+func SamplingStrategyTypePtr(v SamplingStrategyType) *SamplingStrategyType { return &v }
+
+func (p SamplingStrategyType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *SamplingStrategyType) UnmarshalText(text []byte) error {
+q, err := SamplingStrategyTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *SamplingStrategyType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = SamplingStrategyType(v)
+return nil
+}
+
+func (p * SamplingStrategyType) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+return int64(*p), nil
+}
+// Attributes:
+//  - SamplingRate
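+//
+// SamplingRate is interpreted as a probability and is therefore expected to
+// lie in the range [0, 1] (an expectation of this struct's consumers, not
+// something enforced by this file).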
+type ProbabilisticSamplingStrategy struct {
+  SamplingRate float64 `thrift:"samplingRate,1,required" db:"samplingRate" json:"samplingRate"`
+}
+
+func NewProbabilisticSamplingStrategy() *ProbabilisticSamplingStrategy {
+  return &ProbabilisticSamplingStrategy{}
+}
+
+
+func (p *ProbabilisticSamplingStrategy) GetSamplingRate() float64 {
+  return p.SamplingRate
+}
+func (p *ProbabilisticSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetSamplingRate bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetSamplingRate = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetSamplingRate{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SamplingRate is not set"));
+  }
+  return nil
+}
+
+func (p *ProbabilisticSamplingStrategy)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.SamplingRate = v
+}
+  return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "ProbabilisticSamplingStrategy"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *ProbabilisticSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "samplingRate", thrift.DOUBLE, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:samplingRate: ", p), err) }
+  if err := oprot.WriteDouble(ctx, float64(p.SamplingRate)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.samplingRate (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:samplingRate: ", p), err) }
+  return err
+}
+
+func (p *ProbabilisticSamplingStrategy) Equals(other *ProbabilisticSamplingStrategy) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.SamplingRate != other.SamplingRate { return false }
+  return true
+}
+
+func (p *ProbabilisticSamplingStrategy) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("ProbabilisticSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+//  - MaxTracesPerSecond
+type RateLimitingSamplingStrategy struct {
+  MaxTracesPerSecond int16 `thrift:"maxTracesPerSecond,1,required" db:"maxTracesPerSecond" json:"maxTracesPerSecond"`
+}
+
+func NewRateLimitingSamplingStrategy() *RateLimitingSamplingStrategy {
+  return &RateLimitingSamplingStrategy{}
+}
+
+
+func (p *RateLimitingSamplingStrategy) GetMaxTracesPerSecond() int16 {
+  return p.MaxTracesPerSecond
+}
+func (p *RateLimitingSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetMaxTracesPerSecond bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I16 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetMaxTracesPerSecond = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetMaxTracesPerSecond{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field MaxTracesPerSecond is not set"));
+  }
+  return nil
+}
+
+func (p *RateLimitingSamplingStrategy)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI16(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.MaxTracesPerSecond = v
+}
+  return nil
+}
+
+func (p *RateLimitingSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "RateLimitingSamplingStrategy"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *RateLimitingSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "maxTracesPerSecond", thrift.I16, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:maxTracesPerSecond: ", p), err) }
+  if err := oprot.WriteI16(ctx, int16(p.MaxTracesPerSecond)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.maxTracesPerSecond (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:maxTracesPerSecond: ", p), err) }
+  return err
+}
+
+func (p *RateLimitingSamplingStrategy) Equals(other *RateLimitingSamplingStrategy) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.MaxTracesPerSecond != other.MaxTracesPerSecond { return false }
+  return true
+}
+
+func (p *RateLimitingSamplingStrategy) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("RateLimitingSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+//  - Operation
+//  - ProbabilisticSampling
+type OperationSamplingStrategy struct {
+  Operation string `thrift:"operation,1,required" db:"operation" json:"operation"`
+  ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2,required" db:"probabilisticSampling" json:"probabilisticSampling"`
+}
+
+func NewOperationSamplingStrategy() *OperationSamplingStrategy {
+  return &OperationSamplingStrategy{}
+}
+
+
+func (p *OperationSamplingStrategy) GetOperation() string {
+  return p.Operation
+}
+var OperationSamplingStrategy_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
+func (p *OperationSamplingStrategy) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
+  if !p.IsSetProbabilisticSampling() {
+    return OperationSamplingStrategy_ProbabilisticSampling_DEFAULT
+  }
+return p.ProbabilisticSampling
+}
+func (p *OperationSamplingStrategy) IsSetProbabilisticSampling() bool {
+  return p.ProbabilisticSampling != nil
+}
+
+func (p *OperationSamplingStrategy) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetOperation bool = false;
+  var issetProbabilisticSampling bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetOperation = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetProbabilisticSampling = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetOperation{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Operation is not set"));
+  }
+  if !issetProbabilisticSampling{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ProbabilisticSampling is not set"));
+  }
+  return nil
+}
+
+func (p *OperationSamplingStrategy)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Operation = v
+}
+  return nil
+}
+
+func (p *OperationSamplingStrategy)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
+  if err := p.ProbabilisticSampling.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
+  }
+  return nil
+}
+
+func (p *OperationSamplingStrategy) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "OperationSamplingStrategy"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *OperationSamplingStrategy) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "operation", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:operation: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.Operation)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.operation (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:operation: ", p), err) }
+  return err
+}
+
+func (p *OperationSamplingStrategy) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "probabilisticSampling", thrift.STRUCT, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) }
+  if err := p.ProbabilisticSampling.Write(ctx, oprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) }
+  return err
+}
+
+func (p *OperationSamplingStrategy) Equals(other *OperationSamplingStrategy) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Operation != other.Operation { return false }
+  if !p.ProbabilisticSampling.Equals(other.ProbabilisticSampling) { return false }
+  return true
+}
+
+func (p *OperationSamplingStrategy) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("OperationSamplingStrategy(%+v)", *p)
+}
+
+// Attributes:
+//  - DefaultSamplingProbability
+//  - DefaultLowerBoundTracesPerSecond
+//  - PerOperationStrategies
+//  - DefaultUpperBoundTracesPerSecond
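+//
+// The default probability and lower bound apply to operations that have no
+// entry in PerOperationStrategies; DefaultUpperBoundTracesPerSecond is
+// optional, so it is modelled as a pointer to distinguish "unset" from zero.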
+type PerOperationSamplingStrategies struct {
+  DefaultSamplingProbability float64 `thrift:"defaultSamplingProbability,1,required" db:"defaultSamplingProbability" json:"defaultSamplingProbability"`
+  DefaultLowerBoundTracesPerSecond float64 `thrift:"defaultLowerBoundTracesPerSecond,2,required" db:"defaultLowerBoundTracesPerSecond" json:"defaultLowerBoundTracesPerSecond"`
+  PerOperationStrategies []*OperationSamplingStrategy `thrift:"perOperationStrategies,3,required" db:"perOperationStrategies" json:"perOperationStrategies"`
+  DefaultUpperBoundTracesPerSecond *float64 `thrift:"defaultUpperBoundTracesPerSecond,4" db:"defaultUpperBoundTracesPerSecond" json:"defaultUpperBoundTracesPerSecond,omitempty"`
+}
+
+func NewPerOperationSamplingStrategies() *PerOperationSamplingStrategies {
+  return &PerOperationSamplingStrategies{}
+}
+
+
+func (p *PerOperationSamplingStrategies) GetDefaultSamplingProbability() float64 {
+  return p.DefaultSamplingProbability
+}
+
+func (p *PerOperationSamplingStrategies) GetDefaultLowerBoundTracesPerSecond() float64 {
+  return p.DefaultLowerBoundTracesPerSecond
+}
+
+func (p *PerOperationSamplingStrategies) GetPerOperationStrategies() []*OperationSamplingStrategy {
+  return p.PerOperationStrategies
+}
+var PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT float64
+func (p *PerOperationSamplingStrategies) GetDefaultUpperBoundTracesPerSecond() float64 {
+  if !p.IsSetDefaultUpperBoundTracesPerSecond() {
+    return PerOperationSamplingStrategies_DefaultUpperBoundTracesPerSecond_DEFAULT
+  }
+return *p.DefaultUpperBoundTracesPerSecond
+}
+func (p *PerOperationSamplingStrategies) IsSetDefaultUpperBoundTracesPerSecond() bool {
+  return p.DefaultUpperBoundTracesPerSecond != nil
+}
+
+func (p *PerOperationSamplingStrategies) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetDefaultSamplingProbability bool = false;
+  var issetDefaultLowerBoundTracesPerSecond bool = false;
+  var issetPerOperationStrategies bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetDefaultSamplingProbability = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+        issetDefaultLowerBoundTracesPerSecond = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+        issetPerOperationStrategies = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.DOUBLE {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetDefaultSamplingProbability{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultSamplingProbability is not set"));
+  }
+  if !issetDefaultLowerBoundTracesPerSecond{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field DefaultLowerBoundTracesPerSecond is not set"));
+  }
+  if !issetPerOperationStrategies{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field PerOperationStrategies is not set"));
+  }
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.DefaultSamplingProbability = v
+}
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.DefaultLowerBoundTracesPerSecond = v
+}
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*OperationSamplingStrategy, 0, size)
+  p.PerOperationStrategies =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem0 := &OperationSamplingStrategy{}
+    if err := _elem0.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+    }
+    p.PerOperationStrategies = append(p.PerOperationStrategies, _elem0)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadDouble(ctx); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.DefaultUpperBoundTracesPerSecond = &v
+}
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "PerOperationSamplingStrategies"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *PerOperationSamplingStrategies) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "defaultSamplingProbability", thrift.DOUBLE, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:defaultSamplingProbability: ", p), err) }
+  if err := oprot.WriteDouble(ctx, float64(p.DefaultSamplingProbability)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.defaultSamplingProbability (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:defaultSamplingProbability: ", p), err) }
+  return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "defaultLowerBoundTracesPerSecond", thrift.DOUBLE, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:defaultLowerBoundTracesPerSecond: ", p), err) }
+  if err := oprot.WriteDouble(ctx, float64(p.DefaultLowerBoundTracesPerSecond)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.defaultLowerBoundTracesPerSecond (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:defaultLowerBoundTracesPerSecond: ", p), err) }
+  return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "perOperationStrategies", thrift.LIST, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:perOperationStrategies: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.PerOperationStrategies)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.PerOperationStrategies {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:perOperationStrategies: ", p), err) }
+  return err
+}
+
+func (p *PerOperationSamplingStrategies) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetDefaultUpperBoundTracesPerSecond() {
+    if err := oprot.WriteFieldBegin(ctx, "defaultUpperBoundTracesPerSecond", thrift.DOUBLE, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:defaultUpperBoundTracesPerSecond: ", p), err) }
+    if err := oprot.WriteDouble(ctx, float64(*p.DefaultUpperBoundTracesPerSecond)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.defaultUpperBoundTracesPerSecond (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:defaultUpperBoundTracesPerSecond: ", p), err) }
+  }
+  return err
+}
+
+func (p *PerOperationSamplingStrategies) Equals(other *PerOperationSamplingStrategies) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.DefaultSamplingProbability != other.DefaultSamplingProbability { return false }
+  if p.DefaultLowerBoundTracesPerSecond != other.DefaultLowerBoundTracesPerSecond { return false }
+  if len(p.PerOperationStrategies) != len(other.PerOperationStrategies) { return false }
+  for i, _tgt := range p.PerOperationStrategies {
+    _src1 := other.PerOperationStrategies[i]
+    if !_tgt.Equals(_src1) { return false }
+  }
+  if p.DefaultUpperBoundTracesPerSecond != other.DefaultUpperBoundTracesPerSecond {
+    if p.DefaultUpperBoundTracesPerSecond == nil || other.DefaultUpperBoundTracesPerSecond == nil {
+      return false
+    }
+    if (*p.DefaultUpperBoundTracesPerSecond) != (*other.DefaultUpperBoundTracesPerSecond) { return false }
+  }
+  return true
+}
+
+func (p *PerOperationSamplingStrategies) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("PerOperationSamplingStrategies(%+v)", *p)
+}
+
+// Attributes:
+//  - StrategyType
+//  - ProbabilisticSampling
+//  - RateLimitingSampling
+//  - OperationSampling
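+//
+// Only StrategyType is required; the remaining fields are optional, and a
+// server typically populates the one that matches StrategyType (plus
+// OperationSampling when per-operation overrides are available).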
+type SamplingStrategyResponse struct {
+  StrategyType SamplingStrategyType `thrift:"strategyType,1,required" db:"strategyType" json:"strategyType"`
+  ProbabilisticSampling *ProbabilisticSamplingStrategy `thrift:"probabilisticSampling,2" db:"probabilisticSampling" json:"probabilisticSampling,omitempty"`
+  RateLimitingSampling *RateLimitingSamplingStrategy `thrift:"rateLimitingSampling,3" db:"rateLimitingSampling" json:"rateLimitingSampling,omitempty"`
+  OperationSampling *PerOperationSamplingStrategies `thrift:"operationSampling,4" db:"operationSampling" json:"operationSampling,omitempty"`
+}
+
+func NewSamplingStrategyResponse() *SamplingStrategyResponse {
+  return &SamplingStrategyResponse{}
+}
+
+
+func (p *SamplingStrategyResponse) GetStrategyType() SamplingStrategyType {
+  return p.StrategyType
+}
+var SamplingStrategyResponse_ProbabilisticSampling_DEFAULT *ProbabilisticSamplingStrategy
+func (p *SamplingStrategyResponse) GetProbabilisticSampling() *ProbabilisticSamplingStrategy {
+  if !p.IsSetProbabilisticSampling() {
+    return SamplingStrategyResponse_ProbabilisticSampling_DEFAULT
+  }
+return p.ProbabilisticSampling
+}
+var SamplingStrategyResponse_RateLimitingSampling_DEFAULT *RateLimitingSamplingStrategy
+func (p *SamplingStrategyResponse) GetRateLimitingSampling() *RateLimitingSamplingStrategy {
+  if !p.IsSetRateLimitingSampling() {
+    return SamplingStrategyResponse_RateLimitingSampling_DEFAULT
+  }
+return p.RateLimitingSampling
+}
+var SamplingStrategyResponse_OperationSampling_DEFAULT *PerOperationSamplingStrategies
+func (p *SamplingStrategyResponse) GetOperationSampling() *PerOperationSamplingStrategies {
+  if !p.IsSetOperationSampling() {
+    return SamplingStrategyResponse_OperationSampling_DEFAULT
+  }
+return p.OperationSampling
+}
+func (p *SamplingStrategyResponse) IsSetProbabilisticSampling() bool {
+  return p.ProbabilisticSampling != nil
+}
+
+func (p *SamplingStrategyResponse) IsSetRateLimitingSampling() bool {
+  return p.RateLimitingSampling != nil
+}
+
+func (p *SamplingStrategyResponse) IsSetOperationSampling() bool {
+  return p.OperationSampling != nil
+}
+
+func (p *SamplingStrategyResponse) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetStrategyType bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetStrategyType = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetStrategyType{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StrategyType is not set"));
+  }
+  return nil
+}
+
+func (p *SamplingStrategyResponse)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  temp := SamplingStrategyType(v)
+  p.StrategyType = temp
+}
+  return nil
+}
+
+func (p *SamplingStrategyResponse)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  p.ProbabilisticSampling = &ProbabilisticSamplingStrategy{}
+  if err := p.ProbabilisticSampling.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.ProbabilisticSampling), err)
+  }
+  return nil
+}
+
+func (p *SamplingStrategyResponse)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  p.RateLimitingSampling = &RateLimitingSamplingStrategy{}
+  if err := p.RateLimitingSampling.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.RateLimitingSampling), err)
+  }
+  return nil
+}
+
+func (p *SamplingStrategyResponse)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  p.OperationSampling = &PerOperationSamplingStrategies{}
+  if err := p.OperationSampling.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.OperationSampling), err)
+  }
+  return nil
+}
+
+func (p *SamplingStrategyResponse) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "SamplingStrategyResponse"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *SamplingStrategyResponse) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "strategyType", thrift.I32, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:strategyType: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.StrategyType)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.strategyType (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:strategyType: ", p), err) }
+  return err
+}
+
+func (p *SamplingStrategyResponse) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetProbabilisticSampling() {
+    if err := oprot.WriteFieldBegin(ctx, "probabilisticSampling", thrift.STRUCT, 2); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:probabilisticSampling: ", p), err) }
+    if err := p.ProbabilisticSampling.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.ProbabilisticSampling), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 2:probabilisticSampling: ", p), err) }
+  }
+  return err
+}
+
+func (p *SamplingStrategyResponse) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetRateLimitingSampling() {
+    if err := oprot.WriteFieldBegin(ctx, "rateLimitingSampling", thrift.STRUCT, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:rateLimitingSampling: ", p), err) }
+    if err := p.RateLimitingSampling.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.RateLimitingSampling), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:rateLimitingSampling: ", p), err) }
+  }
+  return err
+}
+
+func (p *SamplingStrategyResponse) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetOperationSampling() {
+    if err := oprot.WriteFieldBegin(ctx, "operationSampling", thrift.STRUCT, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:operationSampling: ", p), err) }
+    if err := p.OperationSampling.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.OperationSampling), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:operationSampling: ", p), err) }
+  }
+  return err
+}
+
+func (p *SamplingStrategyResponse) Equals(other *SamplingStrategyResponse) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.StrategyType != other.StrategyType { return false }
+  if !p.ProbabilisticSampling.Equals(other.ProbabilisticSampling) { return false }
+  if !p.RateLimitingSampling.Equals(other.RateLimitingSampling) { return false }
+  if !p.OperationSampling.Equals(other.OperationSampling) { return false }
+  return true
+}
+
+func (p *SamplingStrategyResponse) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("SamplingStrategyResponse(%+v)", *p)
+}
+
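+// SamplingManager is the Thrift service from which a client fetches the
+// sampling strategy it should apply for a given service name.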
+type SamplingManager interface {
+  // Parameters:
+  //  - ServiceName
+  GetSamplingStrategy(ctx context.Context, serviceName string) (_r *SamplingStrategyResponse, _err error)
+}
+
+type SamplingManagerClient struct {
+  c thrift.TClient
+  meta thrift.ResponseMeta
+}
+
+func NewSamplingManagerClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *SamplingManagerClient {
+  return &SamplingManagerClient{
+    c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+  }
+}
+
+func NewSamplingManagerClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *SamplingManagerClient {
+  return &SamplingManagerClient{
+    c: thrift.NewTStandardClient(iprot, oprot),
+  }
+}
+
+func NewSamplingManagerClient(c thrift.TClient) *SamplingManagerClient {
+  return &SamplingManagerClient{
+    c: c,
+  }
+}
+
+func (p *SamplingManagerClient) Client_() thrift.TClient {
+  return p.c
+}
+
+func (p *SamplingManagerClient) LastResponseMeta_() thrift.ResponseMeta {
+  return p.meta
+}
+
+func (p *SamplingManagerClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+  p.meta = meta
+}
+
+// Parameters:
+//  - ServiceName
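+//
+// GetSamplingStrategy issues a single "getSamplingStrategy" call on the
+// underlying Thrift client, records the response metadata, and returns the
+// server's strategy response.
+//
+// Illustrative use only (iprot/oprot and the service name are placeholders,
+// not part of this file):
+//
+//	client := NewSamplingManagerClient(thrift.NewTStandardClient(iprot, oprot))
+//	resp, err := client.GetSamplingStrategy(ctx, "my-service")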
+func (p *SamplingManagerClient) GetSamplingStrategy(ctx context.Context, serviceName string) (_r *SamplingStrategyResponse, _err error) {
+  var _args2 SamplingManagerGetSamplingStrategyArgs
+  _args2.ServiceName = serviceName
+  var _result4 SamplingManagerGetSamplingStrategyResult
+  var _meta3 thrift.ResponseMeta
+  _meta3, _err = p.Client_().Call(ctx, "getSamplingStrategy", &_args2, &_result4)
+  p.SetLastResponseMeta_(_meta3)
+  if _err != nil {
+    return
+  }
+  return _result4.GetSuccess(), nil
+}
+
+type SamplingManagerProcessor struct {
+  processorMap map[string]thrift.TProcessorFunction
+  handler SamplingManager
+}
+
+func (p *SamplingManagerProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+  p.processorMap[key] = processor
+}
+
+func (p *SamplingManagerProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+  processor, ok = p.processorMap[key]
+  return processor, ok
+}
+
+func (p *SamplingManagerProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+  return p.processorMap
+}
+
+func NewSamplingManagerProcessor(handler SamplingManager) *SamplingManagerProcessor {
+
+  self5 := &SamplingManagerProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
+  self5.processorMap["getSamplingStrategy"] = &samplingManagerProcessorGetSamplingStrategy{handler:handler}
+  return self5
+}
+
+func (p *SamplingManagerProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+  if err2 != nil { return false, thrift.WrapTException(err2) }
+  if processor, ok := p.GetProcessorFunction(name); ok {
+    return processor.Process(ctx, seqId, iprot, oprot)
+  }
+  iprot.Skip(ctx, thrift.STRUCT)
+  iprot.ReadMessageEnd(ctx)
+  x6 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+  oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+  x6.Write(ctx, oprot)
+  oprot.WriteMessageEnd(ctx)
+  oprot.Flush(ctx)
+  return false, x6
+
+}
+
+type samplingManagerProcessorGetSamplingStrategy struct {
+  handler SamplingManager
+}
+
+func (p *samplingManagerProcessorGetSamplingStrategy) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  args := SamplingManagerGetSamplingStrategyArgs{}
+  var err2 error
+  if err2 = args.Read(ctx, iprot); err2 != nil {
+    iprot.ReadMessageEnd(ctx)
+    x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+    oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return false, thrift.WrapTException(err2)
+  }
+  iprot.ReadMessageEnd(ctx)
+
+  tickerCancel := func() {}
+  // Start a goroutine to do server side connectivity check.
+  if thrift.ServerConnectivityCheckInterval > 0 {
+    var cancel context.CancelFunc
+    ctx, cancel = context.WithCancel(ctx)
+    defer cancel()
+    var tickerCtx context.Context
+    tickerCtx, tickerCancel = context.WithCancel(context.Background())
+    defer tickerCancel()
+    go func(ctx context.Context, cancel context.CancelFunc) {
+      ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+      defer ticker.Stop()
+      for {
+        select {
+        case <-ctx.Done():
+          return
+        case <-ticker.C:
+          if !iprot.Transport().IsOpen() {
+            cancel()
+            return
+          }
+        }
+      }
+    }(tickerCtx, cancel)
+  }
+
+  result := SamplingManagerGetSamplingStrategyResult{}
+  var retval *SamplingStrategyResponse
+  if retval, err2 = p.handler.GetSamplingStrategy(ctx, args.ServiceName); err2 != nil {
+    tickerCancel()
+    if err2 == thrift.ErrAbandonRequest {
+      return false, thrift.WrapTException(err2)
+    }
+    x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing getSamplingStrategy: " + err2.Error())
+    oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return true, thrift.WrapTException(err2)
+  } else {
+    result.Success = retval
+  }
+  tickerCancel()
+  if err2 = oprot.WriteMessageBegin(ctx, "getSamplingStrategy", thrift.REPLY, seqId); err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err != nil {
+    return
+  }
+  return true, err
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - ServiceName
+type SamplingManagerGetSamplingStrategyArgs struct {
+  ServiceName string `thrift:"serviceName,1" db:"serviceName" json:"serviceName"`
+}
+
+func NewSamplingManagerGetSamplingStrategyArgs() *SamplingManagerGetSamplingStrategyArgs {
+  return &SamplingManagerGetSamplingStrategyArgs{}
+}
+
+
+func (p *SamplingManagerGetSamplingStrategyArgs) GetServiceName() string {
+  return p.ServiceName
+}
+func (p *SamplingManagerGetSamplingStrategyArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.ServiceName = v
+}
+  return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "getSamplingStrategy_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "serviceName", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) }
+  return err
+}
+
+func (p *SamplingManagerGetSamplingStrategyArgs) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("SamplingManagerGetSamplingStrategyArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Success
+type SamplingManagerGetSamplingStrategyResult struct {
+  Success *SamplingStrategyResponse `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewSamplingManagerGetSamplingStrategyResult() *SamplingManagerGetSamplingStrategyResult {
+  return &SamplingManagerGetSamplingStrategyResult{}
+}
+
+var SamplingManagerGetSamplingStrategyResult_Success_DEFAULT *SamplingStrategyResponse
+func (p *SamplingManagerGetSamplingStrategyResult) GetSuccess() *SamplingStrategyResponse {
+  if !p.IsSetSuccess() {
+    return SamplingManagerGetSamplingStrategyResult_Success_DEFAULT
+  }
+return p.Success
+}
+func (p *SamplingManagerGetSamplingStrategyResult) IsSetSuccess() bool {
+  return p.Success != nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 0:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField0(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult)  ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Success = &SamplingStrategyResponse{}
+  if err := p.Success.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Success), err)
+  }
+  return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "getSamplingStrategy_result"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField0(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSuccess() {
+    if err := oprot.WriteFieldBegin(ctx, "success", thrift.STRUCT, 0); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+    if err := p.Success.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Success), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+  }
+  return err
+}
+
+func (p *SamplingManagerGetSamplingStrategyResult) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("SamplingManagerGetSamplingStrategyResult(%+v)", *p)
+}
+
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go
new file mode 100644
index 0000000..ebf4301
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/GoUnusedProtection__.go
@@ -0,0 +1,6 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package zipkincore
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go
new file mode 100644
index 0000000..7a924b9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore-consts.go
@@ -0,0 +1,39 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package zipkincore
+
+import(
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+const CLIENT_SEND = "cs"
+const CLIENT_RECV = "cr"
+const SERVER_SEND = "ss"
+const SERVER_RECV = "sr"
+const MESSAGE_SEND = "ms"
+const MESSAGE_RECV = "mr"
+const WIRE_SEND = "ws"
+const WIRE_RECV = "wr"
+const CLIENT_SEND_FRAGMENT = "csf"
+const CLIENT_RECV_FRAGMENT = "crf"
+const SERVER_SEND_FRAGMENT = "ssf"
+const SERVER_RECV_FRAGMENT = "srf"
+const LOCAL_COMPONENT = "lc"
+const CLIENT_ADDR = "ca"
+const SERVER_ADDR = "sa"
+const MESSAGE_ADDR = "ma"
+
+func init() {
+}
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go
new file mode 100644
index 0000000..b00ecd2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift-gen/zipkincore/zipkincore.go
@@ -0,0 +1,1853 @@
+// Code generated by Thrift Compiler (0.14.1). DO NOT EDIT.
+
+package zipkincore
+
+import(
+	"bytes"
+	"context"
+	"database/sql/driver"
+	"errors"
+	"fmt"
+	"time"
+	"github.com/uber/jaeger-client-go/thrift"
+)
+
+// (needed to ensure safety because of naive import list construction.)
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = time.Now
+var _ = bytes.Equal
+
+type AnnotationType int64
+const (
+  AnnotationType_BOOL AnnotationType = 0
+  AnnotationType_BYTES AnnotationType = 1
+  AnnotationType_I16 AnnotationType = 2
+  AnnotationType_I32 AnnotationType = 3
+  AnnotationType_I64 AnnotationType = 4
+  AnnotationType_DOUBLE AnnotationType = 5
+  AnnotationType_STRING AnnotationType = 6
+)
+
+func (p AnnotationType) String() string {
+  switch p {
+  case AnnotationType_BOOL: return "BOOL"
+  case AnnotationType_BYTES: return "BYTES"
+  case AnnotationType_I16: return "I16"
+  case AnnotationType_I32: return "I32"
+  case AnnotationType_I64: return "I64"
+  case AnnotationType_DOUBLE: return "DOUBLE"
+  case AnnotationType_STRING: return "STRING"
+  }
+  return "<UNSET>"
+}
+
+func AnnotationTypeFromString(s string) (AnnotationType, error) {
+  switch s {
+  case "BOOL": return AnnotationType_BOOL, nil 
+  case "BYTES": return AnnotationType_BYTES, nil 
+  case "I16": return AnnotationType_I16, nil 
+  case "I32": return AnnotationType_I32, nil 
+  case "I64": return AnnotationType_I64, nil 
+  case "DOUBLE": return AnnotationType_DOUBLE, nil 
+  case "STRING": return AnnotationType_STRING, nil 
+  }
+  return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
+}
+
+
+func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
+
+func (p AnnotationType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *AnnotationType) UnmarshalText(text []byte) error {
+q, err := AnnotationTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *AnnotationType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = AnnotationType(v)
+return nil
+}
+
+func (p * AnnotationType) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+return int64(*p), nil
+}
+// Indicates the network context of a service recording an annotation with two
+// exceptions.
+// 
+// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR,
+// the endpoint indicates the source or destination of an RPC. This exception
+// allows zipkin to display network context of uninstrumented services, or
+// clients such as web browsers.
+// 
+// Attributes:
+//  - Ipv4: IPv4 host address packed into 4 bytes.
+// 
+// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
+//  - Port: IPv4 port
+// 
+// Note: this is to be treated as an unsigned integer, so watch for negatives.
+// 
+// Conventionally, when the port isn't known, port = 0.
+//  - ServiceName: Service name in lowercase, such as "memcache" or "zipkin-web"
+// 
+// Conventionally, when the service name isn't known, service_name = "unknown".
+//  - Ipv6: IPv6 host address packed into 16 bytes. Ex Inet6Address.getBytes()
+type Endpoint struct {
+  Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"`
+  Port int16 `thrift:"port,2" db:"port" json:"port"`
+  ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"`
+  Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"`
+}
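+
+// packIPv4Example is an illustrative sketch added for clarity (not emitted by
+// the Thrift compiler; the name is hypothetical). It shows the ipv4 encoding
+// documented above: the four address octets packed into a single int32, so
+// 1.2.3.4 becomes (1 << 24) | (2 << 16) | (3 << 8) | 4.
+func packIPv4Example(a, b, c, d byte) int32 {
+  return int32(a)<<24 | int32(b)<<16 | int32(c)<<8 | int32(d)
+}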
+
+func NewEndpoint() *Endpoint {
+  return &Endpoint{}
+}
+
+
+func (p *Endpoint) GetIpv4() int32 {
+  return p.Ipv4
+}
+
+func (p *Endpoint) GetPort() int16 {
+  return p.Port
+}
+
+func (p *Endpoint) GetServiceName() string {
+  return p.ServiceName
+}
+var Endpoint_Ipv6_DEFAULT []byte
+
+func (p *Endpoint) GetIpv6() []byte {
+  return p.Ipv6
+}
+func (p *Endpoint) IsSetIpv6() bool {
+  return p.Ipv6 != nil
+}
+
+func (p *Endpoint) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.I16 {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *Endpoint)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Ipv4 = v
+}
+  return nil
+}
+
+func (p *Endpoint)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI16(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.Port = v
+}
+  return nil
+}
+
+func (p *Endpoint)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.ServiceName = v
+}
+  return nil
+}
+
+func (p *Endpoint)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBinary(ctx); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.Ipv6 = v
+}
+  return nil
+}
+
+func (p *Endpoint) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Endpoint"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Endpoint) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "ipv4", thrift.I32, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.Ipv4)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) }
+  return err
+}
+
+func (p *Endpoint) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "port", thrift.I16, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) }
+  if err := oprot.WriteI16(ctx, int16(p.Port)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) }
+  return err
+}
+
+func (p *Endpoint) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "service_name", thrift.STRING, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.ServiceName)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) }
+  return err
+}
+
+func (p *Endpoint) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetIpv6() {
+    if err := oprot.WriteFieldBegin(ctx, "ipv6", thrift.STRING, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) }
+    if err := oprot.WriteBinary(ctx, p.Ipv6); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) }
+  }
+  return err
+}
+
+func (p *Endpoint) Equals(other *Endpoint) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Ipv4 != other.Ipv4 { return false }
+  if p.Port != other.Port { return false }
+  if p.ServiceName != other.ServiceName { return false }
+  if bytes.Compare(p.Ipv6, other.Ipv6) != 0 { return false }
+  return true
+}
+
+func (p *Endpoint) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Endpoint(%+v)", *p)
+}
+
+// An annotation is similar to a log statement. It includes a host field which
+// allows these events to be attributed properly and aggregated.
+// 
+// Attributes:
+//  - Timestamp: Microseconds from epoch.
+// 
+// This value should use the most precise value possible. For example,
+// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
+//  - Value
+//  - Host: Always the host that recorded the event. By specifying the host you allow
+// rollup of all events (such as client requests to a service) by IP address.
+type Annotation struct {
+  Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"`
+  Value string `thrift:"value,2" db:"value" json:"value"`
+  Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"`
+}
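+
+// serverRecvAnnotationExample is an illustrative sketch added for clarity (not
+// emitted by the Thrift compiler; the name is hypothetical). It follows the
+// Timestamp convention documented above (microseconds since the epoch) and
+// attributes the event to the recording host, here for a SERVER_RECV event.
+func serverRecvAnnotationExample(host *Endpoint) *Annotation {
+  return &Annotation{
+    Timestamp: time.Now().UnixNano() / int64(time.Microsecond),
+    Value:     SERVER_RECV,
+    Host:      host,
+  }
+}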
+
+func NewAnnotation() *Annotation {
+  return &Annotation{}
+}
+
+
+func (p *Annotation) GetTimestamp() int64 {
+  return p.Timestamp
+}
+
+func (p *Annotation) GetValue() string {
+  return p.Value
+}
+var Annotation_Host_DEFAULT *Endpoint
+func (p *Annotation) GetHost() *Endpoint {
+  if !p.IsSetHost() {
+    return Annotation_Host_DEFAULT
+  }
+return p.Host
+}
+func (p *Annotation) IsSetHost() bool {
+  return p.Host != nil
+}
+
+func (p *Annotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *Annotation)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Timestamp = v
+}
+  return nil
+}
+
+func (p *Annotation)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.Value = v
+}
+  return nil
+}
+
+func (p *Annotation)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Host = &Endpoint{}
+  if err := p.Host.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+  }
+  return nil
+}
+
+func (p *Annotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Annotation"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Annotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.Timestamp)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) }
+  return err
+}
+
+func (p *Annotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.Value)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
+  return err
+}
+
+func (p *Annotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetHost() {
+    if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) }
+    if err := p.Host.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) }
+  }
+  return err
+}
+
+func (p *Annotation) Equals(other *Annotation) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Timestamp != other.Timestamp { return false }
+  if p.Value != other.Value { return false }
+  if !p.Host.Equals(other.Host) { return false }
+  return true
+}
+
+func (p *Annotation) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Annotation(%+v)", *p)
+}
+
+// Binary annotations are tags applied to a Span to give it context. For
+// example, a binary annotation of "http.uri" could be the path to a resource in
+// an RPC call.
+// 
+// Binary annotations of type STRING are always queryable, though more a
+// historical implementation detail than a structural concern.
+// 
+// Binary annotations can repeat, and vary on the host. Similar to Annotation,
+// the host indicates who logged the event. This allows you to tell the
+// difference between the client and server side of the same key. For example,
+// the key "http.uri" might be different on the client and server side due to
+// rewriting, like "/api/v1/myresource" vs "/myresource". Via the host field,
+// you can see the different points of view, which often help in debugging.
+// 
+// Attributes:
+//  - Key
+//  - Value
+//  - AnnotationType
+//  - Host: The host that recorded tag, which allows you to differentiate between
+// multiple tags with the same key. There are two exceptions to this.
+// 
+// When the key is CLIENT_ADDR or SERVER_ADDR, host indicates the source or
+// destination of an RPC. This exception allows zipkin to display network
+// context of uninstrumented services, or clients such as web browsers.
+type BinaryAnnotation struct {
+  Key string `thrift:"key,1" db:"key" json:"key"`
+  Value []byte `thrift:"value,2" db:"value" json:"value"`
+  AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"`
+  Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"`
+}
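+
+// clientAddrAnnotationExample is an illustrative sketch added for clarity (not
+// emitted by the Thrift compiler; the name is hypothetical). It shows the
+// CLIENT_ADDR exception documented above: the host of this tag identifies the
+// source of the RPC rather than the service that recorded it. A boolean true
+// value is assumed here, following the usual Zipkin convention for address
+// annotations.
+func clientAddrAnnotationExample(clientEndpoint *Endpoint) *BinaryAnnotation {
+  return &BinaryAnnotation{
+    Key:            CLIENT_ADDR,
+    Value:          []byte{1}, // boolean true, per the assumed convention
+    AnnotationType: AnnotationType_BOOL,
+    Host:           clientEndpoint,
+  }
+}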
+
+func NewBinaryAnnotation() *BinaryAnnotation {
+  return &BinaryAnnotation{}
+}
+
+
+func (p *BinaryAnnotation) GetKey() string {
+  return p.Key
+}
+
+func (p *BinaryAnnotation) GetValue() []byte {
+  return p.Value
+}
+
+func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
+  return p.AnnotationType
+}
+var BinaryAnnotation_Host_DEFAULT *Endpoint
+func (p *BinaryAnnotation) GetHost() *Endpoint {
+  if !p.IsSetHost() {
+    return BinaryAnnotation_Host_DEFAULT
+  }
+return p.Host
+}
+func (p *BinaryAnnotation) IsSetHost() bool {
+  return p.Host != nil
+}
+
+func (p *BinaryAnnotation) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField2(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.STRUCT {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *BinaryAnnotation)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Key = v
+}
+  return nil
+}
+
+func (p *BinaryAnnotation)  ReadField2(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBinary(ctx); err != nil {
+  return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.Value = v
+}
+  return nil
+}
+
+func (p *BinaryAnnotation)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  temp := AnnotationType(v)
+  p.AnnotationType = temp
+}
+  return nil
+}
+
+func (p *BinaryAnnotation)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  p.Host = &Endpoint{}
+  if err := p.Host.Read(ctx, iprot); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
+  }
+  return nil
+}
+
+func (p *BinaryAnnotation) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "BinaryAnnotation"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField2(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *BinaryAnnotation) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "key", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.Key)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) }
+  return err
+}
+
+func (p *BinaryAnnotation) writeField2(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "value", thrift.STRING, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
+  if err := oprot.WriteBinary(ctx, p.Value); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
+  return err
+}
+
+func (p *BinaryAnnotation) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "annotation_type", thrift.I32, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) }
+  if err := oprot.WriteI32(ctx, int32(p.AnnotationType)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) }
+  return err
+}
+
+func (p *BinaryAnnotation) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetHost() {
+    if err := oprot.WriteFieldBegin(ctx, "host", thrift.STRUCT, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) }
+    if err := p.Host.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) }
+  }
+  return err
+}
+
+func (p *BinaryAnnotation) Equals(other *BinaryAnnotation) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Key != other.Key { return false }
+  if bytes.Compare(p.Value, other.Value) != 0 { return false }
+  if p.AnnotationType != other.AnnotationType { return false }
+  if !p.Host.Equals(other.Host) { return false }
+  return true
+}
+
+func (p *BinaryAnnotation) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
+}
+
+// A trace is a series of spans (often RPC calls) which form a latency tree.
+// 
+// The root span is where trace_id = id and parent_id = Nil. The root span is
+// usually the longest interval in the trace, starting with a SERVER_RECV
+// annotation and ending with a SERVER_SEND.
+// 
+// Attributes:
+//  - TraceID
+//  - Name: Span name in lowercase, rpc method for example
+// 
+// Conventionally, when the span name isn't known, name = "unknown".
+//  - ID
+//  - ParentID
+//  - Annotations
+//  - BinaryAnnotations
+//  - Debug
+//  - Timestamp: Microseconds from epoch of the creation of this span.
+// 
+// This value should be set directly by instrumentation, using the most
+// precise value possible. For example, gettimeofday or syncing nanoTime
+// against a tick of currentTimeMillis.
+// 
+// For compatibility with instrumentation that precede this field, collectors
+// or span stores can derive this via Annotation.timestamp.
+// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
+// 
+// This field is optional for compatibility with old data: first-party span
+// stores are expected to support this at time of introduction.
+//  - Duration: Measurement of duration in microseconds, used to support queries.
+// 
+// This value should be set directly, where possible. Doing so encourages
+// precise measurement decoupled from problems of clocks, such as skew or NTP
+// updates causing time to move backwards.
+// 
+// For compatibility with instrumentation that precede this field, collectors
+// or span stores can derive this by subtracting Annotation.timestamp.
+// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
+// 
+// If this field is persisted as unset, zipkin will continue to work, except
+// duration query support will be implementation-specific. Similarly, setting
+// this field non-atomically is implementation-specific.
+// 
+// This field is i64 vs i32 to support spans longer than 35 minutes.
+//  - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non-zero, this
+// means the trace uses 128 bit traceIds instead of 64 bit.
+type Span struct {
+  TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"`
+  // unused field # 2
+  Name string `thrift:"name,3" db:"name" json:"name"`
+  ID int64 `thrift:"id,4" db:"id" json:"id"`
+  ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"`
+  Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"`
+  // unused field # 7
+  BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"`
+  Debug bool `thrift:"debug,9" db:"debug" json:"debug"`
+  Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"`
+  Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"`
+  TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"`
+}
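+
+// deriveDurationExample is an illustrative sketch added for clarity (not
+// emitted by the Thrift compiler; the name is hypothetical). It implements the
+// fallback described in the Duration documentation above: when Duration is
+// unset, a collector can approximate it as
+// SERVER_SEND.timestamp - SERVER_RECV.timestamp from the span's annotations.
+func deriveDurationExample(s *Span) (int64, bool) {
+  if s == nil {
+    return 0, false
+  }
+  if s.IsSetDuration() {
+    return *s.Duration, true
+  }
+  var recv, send int64
+  var haveRecv, haveSend bool
+  for _, a := range s.Annotations {
+    switch a.Value {
+    case SERVER_RECV:
+      recv, haveRecv = a.Timestamp, true
+    case SERVER_SEND:
+      send, haveSend = a.Timestamp, true
+    }
+  }
+  if !haveRecv || !haveSend {
+    return 0, false
+  }
+  return send - recv, true
+}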
+
+func NewSpan() *Span {
+  return &Span{}
+}
+
+
+func (p *Span) GetTraceID() int64 {
+  return p.TraceID
+}
+
+func (p *Span) GetName() string {
+  return p.Name
+}
+
+func (p *Span) GetID() int64 {
+  return p.ID
+}
+var Span_ParentID_DEFAULT int64
+func (p *Span) GetParentID() int64 {
+  if !p.IsSetParentID() {
+    return Span_ParentID_DEFAULT
+  }
+return *p.ParentID
+}
+
+func (p *Span) GetAnnotations() []*Annotation {
+  return p.Annotations
+}
+
+func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
+  return p.BinaryAnnotations
+}
+var Span_Debug_DEFAULT bool = false
+
+func (p *Span) GetDebug() bool {
+  return p.Debug
+}
+var Span_Timestamp_DEFAULT int64
+func (p *Span) GetTimestamp() int64 {
+  if !p.IsSetTimestamp() {
+    return Span_Timestamp_DEFAULT
+  }
+return *p.Timestamp
+}
+var Span_Duration_DEFAULT int64
+func (p *Span) GetDuration() int64 {
+  if !p.IsSetDuration() {
+    return Span_Duration_DEFAULT
+  }
+return *p.Duration
+}
+var Span_TraceIDHigh_DEFAULT int64
+func (p *Span) GetTraceIDHigh() int64 {
+  if !p.IsSetTraceIDHigh() {
+    return Span_TraceIDHigh_DEFAULT
+  }
+return *p.TraceIDHigh
+}
+func (p *Span) IsSetParentID() bool {
+  return p.ParentID != nil
+}
+
+func (p *Span) IsSetDebug() bool {
+  return p.Debug != Span_Debug_DEFAULT
+}
+
+func (p *Span) IsSetTimestamp() bool {
+  return p.Timestamp != nil
+}
+
+func (p *Span) IsSetDuration() bool {
+  return p.Duration != nil
+}
+
+func (p *Span) IsSetTraceIDHigh() bool {
+  return p.TraceIDHigh != nil
+}
+
+func (p *Span) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 3:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField3(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 4:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField4(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 5:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField5(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 6:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField6(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 8:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField8(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 9:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField9(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 10:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField10(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 11:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField11(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 12:
+      if fieldTypeId == thrift.I64 {
+        if err := p.ReadField12(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *Span)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.TraceID = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField3(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(ctx); err != nil {
+  return thrift.PrependError("error reading field 3: ", err)
+} else {
+  p.Name = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField4(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 4: ", err)
+} else {
+  p.ID = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField5(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 5: ", err)
+} else {
+  p.ParentID = &v
+}
+  return nil
+}
+
+func (p *Span)  ReadField6(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Annotation, 0, size)
+  p.Annotations =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem0 := &Annotation{}
+    if err := _elem0.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
+    }
+    p.Annotations = append(p.Annotations, _elem0)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Span)  ReadField8(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*BinaryAnnotation, 0, size)
+  p.BinaryAnnotations =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem1 := &BinaryAnnotation{}
+    if err := _elem1.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
+    }
+    p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *Span)  ReadField9(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 9: ", err)
+} else {
+  p.Debug = v
+}
+  return nil
+}
+
+func (p *Span)  ReadField10(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 10: ", err)
+} else {
+  p.Timestamp = &v
+}
+  return nil
+}
+
+func (p *Span)  ReadField11(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 11: ", err)
+} else {
+  p.Duration = &v
+}
+  return nil
+}
+
+func (p *Span)  ReadField12(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI64(ctx); err != nil {
+  return thrift.PrependError("error reading field 12: ", err)
+} else {
+  p.TraceIDHigh = &v
+}
+  return nil
+}
+
+func (p *Span) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Span"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+    if err := p.writeField3(ctx, oprot); err != nil { return err }
+    if err := p.writeField4(ctx, oprot); err != nil { return err }
+    if err := p.writeField5(ctx, oprot); err != nil { return err }
+    if err := p.writeField6(ctx, oprot); err != nil { return err }
+    if err := p.writeField8(ctx, oprot); err != nil { return err }
+    if err := p.writeField9(ctx, oprot); err != nil { return err }
+    if err := p.writeField10(ctx, oprot); err != nil { return err }
+    if err := p.writeField11(ctx, oprot); err != nil { return err }
+    if err := p.writeField12(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Span) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "trace_id", thrift.I64, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.TraceID)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField3(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "name", thrift.STRING, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) }
+  if err := oprot.WriteString(ctx, string(p.Name)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField4(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "id", thrift.I64, 4); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) }
+  if err := oprot.WriteI64(ctx, int64(p.ID)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField5(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetParentID() {
+    if err := oprot.WriteFieldBegin(ctx, "parent_id", thrift.I64, 5); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.ParentID)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField6(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "annotations", thrift.LIST, 6); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Annotations)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Annotations {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField8(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "binary_annotations", thrift.LIST, 8); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.BinaryAnnotations {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) }
+  return err
+}
+
+func (p *Span) writeField9(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetDebug() {
+    if err := oprot.WriteFieldBegin(ctx, "debug", thrift.BOOL, 9); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) }
+    if err := oprot.WriteBool(ctx, bool(p.Debug)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField10(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetTimestamp() {
+    if err := oprot.WriteFieldBegin(ctx, "timestamp", thrift.I64, 10); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.Timestamp)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField11(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetDuration() {
+    if err := oprot.WriteFieldBegin(ctx, "duration", thrift.I64, 11); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.Duration)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) writeField12(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetTraceIDHigh() {
+    if err := oprot.WriteFieldBegin(ctx, "trace_id_high", thrift.I64, 12); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) }
+    if err := oprot.WriteI64(ctx, int64(*p.TraceIDHigh)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) }
+  }
+  return err
+}
+
+func (p *Span) Equals(other *Span) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.TraceID != other.TraceID { return false }
+  if p.Name != other.Name { return false }
+  if p.ID != other.ID { return false }
+  if p.ParentID != other.ParentID {
+    if p.ParentID == nil || other.ParentID == nil {
+      return false
+    }
+    if (*p.ParentID) != (*other.ParentID) { return false }
+  }
+  if len(p.Annotations) != len(other.Annotations) { return false }
+  for i, _tgt := range p.Annotations {
+    _src2 := other.Annotations[i]
+    if !_tgt.Equals(_src2) { return false }
+  }
+  if len(p.BinaryAnnotations) != len(other.BinaryAnnotations) { return false }
+  for i, _tgt := range p.BinaryAnnotations {
+    _src3 := other.BinaryAnnotations[i]
+    if !_tgt.Equals(_src3) { return false }
+  }
+  if p.Debug != other.Debug { return false }
+  if p.Timestamp != other.Timestamp {
+    if p.Timestamp == nil || other.Timestamp == nil {
+      return false
+    }
+    if (*p.Timestamp) != (*other.Timestamp) { return false }
+  }
+  if p.Duration != other.Duration {
+    if p.Duration == nil || other.Duration == nil {
+      return false
+    }
+    if (*p.Duration) != (*other.Duration) { return false }
+  }
+  if p.TraceIDHigh != other.TraceIDHigh {
+    if p.TraceIDHigh == nil || other.TraceIDHigh == nil {
+      return false
+    }
+    if (*p.TraceIDHigh) != (*other.TraceIDHigh) { return false }
+  }
+  return true
+}
+
+func (p *Span) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Span(%+v)", *p)
+}
+
+// Attributes:
+//  - Ok
+type Response struct {
+  Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"`
+}
+
+func NewResponse() *Response {
+  return &Response{}
+}
+
+
+func (p *Response) GetOk() bool {
+  return p.Ok
+}
+func (p *Response) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+  var issetOk bool = false;
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.BOOL {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+        issetOk = true
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  if !issetOk{
+    return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set"));
+  }
+  return nil
+}
+
+func (p *Response)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadBool(ctx); err != nil {
+  return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Ok = v
+}
+  return nil
+}
+
+func (p *Response) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "Response"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Response) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "ok", thrift.BOOL, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) }
+  if err := oprot.WriteBool(ctx, bool(p.Ok)); err != nil {
+  return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) }
+  return err
+}
+
+func (p *Response) Equals(other *Response) bool {
+  if p == other {
+    return true
+  } else if p == nil || other == nil {
+    return false
+  }
+  if p.Ok != other.Ok { return false }
+  return true
+}
+
+func (p *Response) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Response(%+v)", *p)
+}
+
+type ZipkinCollector interface {
+  // Parameters:
+  //  - Spans
+  SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error)
+}
+
+type ZipkinCollectorClient struct {
+  c thrift.TClient
+  meta thrift.ResponseMeta
+}
+
+func NewZipkinCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ZipkinCollectorClient {
+  return &ZipkinCollectorClient{
+    c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)),
+  }
+}
+
+func NewZipkinCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ZipkinCollectorClient {
+  return &ZipkinCollectorClient{
+    c: thrift.NewTStandardClient(iprot, oprot),
+  }
+}
+
+func NewZipkinCollectorClient(c thrift.TClient) *ZipkinCollectorClient {
+  return &ZipkinCollectorClient{
+    c: c,
+  }
+}
+
+func (p *ZipkinCollectorClient) Client_() thrift.TClient {
+  return p.c
+}
+
+func (p *ZipkinCollectorClient) LastResponseMeta_() thrift.ResponseMeta {
+  return p.meta
+}
+
+func (p *ZipkinCollectorClient) SetLastResponseMeta_(meta thrift.ResponseMeta) {
+  p.meta = meta
+}
+
+// Parameters:
+//  - Spans
+func (p *ZipkinCollectorClient) SubmitZipkinBatch(ctx context.Context, spans []*Span) (_r []*Response, _err error) {
+  var _args4 ZipkinCollectorSubmitZipkinBatchArgs
+  _args4.Spans = spans
+  var _result6 ZipkinCollectorSubmitZipkinBatchResult
+  var _meta5 thrift.ResponseMeta
+  _meta5, _err = p.Client_().Call(ctx, "submitZipkinBatch", &_args4, &_result6)
+  p.SetLastResponseMeta_(_meta5)
+  if _err != nil {
+    return
+  }
+  return _result6.GetSuccess(), nil
+}
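+
+// A minimal usage sketch (assuming an already-opened thrift.TTransport named
+// trans and a thrift.TProtocolFactory named factory):
+//
+//   client := NewZipkinCollectorClientFactory(trans, factory)
+//   responses, err := client.SubmitZipkinBatch(context.Background(), spans)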
+
+type ZipkinCollectorProcessor struct {
+  processorMap map[string]thrift.TProcessorFunction
+  handler ZipkinCollector
+}
+
+func (p *ZipkinCollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
+  p.processorMap[key] = processor
+}
+
+func (p *ZipkinCollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
+  processor, ok = p.processorMap[key]
+  return processor, ok
+}
+
+func (p *ZipkinCollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
+  return p.processorMap
+}
+
+func NewZipkinCollectorProcessor(handler ZipkinCollector) *ZipkinCollectorProcessor {
+
+  self7 := &ZipkinCollectorProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)}
+  self7.processorMap["submitZipkinBatch"] = &zipkinCollectorProcessorSubmitZipkinBatch{handler:handler}
+  return self7
+}
+
+func (p *ZipkinCollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  name, _, seqId, err2 := iprot.ReadMessageBegin(ctx)
+  if err2 != nil { return false, thrift.WrapTException(err2) }
+  if processor, ok := p.GetProcessorFunction(name); ok {
+    return processor.Process(ctx, seqId, iprot, oprot)
+  }
+  iprot.Skip(ctx, thrift.STRUCT)
+  iprot.ReadMessageEnd(ctx)
+  x8 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name)
+  oprot.WriteMessageBegin(ctx, name, thrift.EXCEPTION, seqId)
+  x8.Write(ctx, oprot)
+  oprot.WriteMessageEnd(ctx)
+  oprot.Flush(ctx)
+  return false, x8
+
+}
+
+type zipkinCollectorProcessorSubmitZipkinBatch struct {
+  handler ZipkinCollector
+}
+
+func (p *zipkinCollectorProcessorSubmitZipkinBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
+  args := ZipkinCollectorSubmitZipkinBatchArgs{}
+  var err2 error
+  if err2 = args.Read(ctx, iprot); err2 != nil {
+    iprot.ReadMessageEnd(ctx)
+    x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err2.Error())
+    oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return false, thrift.WrapTException(err2)
+  }
+  iprot.ReadMessageEnd(ctx)
+
+  tickerCancel := func() {}
+  // Start a goroutine to do server side connectivity check.
+  if thrift.ServerConnectivityCheckInterval > 0 {
+    var cancel context.CancelFunc
+    ctx, cancel = context.WithCancel(ctx)
+    defer cancel()
+    var tickerCtx context.Context
+    tickerCtx, tickerCancel = context.WithCancel(context.Background())
+    defer tickerCancel()
+    go func(ctx context.Context, cancel context.CancelFunc) {
+      ticker := time.NewTicker(thrift.ServerConnectivityCheckInterval)
+      defer ticker.Stop()
+      for {
+        select {
+        case <-ctx.Done():
+          return
+        case <-ticker.C:
+          if !iprot.Transport().IsOpen() {
+            cancel()
+            return
+          }
+        }
+      }
+    }(tickerCtx, cancel)
+  }
+
+  result := ZipkinCollectorSubmitZipkinBatchResult{}
+  var retval []*Response
+  if retval, err2 = p.handler.SubmitZipkinBatch(ctx, args.Spans); err2 != nil {
+    tickerCancel()
+    if err2 == thrift.ErrAbandonRequest {
+      return false, thrift.WrapTException(err2)
+    }
+    x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitZipkinBatch: " + err2.Error())
+    oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.EXCEPTION, seqId)
+    x.Write(ctx, oprot)
+    oprot.WriteMessageEnd(ctx)
+    oprot.Flush(ctx)
+    return true, thrift.WrapTException(err2)
+  } else {
+    result.Success = retval
+  }
+  tickerCancel()
+  if err2 = oprot.WriteMessageBegin(ctx, "submitZipkinBatch", thrift.REPLY, seqId); err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = result.Write(ctx, oprot); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.WriteMessageEnd(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err2 = oprot.Flush(ctx); err == nil && err2 != nil {
+    err = thrift.WrapTException(err2)
+  }
+  if err != nil {
+    return
+  }
+  return true, err
+}
+
+
+// HELPER FUNCTIONS AND STRUCTURES
+
+// Attributes:
+//  - Spans
+type ZipkinCollectorSubmitZipkinBatchArgs struct {
+  Spans []*Span `thrift:"spans,1" db:"spans" json:"spans"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchArgs() *ZipkinCollectorSubmitZipkinBatchArgs {
+  return &ZipkinCollectorSubmitZipkinBatchArgs{}
+}
+
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) GetSpans() []*Span {
+  return p.Spans
+}
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField1(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs)  ReadField1(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Span, 0, size)
+  p.Spans =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem9 := &Span{}
+    if err := _elem9.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem9), err)
+    }
+    p.Spans = append(p.Spans, _elem9)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) writeField1(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin(ctx, "spans", thrift.LIST, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:spans: ", p), err) }
+  if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Spans)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Spans {
+    if err := v.Write(ctx, oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(ctx); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:spans: ", p), err) }
+  return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchArgs) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchArgs(%+v)", *p)
+}
+
+// Attributes:
+//  - Success
+type ZipkinCollectorSubmitZipkinBatchResult struct {
+  Success []*Response `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewZipkinCollectorSubmitZipkinBatchResult() *ZipkinCollectorSubmitZipkinBatchResult {
+  return &ZipkinCollectorSubmitZipkinBatchResult{}
+}
+
+var ZipkinCollectorSubmitZipkinBatchResult_Success_DEFAULT []*Response
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) GetSuccess() []*Response {
+  return p.Success
+}
+func (p *ZipkinCollectorSubmitZipkinBatchResult) IsSetSuccess() bool {
+  return p.Success != nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Read(ctx context.Context, iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin(ctx)
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 0:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField0(ctx, iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(ctx, fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(ctx); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(ctx); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult)  ReadField0(ctx context.Context, iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin(ctx)
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*Response, 0, size)
+  p.Success =  tSlice
+  for i := 0; i < size; i ++ {
+    _elem10 := &Response{}
+    if err := _elem10.Read(ctx, iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err)
+    }
+    p.Success = append(p.Success, _elem10)
+  }
+  if err := iprot.ReadListEnd(ctx); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) Write(ctx context.Context, oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin(ctx, "submitZipkinBatch_result"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField0(ctx, oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(ctx); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(ctx); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) writeField0(ctx context.Context, oprot thrift.TProtocol) (err error) {
+  if p.IsSetSuccess() {
+    if err := oprot.WriteFieldBegin(ctx, "success", thrift.LIST, 0); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+    if err := oprot.WriteListBegin(ctx, thrift.STRUCT, len(p.Success)); err != nil {
+      return thrift.PrependError("error writing list begin: ", err)
+    }
+    for _, v := range p.Success {
+      if err := v.Write(ctx, oprot); err != nil {
+        return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+      }
+    }
+    if err := oprot.WriteListEnd(ctx); err != nil {
+      return thrift.PrependError("error writing list end: ", err)
+    }
+    if err := oprot.WriteFieldEnd(ctx); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+  }
+  return err
+}
+
+func (p *ZipkinCollectorSubmitZipkinBatchResult) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("ZipkinCollectorSubmitZipkinBatchResult(%+v)", *p)
+}
+
+
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/.nocover b/vendor/github.com/uber/jaeger-client-go/thrift/.nocover
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/.nocover
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/README.md b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
new file mode 100644
index 0000000..c4c38ae
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/README.md
@@ -0,0 +1,11 @@
+# Apache Thrift
+
+This is a partial copy of Apache Thrift v0.14.1 (https://github.com/apache/thrift/commit/f6fa1794539e68ac294038ac388d6bde40a6c237).
+
+The code is vendored to avoid compatibility issues across Thrift versions.
+
+The file logger.go is modified to remove dependency on "testing" (see Issue #585).
+
+See:
+  * https://github.com/jaegertracing/jaeger-client-go/pull/584
+  * https://github.com/jaegertracing/jaeger-client-go/pull/303
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
new file mode 100644
index 0000000..32d5b01
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/application_exception.go
@@ -0,0 +1,180 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+)
+
+const (
+	UNKNOWN_APPLICATION_EXCEPTION  = 0
+	UNKNOWN_METHOD                 = 1
+	INVALID_MESSAGE_TYPE_EXCEPTION = 2
+	WRONG_METHOD_NAME              = 3
+	BAD_SEQUENCE_ID                = 4
+	MISSING_RESULT                 = 5
+	INTERNAL_ERROR                 = 6
+	PROTOCOL_ERROR                 = 7
+	INVALID_TRANSFORM              = 8
+	INVALID_PROTOCOL               = 9
+	UNSUPPORTED_CLIENT_TYPE        = 10
+)
+
+var defaultApplicationExceptionMessage = map[int32]string{
+	UNKNOWN_APPLICATION_EXCEPTION:  "unknown application exception",
+	UNKNOWN_METHOD:                 "unknown method",
+	INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type",
+	WRONG_METHOD_NAME:              "wrong method name",
+	BAD_SEQUENCE_ID:                "bad sequence ID",
+	MISSING_RESULT:                 "missing result",
+	INTERNAL_ERROR:                 "unknown internal error",
+	PROTOCOL_ERROR:                 "unknown protocol error",
+	INVALID_TRANSFORM:              "Invalid transform",
+	INVALID_PROTOCOL:               "Invalid protocol",
+	UNSUPPORTED_CLIENT_TYPE:        "Unsupported client type",
+}
+
+// Application level Thrift exception
+type TApplicationException interface {
+	TException
+	TypeId() int32
+	Read(ctx context.Context, iprot TProtocol) error
+	Write(ctx context.Context, oprot TProtocol) error
+}
+
+type tApplicationException struct {
+	message string
+	type_   int32
+}
+
+var _ TApplicationException = (*tApplicationException)(nil)
+
+func (tApplicationException) TExceptionType() TExceptionType {
+	return TExceptionTypeApplication
+}
+
+func (e tApplicationException) Error() string {
+	if e.message != "" {
+		return e.message
+	}
+	return defaultApplicationExceptionMessage[e.type_]
+}
+
+func NewTApplicationException(type_ int32, message string) TApplicationException {
+	return &tApplicationException{message, type_}
+}
+
+func (p *tApplicationException) TypeId() int32 {
+	return p.type_
+}
+
+func (p *tApplicationException) Read(ctx context.Context, iprot TProtocol) error {
+	// TODO: this should really be generated by the compiler
+	_, err := iprot.ReadStructBegin(ctx)
+	if err != nil {
+		return err
+	}
+
+	message := ""
+	type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
+
+	for {
+		_, ttype, id, err := iprot.ReadFieldBegin(ctx)
+		if err != nil {
+			return err
+		}
+		if ttype == STOP {
+			break
+		}
+		switch id {
+		case 1:
+			if ttype == STRING {
+				if message, err = iprot.ReadString(ctx); err != nil {
+					return err
+				}
+			} else {
+				if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+					return err
+				}
+			}
+		case 2:
+			if ttype == I32 {
+				if type_, err = iprot.ReadI32(ctx); err != nil {
+					return err
+				}
+			} else {
+				if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+					return err
+				}
+			}
+		default:
+			if err = SkipDefaultDepth(ctx, iprot, ttype); err != nil {
+				return err
+			}
+		}
+		if err = iprot.ReadFieldEnd(ctx); err != nil {
+			return err
+		}
+	}
+	if err := iprot.ReadStructEnd(ctx); err != nil {
+		return err
+	}
+
+	p.message = message
+	p.type_ = type_
+
+	return nil
+}
+
+func (p *tApplicationException) Write(ctx context.Context, oprot TProtocol) (err error) {
+	err = oprot.WriteStructBegin(ctx, "TApplicationException")
+	if len(p.Error()) > 0 {
+		err = oprot.WriteFieldBegin(ctx, "message", STRING, 1)
+		if err != nil {
+			return
+		}
+		err = oprot.WriteString(ctx, p.Error())
+		if err != nil {
+			return
+		}
+		err = oprot.WriteFieldEnd(ctx)
+		if err != nil {
+			return
+		}
+	}
+	err = oprot.WriteFieldBegin(ctx, "type", I32, 2)
+	if err != nil {
+		return
+	}
+	err = oprot.WriteI32(ctx, p.type_)
+	if err != nil {
+		return
+	}
+	err = oprot.WriteFieldEnd(ctx)
+	if err != nil {
+		return
+	}
+	err = oprot.WriteFieldStop(ctx)
+	if err != nil {
+		return
+	}
+	err = oprot.WriteStructEnd(ctx)
+	return
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
new file mode 100644
index 0000000..45c880d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/binary_protocol.go
@@ -0,0 +1,555 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bytes"
+	"context"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+)
+
+type TBinaryProtocol struct {
+	trans         TRichTransport
+	origTransport TTransport
+	cfg           *TConfiguration
+	buffer        [64]byte
+}
+
+type TBinaryProtocolFactory struct {
+	cfg *TConfiguration
+}
+
+// Deprecated: Use NewTBinaryProtocolConf instead.
+func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
+	return NewTBinaryProtocolConf(t, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// Deprecated: Use NewTBinaryProtocolConf instead.
+func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
+	return NewTBinaryProtocolConf(t, &TConfiguration{
+		TBinaryStrictRead:  &strictRead,
+		TBinaryStrictWrite: &strictWrite,
+
+		noPropagation: true,
+	})
+}
+
+func NewTBinaryProtocolConf(t TTransport, conf *TConfiguration) *TBinaryProtocol {
+	PropagateTConfiguration(t, conf)
+	p := &TBinaryProtocol{
+		origTransport: t,
+		cfg:           conf,
+	}
+	if et, ok := t.(TRichTransport); ok {
+		p.trans = et
+	} else {
+		p.trans = NewTRichTransport(t)
+	}
+	return p
+}
+
+// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
+func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
+	return NewTBinaryProtocolFactoryConf(&TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// Deprecated: Use NewTBinaryProtocolFactoryConf instead.
+func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
+	return NewTBinaryProtocolFactoryConf(&TConfiguration{
+		TBinaryStrictRead:  &strictRead,
+		TBinaryStrictWrite: &strictWrite,
+
+		noPropagation: true,
+	})
+}
+
+func NewTBinaryProtocolFactoryConf(conf *TConfiguration) *TBinaryProtocolFactory {
+	return &TBinaryProtocolFactory{
+		cfg: conf,
+	}
+}
+
+func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
+	return NewTBinaryProtocolConf(t, p.cfg)
+}
+
+func (p *TBinaryProtocolFactory) SetTConfiguration(conf *TConfiguration) {
+	p.cfg = conf
+}
+
+/**
+ * Writing Methods
+ */
+
+func (p *TBinaryProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
+	if p.cfg.GetTBinaryStrictWrite() {
+		version := uint32(VERSION_1) | uint32(typeId)
+		e := p.WriteI32(ctx, int32(version))
+		if e != nil {
+			return e
+		}
+		e = p.WriteString(ctx, name)
+		if e != nil {
+			return e
+		}
+		e = p.WriteI32(ctx, seqId)
+		return e
+	} else {
+		e := p.WriteString(ctx, name)
+		if e != nil {
+			return e
+		}
+		e = p.WriteByte(ctx, int8(typeId))
+		if e != nil {
+			return e
+		}
+		e = p.WriteI32(ctx, seqId)
+		return e
+	}
+}
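+
+// For example, with strict write enabled a CALL message (assuming the usual
+// VERSION_1 value of 0x80010000 defined elsewhere in this package) begins with
+// the big-endian bytes 0x80 0x01 0x00 0x01, followed by the i32 length and
+// bytes of the method name, and finally the i32 sequence id.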
+
+func (p *TBinaryProtocol) WriteMessageEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteStructBegin(ctx context.Context, name string) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteStructEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+	e := p.WriteByte(ctx, int8(typeId))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI16(ctx, id)
+	return e
+}
+
+func (p *TBinaryProtocol) WriteFieldEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteFieldStop(ctx context.Context) error {
+	e := p.WriteByte(ctx, STOP)
+	return e
+}
+
+func (p *TBinaryProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+	e := p.WriteByte(ctx, int8(keyType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteByte(ctx, int8(valueType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI32(ctx, int32(size))
+	return e
+}
+
+func (p *TBinaryProtocol) WriteMapEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+	e := p.WriteByte(ctx, int8(elemType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI32(ctx, int32(size))
+	return e
+}
+
+func (p *TBinaryProtocol) WriteListEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+	e := p.WriteByte(ctx, int8(elemType))
+	if e != nil {
+		return e
+	}
+	e = p.WriteI32(ctx, int32(size))
+	return e
+}
+
+func (p *TBinaryProtocol) WriteSetEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) WriteBool(ctx context.Context, value bool) error {
+	if value {
+		return p.WriteByte(ctx, 1)
+	}
+	return p.WriteByte(ctx, 0)
+}
+
+func (p *TBinaryProtocol) WriteByte(ctx context.Context, value int8) error {
+	e := p.trans.WriteByte(byte(value))
+	return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI16(ctx context.Context, value int16) error {
+	v := p.buffer[0:2]
+	binary.BigEndian.PutUint16(v, uint16(value))
+	_, e := p.trans.Write(v)
+	return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI32(ctx context.Context, value int32) error {
+	v := p.buffer[0:4]
+	binary.BigEndian.PutUint32(v, uint32(value))
+	_, e := p.trans.Write(v)
+	return NewTProtocolException(e)
+}
+
+func (p *TBinaryProtocol) WriteI64(ctx context.Context, value int64) error {
+	v := p.buffer[0:8]
+	binary.BigEndian.PutUint64(v, uint64(value))
+	_, err := p.trans.Write(v)
+	return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteDouble(ctx context.Context, value float64) error {
+	return p.WriteI64(ctx, int64(math.Float64bits(value)))
+}
+
+func (p *TBinaryProtocol) WriteString(ctx context.Context, value string) error {
+	e := p.WriteI32(ctx, int32(len(value)))
+	if e != nil {
+		return e
+	}
+	_, err := p.trans.WriteString(value)
+	return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) WriteBinary(ctx context.Context, value []byte) error {
+	e := p.WriteI32(ctx, int32(len(value)))
+	if e != nil {
+		return e
+	}
+	_, err := p.trans.Write(value)
+	return NewTProtocolException(err)
+}
+
+/**
+ * Reading methods
+ */
+
+func (p *TBinaryProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+	size, e := p.ReadI32(ctx)
+	if e != nil {
+		return "", typeId, 0, NewTProtocolException(e)
+	}
+	if size < 0 {
+		typeId = TMessageType(size & 0x0ff)
+		version := int64(int64(size) & VERSION_MASK)
+		if version != VERSION_1 {
+			return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
+		}
+		name, e = p.ReadString(ctx)
+		if e != nil {
+			return name, typeId, seqId, NewTProtocolException(e)
+		}
+		seqId, e = p.ReadI32(ctx)
+		if e != nil {
+			return name, typeId, seqId, NewTProtocolException(e)
+		}
+		return name, typeId, seqId, nil
+	}
+	if p.cfg.GetTBinaryStrictRead() {
+		return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
+	}
+	name, e2 := p.readStringBody(size)
+	if e2 != nil {
+		return name, typeId, seqId, e2
+	}
+	b, e3 := p.ReadByte(ctx)
+	if e3 != nil {
+		return name, typeId, seqId, e3
+	}
+	typeId = TMessageType(b)
+	seqId, e4 := p.ReadI32(ctx)
+	if e4 != nil {
+		return name, typeId, seqId, e4
+	}
+	return name, typeId, seqId, nil
+}
+
+func (p *TBinaryProtocol) ReadMessageEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+	return
+}
+
+func (p *TBinaryProtocol) ReadStructEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, seqId int16, err error) {
+	t, err := p.ReadByte(ctx)
+	typeId = TType(t)
+	if err != nil {
+		return name, typeId, seqId, err
+	}
+	if t != STOP {
+		seqId, err = p.ReadI16(ctx)
+	}
+	return name, typeId, seqId, err
+}
+
+func (p *TBinaryProtocol) ReadFieldEnd(ctx context.Context) error {
+	return nil
+}
+
+var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
+
+func (p *TBinaryProtocol) ReadMapBegin(ctx context.Context) (kType, vType TType, size int, err error) {
+	k, e := p.ReadByte(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	kType = TType(k)
+	v, e := p.ReadByte(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	vType = TType(v)
+	size32, e := p.ReadI32(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+	return kType, vType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadMapEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+	b, e := p.ReadByte(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	elemType = TType(b)
+	size32, e := p.ReadI32(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+
+	return
+}
+
+func (p *TBinaryProtocol) ReadListEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+	b, e := p.ReadByte(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	elemType = TType(b)
+	size32, e := p.ReadI32(ctx)
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+	return elemType, size, nil
+}
+
+func (p *TBinaryProtocol) ReadSetEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TBinaryProtocol) ReadBool(ctx context.Context) (bool, error) {
+	b, e := p.ReadByte(ctx)
+	v := true
+	if b != 1 {
+		v = false
+	}
+	return v, e
+}
+
+func (p *TBinaryProtocol) ReadByte(ctx context.Context) (int8, error) {
+	v, err := p.trans.ReadByte()
+	return int8(v), err
+}
+
+func (p *TBinaryProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+	buf := p.buffer[0:2]
+	err = p.readAll(ctx, buf)
+	value = int16(binary.BigEndian.Uint16(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadI32(ctx context.Context) (value int32, err error) {
+	buf := p.buffer[0:4]
+	err = p.readAll(ctx, buf)
+	value = int32(binary.BigEndian.Uint32(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadI64(ctx context.Context) (value int64, err error) {
+	buf := p.buffer[0:8]
+	err = p.readAll(ctx, buf)
+	value = int64(binary.BigEndian.Uint64(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
+	buf := p.buffer[0:8]
+	err = p.readAll(ctx, buf)
+	value = math.Float64frombits(binary.BigEndian.Uint64(buf))
+	return value, err
+}
+
+func (p *TBinaryProtocol) ReadString(ctx context.Context) (value string, err error) {
+	size, e := p.ReadI32(ctx)
+	if e != nil {
+		return "", e
+	}
+	err = checkSizeForProtocol(size, p.cfg)
+	if err != nil {
+		return
+	}
+	if size < 0 {
+		err = invalidDataLength
+		return
+	}
+	if size == 0 {
+		return "", nil
+	}
+	if size < int32(len(p.buffer)) {
+		// Avoid allocation on small reads
+		buf := p.buffer[:size]
+		read, e := io.ReadFull(p.trans, buf)
+		return string(buf[:read]), NewTProtocolException(e)
+	}
+
+	return p.readStringBody(size)
+}
+
+func (p *TBinaryProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
+	size, e := p.ReadI32(ctx)
+	if e != nil {
+		return nil, e
+	}
+	if err := checkSizeForProtocol(size, p.cfg); err != nil {
+		return nil, err
+	}
+
+	buf, err := safeReadBytes(size, p.trans)
+	return buf, NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) {
+	return NewTProtocolException(p.trans.Flush(ctx))
+}
+
+func (p *TBinaryProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+	return SkipDefaultDepth(ctx, p, fieldType)
+}
+
+func (p *TBinaryProtocol) Transport() TTransport {
+	return p.origTransport
+}
+
+func (p *TBinaryProtocol) readAll(ctx context.Context, buf []byte) (err error) {
+	var read int
+	_, deadlineSet := ctx.Deadline()
+	for {
+		read, err = io.ReadFull(p.trans, buf)
+		if deadlineSet && read == 0 && isTimeoutError(err) && ctx.Err() == nil {
+			// This is I/O timeout without anything read,
+			// and we still have time left, keep retrying.
+			continue
+		}
+		// For anything else, don't retry
+		break
+	}
+	return NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
+	buf, err := safeReadBytes(size, p.trans)
+	return string(buf), NewTProtocolException(err)
+}
+
+func (p *TBinaryProtocol) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(p.trans, conf)
+	PropagateTConfiguration(p.origTransport, conf)
+	p.cfg = conf
+}
+
+var (
+	_ TConfigurationSetter = (*TBinaryProtocolFactory)(nil)
+	_ TConfigurationSetter = (*TBinaryProtocol)(nil)
+)
+
+// This function is shared between TBinaryProtocol and TCompactProtocol.
+//
+// It tries to read size bytes from trans, in a way that prevents large
+// allocations when size is insanely large (mostly caused by malformed message).
+func safeReadBytes(size int32, trans io.Reader) ([]byte, error) {
+	if size < 0 {
+		return nil, nil
+	}
+
+	buf := new(bytes.Buffer)
+	_, err := io.CopyN(buf, trans, int64(size))
+	return buf.Bytes(), err
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/client.go b/vendor/github.com/uber/jaeger-client-go/thrift/client.go
new file mode 100644
index 0000000..ea2c01f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/client.go
@@ -0,0 +1,109 @@
+package thrift
+
+import (
+	"context"
+	"fmt"
+)
+
+// ResponseMeta represents the metadata attached to the response.
+type ResponseMeta struct {
+	// The headers in the response, if any.
+	// If the underlying transport/protocol is not THeader, this will always be nil.
+	Headers THeaderMap
+}
+
+type TClient interface {
+	Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error)
+}
+
+type TStandardClient struct {
+	seqId        int32
+	iprot, oprot TProtocol
+}
+
+// TStandardClient implements TClient, and uses the standard message format for Thrift.
+// It is not safe for concurrent use.
+func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient {
+	return &TStandardClient{
+		iprot: inputProtocol,
+		oprot: outputProtocol,
+	}
+}
+
+func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error {
+	// Set headers from context object on THeaderProtocol
+	if headerProt, ok := oprot.(*THeaderProtocol); ok {
+		headerProt.ClearWriteHeaders()
+		for _, key := range GetWriteHeaderList(ctx) {
+			if value, ok := GetHeader(ctx, key); ok {
+				headerProt.SetWriteHeader(key, value)
+			}
+		}
+	}
+
+	if err := oprot.WriteMessageBegin(ctx, method, CALL, seqId); err != nil {
+		return err
+	}
+	if err := args.Write(ctx, oprot); err != nil {
+		return err
+	}
+	if err := oprot.WriteMessageEnd(ctx); err != nil {
+		return err
+	}
+	return oprot.Flush(ctx)
+}
+
+func (p *TStandardClient) Recv(ctx context.Context, iprot TProtocol, seqId int32, method string, result TStruct) error {
+	rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin(ctx)
+	if err != nil {
+		return err
+	}
+
+	if method != rMethod {
+		return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method))
+	} else if seqId != rSeqId {
+		return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method))
+	} else if rTypeId == EXCEPTION {
+		var exception tApplicationException
+		if err := exception.Read(ctx, iprot); err != nil {
+			return err
+		}
+
+		if err := iprot.ReadMessageEnd(ctx); err != nil {
+			return err
+		}
+
+		return &exception
+	} else if rTypeId != REPLY {
+		return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method))
+	}
+
+	if err := result.Read(ctx, iprot); err != nil {
+		return err
+	}
+
+	return iprot.ReadMessageEnd(ctx)
+}
+
+func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) (ResponseMeta, error) {
+	p.seqId++
+	seqId := p.seqId
+
+	if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil {
+		return ResponseMeta{}, err
+	}
+
+	// method is oneway
+	if result == nil {
+		return ResponseMeta{}, nil
+	}
+
+	err := p.Recv(ctx, p.iprot, seqId, method, result)
+	var headers THeaderMap
+	if hp, ok := p.iprot.(*THeaderProtocol); ok {
+		headers = hp.transport.readHeaders
+	}
+	return ResponseMeta{
+		Headers: headers,
+	}, err
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
new file mode 100644
index 0000000..a49225d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/compact_protocol.go
@@ -0,0 +1,865 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+)
+
+const (
+	COMPACT_PROTOCOL_ID       = 0x082
+	COMPACT_VERSION           = 1
+	COMPACT_VERSION_MASK      = 0x1f
+	COMPACT_TYPE_MASK         = 0x0E0
+	COMPACT_TYPE_BITS         = 0x07
+	COMPACT_TYPE_SHIFT_AMOUNT = 5
+)
+
+type tCompactType byte
+
+const (
+	COMPACT_BOOLEAN_TRUE  = 0x01
+	COMPACT_BOOLEAN_FALSE = 0x02
+	COMPACT_BYTE          = 0x03
+	COMPACT_I16           = 0x04
+	COMPACT_I32           = 0x05
+	COMPACT_I64           = 0x06
+	COMPACT_DOUBLE        = 0x07
+	COMPACT_BINARY        = 0x08
+	COMPACT_LIST          = 0x09
+	COMPACT_SET           = 0x0A
+	COMPACT_MAP           = 0x0B
+	COMPACT_STRUCT        = 0x0C
+)
+
+var (
+	ttypeToCompactType map[TType]tCompactType
+)
+
+func init() {
+	ttypeToCompactType = map[TType]tCompactType{
+		STOP:   STOP,
+		BOOL:   COMPACT_BOOLEAN_TRUE,
+		BYTE:   COMPACT_BYTE,
+		I16:    COMPACT_I16,
+		I32:    COMPACT_I32,
+		I64:    COMPACT_I64,
+		DOUBLE: COMPACT_DOUBLE,
+		STRING: COMPACT_BINARY,
+		LIST:   COMPACT_LIST,
+		SET:    COMPACT_SET,
+		MAP:    COMPACT_MAP,
+		STRUCT: COMPACT_STRUCT,
+	}
+}
+
+type TCompactProtocolFactory struct {
+	cfg *TConfiguration
+}
+
+// Deprecated: Use NewTCompactProtocolFactoryConf instead.
+func NewTCompactProtocolFactory() *TCompactProtocolFactory {
+	return NewTCompactProtocolFactoryConf(&TConfiguration{
+		noPropagation: true,
+	})
+}
+
+func NewTCompactProtocolFactoryConf(conf *TConfiguration) *TCompactProtocolFactory {
+	return &TCompactProtocolFactory{
+		cfg: conf,
+	}
+}
+
+func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return NewTCompactProtocolConf(trans, p.cfg)
+}
+
+func (p *TCompactProtocolFactory) SetTConfiguration(conf *TConfiguration) {
+	p.cfg = conf
+}
+
+type TCompactProtocol struct {
+	trans         TRichTransport
+	origTransport TTransport
+
+	cfg *TConfiguration
+
+	// Used to keep track of the last field for the current and previous structs,
+	// so we can do the delta stuff.
+	lastField   []int
+	lastFieldId int
+
+	// If we encounter a boolean field begin, save the TField here so it can
+	// have the value incorporated.
+	booleanFieldName    string
+	booleanFieldId      int16
+	booleanFieldPending bool
+
+	// If we read a field header, and it's a boolean field, save the boolean
+	// value here so that readBool can use it.
+	boolValue          bool
+	boolValueIsNotNull bool
+	buffer             [64]byte
+}
+
+// Deprecated: Use NewTCompactProtocolConf instead.
+func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
+	return NewTCompactProtocolConf(trans, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+func NewTCompactProtocolConf(trans TTransport, conf *TConfiguration) *TCompactProtocol {
+	PropagateTConfiguration(trans, conf)
+	p := &TCompactProtocol{
+		origTransport: trans,
+		cfg:           conf,
+	}
+	if et, ok := trans.(TRichTransport); ok {
+		p.trans = et
+	} else {
+		p.trans = NewTRichTransport(trans)
+	}
+
+	return p
+}
+
+//
+// Public Writing methods.
+//
+
+// Write a message header to the wire. Compact Protocol messages contain the
+// protocol version so we can migrate forwards in the future if need be.
+func (p *TCompactProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error {
+	err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	_, err = p.writeVarint32(seqid)
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	e := p.WriteString(ctx, name)
+	return e
+
+}
+
+func (p *TCompactProtocol) WriteMessageEnd(ctx context.Context) error { return nil }
+
+// Write a struct begin. This doesn't actually put anything on the wire. We
+// use it as an opportunity to put special placeholder markers on the field
+// stack so we can get the field id deltas correct.
+func (p *TCompactProtocol) WriteStructBegin(ctx context.Context, name string) error {
+	p.lastField = append(p.lastField, p.lastFieldId)
+	p.lastFieldId = 0
+	return nil
+}
+
+// Write a struct end. This doesn't actually put anything on the wire. We use
+// this as an opportunity to pop the last field from the current struct off
+// of the field stack.
+func (p *TCompactProtocol) WriteStructEnd(ctx context.Context) error {
+	if len(p.lastField) <= 0 {
+		return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("WriteStructEnd called without matching WriteStructBegin call before"))
+	}
+	p.lastFieldId = p.lastField[len(p.lastField)-1]
+	p.lastField = p.lastField[:len(p.lastField)-1]
+	return nil
+}
+
+func (p *TCompactProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+	if typeId == BOOL {
+		// we want to possibly include the value, so we'll wait.
+		p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
+		return nil
+	}
+	_, err := p.writeFieldBeginInternal(ctx, name, typeId, id, 0xFF)
+	return NewTProtocolException(err)
+}
+
+// The workhorse of writeFieldBegin. It has the option of doing a
+// 'type override' of the type header. This is used specifically in the
+// boolean field case.
+func (p *TCompactProtocol) writeFieldBeginInternal(ctx context.Context, name string, typeId TType, id int16, typeOverride byte) (int, error) {
+	// short lastField = lastField_.pop();
+
+	// if there's a type override, use that.
+	var typeToWrite byte
+	if typeOverride == 0xFF {
+		typeToWrite = byte(p.getCompactType(typeId))
+	} else {
+		typeToWrite = typeOverride
+	}
+	// check if we can use delta encoding for the field id
+	fieldId := int(id)
+	written := 0
+	if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
+		// write them together
+		err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
+		if err != nil {
+			return 0, err
+		}
+	} else {
+		// write them separate
+		err := p.writeByteDirect(typeToWrite)
+		if err != nil {
+			return 0, err
+		}
+		err = p.WriteI16(ctx, id)
+		written = 1 + 2
+		if err != nil {
+			return 0, err
+		}
+	}
+
+	p.lastFieldId = fieldId
+	return written, nil
+}
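+
+// For example, moving from field id 1 to field id 2 with an i32 value emits
+// the single header byte 0x15 (delta 1 in the high nibble, COMPACT_I32 = 0x05
+// in the low nibble); a backwards jump or a delta larger than 15 falls back to
+// a plain type byte followed by the zigzag-varint field id.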
+
+func (p *TCompactProtocol) WriteFieldEnd(ctx context.Context) error { return nil }
+
+func (p *TCompactProtocol) WriteFieldStop(ctx context.Context) error {
+	err := p.writeByteDirect(STOP)
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+	if size == 0 {
+		err := p.writeByteDirect(0)
+		return NewTProtocolException(err)
+	}
+	_, err := p.writeVarint32(int32(size))
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
+	return NewTProtocolException(err)
+}
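+
+// For example, a two-entry map with string keys and i32 values is announced as
+// the varint 0x02 followed by the byte 0x85 (COMPACT_BINARY = 0x08 for the key
+// type in the high nibble, COMPACT_I32 = 0x05 for the value type in the low
+// nibble).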
+
+func (p *TCompactProtocol) WriteMapEnd(ctx context.Context) error { return nil }
+
+// Write a list header.
+func (p *TCompactProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+	_, err := p.writeCollectionBegin(elemType, size)
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteListEnd(ctx context.Context) error { return nil }
+
+// Write a set header.
+func (p *TCompactProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+	_, err := p.writeCollectionBegin(elemType, size)
+	return NewTProtocolException(err)
+}
+
+func (p *TCompactProtocol) WriteSetEnd(ctx context.Context) error { return nil }
+
+func (p *TCompactProtocol) WriteBool(ctx context.Context, value bool) error {
+	v := byte(COMPACT_BOOLEAN_FALSE)
+	if value {
+		v = byte(COMPACT_BOOLEAN_TRUE)
+	}
+	if p.booleanFieldPending {
+		// we haven't written the field header yet
+		_, err := p.writeFieldBeginInternal(ctx, p.booleanFieldName, BOOL, p.booleanFieldId, v)
+		p.booleanFieldPending = false
+		return NewTProtocolException(err)
+	}
+	// we're not part of a field, so just write the value.
+	err := p.writeByteDirect(v)
+	return NewTProtocolException(err)
+}
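+
+// For example, a true boolean field one id above the previous field collapses
+// into the single byte 0x11: delta 1 in the high nibble, with
+// COMPACT_BOOLEAN_TRUE = 0x01 standing in for the type in the low nibble.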
+
+// Write a byte. Nothing to see here!
+func (p *TCompactProtocol) WriteByte(ctx context.Context, value int8) error {
+	err := p.writeByteDirect(byte(value))
+	return NewTProtocolException(err)
+}
+
+// Write an I16 as a zigzag varint.
+func (p *TCompactProtocol) WriteI16(ctx context.Context, value int16) error {
+	_, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
+	return NewTProtocolException(err)
+}
+
+// Write an i32 as a zigzag varint.
+func (p *TCompactProtocol) WriteI32(ctx context.Context, value int32) error {
+	_, err := p.writeVarint32(p.int32ToZigzag(value))
+	return NewTProtocolException(err)
+}
+
+// Write an i64 as a zigzag varint.
+func (p *TCompactProtocol) WriteI64(ctx context.Context, value int64) error {
+	_, err := p.writeVarint64(p.int64ToZigzag(value))
+	return NewTProtocolException(err)
+}
+
+// Write a double to the wire as 8 bytes.
+func (p *TCompactProtocol) WriteDouble(ctx context.Context, value float64) error {
+	buf := p.buffer[0:8]
+	binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
+	_, err := p.trans.Write(buf)
+	return NewTProtocolException(err)
+}
+
+// Write a string to the wire with a varint size preceding.
+func (p *TCompactProtocol) WriteString(ctx context.Context, value string) error {
+	_, e := p.writeVarint32(int32(len(value)))
+	if e != nil {
+		return NewTProtocolException(e)
+	}
+	_, e = p.trans.WriteString(value)
+	return e
+}
+
+// Write a byte array, using a varint for the size.
+func (p *TCompactProtocol) WriteBinary(ctx context.Context, bin []byte) error {
+	_, e := p.writeVarint32(int32(len(bin)))
+	if e != nil {
+		return NewTProtocolException(e)
+	}
+	if len(bin) > 0 {
+		_, e = p.trans.Write(bin)
+		return NewTProtocolException(e)
+	}
+	return nil
+}
+
+//
+// Reading methods.
+//
+
+// Read a message header.
+func (p *TCompactProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+	var protocolId byte
+
+	_, deadlineSet := ctx.Deadline()
+	for {
+		protocolId, err = p.readByteDirect()
+		if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
+			// keep retrying I/O timeout errors since we still have
+			// time left
+			continue
+		}
+		// For anything else, don't retry
+		break
+	}
+	if err != nil {
+		return
+	}
+
+	if protocolId != COMPACT_PROTOCOL_ID {
+		e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
+		return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
+	}
+
+	versionAndType, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+
+	version := versionAndType & COMPACT_VERSION_MASK
+	typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
+	if version != COMPACT_VERSION {
+		e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
+		err = NewTProtocolExceptionWithType(BAD_VERSION, e)
+		return
+	}
+	seqId, e := p.readVarint32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	name, err = p.ReadString(ctx)
+	return
+}
+
+func (p *TCompactProtocol) ReadMessageEnd(ctx context.Context) error { return nil }
+
+// Read a struct begin. There's nothing on the wire for this, but it is our
+// opportunity to push a new struct begin marker onto the field stack.
+func (p *TCompactProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+	p.lastField = append(p.lastField, p.lastFieldId)
+	p.lastFieldId = 0
+	return
+}
+
+// Doesn't actually consume any wire data, just removes the last field for
+// this struct from the field stack.
+func (p *TCompactProtocol) ReadStructEnd(ctx context.Context) error {
+	// consume the last field we read off the wire.
+	if len(p.lastField) <= 0 {
+		return NewTProtocolExceptionWithType(INVALID_DATA, errors.New("ReadStructEnd called without matching ReadStructBegin call before"))
+	}
+	p.lastFieldId = p.lastField[len(p.lastField)-1]
+	p.lastField = p.lastField[:len(p.lastField)-1]
+	return nil
+}
+
+// Read a field header off the wire.
+func (p *TCompactProtocol) ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error) {
+	t, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+
+	// if it's a stop, then we can return immediately, as the struct is over.
+	if (t & 0x0f) == STOP {
+		return "", STOP, 0, nil
+	}
+
+	// mask off the 4 MSB of the type header. it could contain a field id delta.
+	modifier := int16((t & 0xf0) >> 4)
+	if modifier == 0 {
+		// not a delta. look ahead for the zigzag varint field id.
+		id, err = p.ReadI16(ctx)
+		if err != nil {
+			return
+		}
+	} else {
+		// has a delta. add the delta to the last read field id.
+		id = int16(p.lastFieldId) + modifier
+	}
+	typeId, e := p.getTType(tCompactType(t & 0x0f))
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+
+	// if this happens to be a boolean field, the value is encoded in the type
+	if p.isBoolType(t) {
+		// save the boolean value in a special instance variable.
+		p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
+		p.boolValueIsNotNull = true
+	}
+
+	// push the new field onto the field stack so we can keep the deltas going.
+	p.lastFieldId = int(id)
+	return
+}
+
+func (p *TCompactProtocol) ReadFieldEnd(ctx context.Context) error { return nil }
+
+// Read a map header off the wire. If the size is zero, skip reading the key
+// and value type. This means that 0-length maps will yield TMaps without the
+// "correct" types.
+func (p *TCompactProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
+	size32, e := p.readVarint32()
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	if size32 < 0 {
+		err = invalidDataLength
+		return
+	}
+	size = int(size32)
+
+	keyAndValueType := byte(STOP)
+	if size != 0 {
+		keyAndValueType, err = p.readByteDirect()
+		if err != nil {
+			return
+		}
+	}
+	keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
+	valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
+	return
+}
+
+func (p *TCompactProtocol) ReadMapEnd(ctx context.Context) error { return nil }
+
+// Read a list header off the wire. If the list size is 0-14, the size will
+// be packed into the element type header. If it's a longer list, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
+func (p *TCompactProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+	size_and_type, err := p.readByteDirect()
+	if err != nil {
+		return
+	}
+	size = int((size_and_type >> 4) & 0x0f)
+	if size == 15 {
+		size2, e := p.readVarint32()
+		if e != nil {
+			err = NewTProtocolException(e)
+			return
+		}
+		if size2 < 0 {
+			err = invalidDataLength
+			return
+		}
+		size = int(size2)
+	}
+	elemType, e := p.getTType(tCompactType(size_and_type))
+	if e != nil {
+		err = NewTProtocolException(e)
+		return
+	}
+	return
+}
+
+func (p *TCompactProtocol) ReadListEnd(ctx context.Context) error { return nil }
+
+// Read a set header off the wire. If the set size is 0-14, the size will
+// be packed into the element type header. If it's a longer set, the 4 MSB
+// of the element type header will be 0xF, and a varint will follow with the
+// true size.
+func (p *TCompactProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+	return p.ReadListBegin(ctx)
+}
+
+func (p *TCompactProtocol) ReadSetEnd(ctx context.Context) error { return nil }
+
+// Read a boolean off the wire. If this is a boolean field, the value should
+// already have been read during readFieldBegin, so we'll just consume the
+// pre-stored value. Otherwise, read a byte.
+func (p *TCompactProtocol) ReadBool(ctx context.Context) (value bool, err error) {
+	if p.boolValueIsNotNull {
+		p.boolValueIsNotNull = false
+		return p.boolValue, nil
+	}
+	v, err := p.readByteDirect()
+	return v == COMPACT_BOOLEAN_TRUE, err
+}
+
+// Read a single byte off the wire. Nothing interesting here.
+func (p *TCompactProtocol) ReadByte(ctx context.Context) (int8, error) {
+	v, err := p.readByteDirect()
+	if err != nil {
+		return 0, NewTProtocolException(err)
+	}
+	return int8(v), err
+}
+
+// Read an i16 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+	v, err := p.ReadI32(ctx)
+	return int16(v), err
+}
+
+// Read an i32 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI32(ctx context.Context) (value int32, err error) {
+	v, e := p.readVarint32()
+	if e != nil {
+		return 0, NewTProtocolException(e)
+	}
+	value = p.zigzagToInt32(v)
+	return value, nil
+}
+
+// Read an i64 from the wire as a zigzag varint.
+func (p *TCompactProtocol) ReadI64(ctx context.Context) (value int64, err error) {
+	v, e := p.readVarint64()
+	if e != nil {
+		return 0, NewTProtocolException(e)
+	}
+	value = p.zigzagToInt64(v)
+	return value, nil
+}
+
+// No magic here - just read a double off the wire.
+func (p *TCompactProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
+	longBits := p.buffer[0:8]
+	_, e := io.ReadFull(p.trans, longBits)
+	if e != nil {
+		return 0.0, NewTProtocolException(e)
+	}
+	return math.Float64frombits(p.bytesToUint64(longBits)), nil
+}
+
+// Reads a []byte (via readBinary), and then UTF-8 decodes it.
+func (p *TCompactProtocol) ReadString(ctx context.Context) (value string, err error) {
+	length, e := p.readVarint32()
+	if e != nil {
+		return "", NewTProtocolException(e)
+	}
+	err = checkSizeForProtocol(length, p.cfg)
+	if err != nil {
+		return
+	}
+	if length == 0 {
+		return "", nil
+	}
+	if length < int32(len(p.buffer)) {
+		// Avoid allocation on small reads
+		buf := p.buffer[:length]
+		read, e := io.ReadFull(p.trans, buf)
+		return string(buf[:read]), NewTProtocolException(e)
+	}
+
+	buf, e := safeReadBytes(length, p.trans)
+	return string(buf), NewTProtocolException(e)
+}
+
+// Read a []byte from the wire.
+func (p *TCompactProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
+	length, e := p.readVarint32()
+	if e != nil {
+		return nil, NewTProtocolException(e)
+	}
+	err = checkSizeForProtocol(length, p.cfg)
+	if err != nil {
+		return
+	}
+	if length == 0 {
+		return []byte{}, nil
+	}
+
+	buf, e := safeReadBytes(length, p.trans)
+	return buf, NewTProtocolException(e)
+}
+
+func (p *TCompactProtocol) Flush(ctx context.Context) (err error) {
+	return NewTProtocolException(p.trans.Flush(ctx))
+}
+
+func (p *TCompactProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+	return SkipDefaultDepth(ctx, p, fieldType)
+}
+
+func (p *TCompactProtocol) Transport() TTransport {
+	return p.origTransport
+}
+
+//
+// Internal writing methods
+//
+
+// Abstract method for writing the start of lists and sets. List and sets on
+// the wire differ only by the type indicator.
+func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
+	if size <= 14 {
+		return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
+	}
+	err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
+	if err != nil {
+		return 0, err
+	}
+	m, err := p.writeVarint32(int32(size))
+	return 1 + m, err
+}
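+
+// For illustration: a collection of 3 elements whose compact element type is
+// T is written as the single header byte (3<<4)|T, while a collection of 20
+// elements is written as 0xF0|T followed by the varint 20.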
+
+// Write an i32 as a varint. Results in 1-5 bytes on the wire.
+// TODO(pomack): make a permanent buffer like writeVarint64?
+func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
+	i32buf := p.buffer[0:5]
+	idx := 0
+	for {
+		if (n & ^0x7F) == 0 {
+			i32buf[idx] = byte(n)
+			idx++
+			break
+		} else {
+			i32buf[idx] = byte((n & 0x7F) | 0x80)
+			idx++
+			u := uint32(n)
+			n = int32(u >> 7)
+		}
+	}
+	return p.trans.Write(i32buf[0:idx])
+}
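+
+// Worked example (illustrative only): the encoding above emits the low 7 bits
+// of the value per byte and sets the MSB when more bytes follow, so
+// writeVarint32(300) puts the two bytes 0xAC 0x02 on the wire.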
+
+// Write an i64 as a varint. Results in 1-10 bytes on the wire.
+func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
+	varint64out := p.buffer[0:10]
+	idx := 0
+	for {
+		if (n & ^0x7F) == 0 {
+			varint64out[idx] = byte(n)
+			idx++
+			break
+		} else {
+			varint64out[idx] = byte((n & 0x7F) | 0x80)
+			idx++
+			u := uint64(n)
+			n = int64(u >> 7)
+		}
+	}
+	return p.trans.Write(varint64out[0:idx])
+}
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
+	return (l << 1) ^ (l >> 63)
+}
+
+// Convert l into a zigzag long. This allows negative numbers to be
+// represented compactly as a varint.
+func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
+	return (n << 1) ^ (n >> 31)
+}
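+
+// For example, the zigzag mapping above sends 0, -1, 1, -2, 2 to 0, 1, 2, 3, 4
+// respectively, so values of small magnitude (positive or negative) still
+// encode into short varints.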
+
+func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
+	binary.LittleEndian.PutUint64(buf, n)
+}
+
+func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
+	binary.LittleEndian.PutUint64(buf, uint64(n))
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+// Used internally by other writing methods that know they need to write a byte.
+func (p *TCompactProtocol) writeByteDirect(b byte) error {
+	return p.trans.WriteByte(b)
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
+	return 1, p.writeByteDirect(byte(n))
+}
+
+//
+// Internal reading methods
+//
+
+// Read an i32 from the wire as a varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 5 bytes.
+func (p *TCompactProtocol) readVarint32() (int32, error) {
+	// if the wire contains the right stuff, this will just truncate the i64 we
+	// read and get us the right sign.
+	v, err := p.readVarint64()
+	return int32(v), err
+}
+
+// Read an i64 from the wire as a proper varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 10 bytes.
+func (p *TCompactProtocol) readVarint64() (int64, error) {
+	shift := uint(0)
+	result := int64(0)
+	for {
+		b, err := p.readByteDirect()
+		if err != nil {
+			return 0, err
+		}
+		result |= int64(b&0x7f) << shift
+		if (b & 0x80) != 0x80 {
+			break
+		}
+		shift += 7
+	}
+	return result, nil
+}
+
+// Read a raw byte, unlike ReadByte which reads a Thrift byte (i8).
+func (p *TCompactProtocol) readByteDirect() (byte, error) {
+	return p.trans.ReadByte()
+}
+
+//
+// encoding helpers
+//
+
+// Convert from zigzag int to int.
+func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
+	u := uint32(n)
+	return int32(u>>1) ^ -(n & 1)
+}
+
+// Convert from zigzag long to long.
+func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
+	u := uint64(n)
+	return int64(u>>1) ^ -(n & 1)
+}
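+
+// These are the inverses of int32ToZigzag/int64ToZigzag above; for example,
+// zigzagToInt32(3) == -2 and zigzagToInt32(4) == 2.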
+
+// bytesToInt64 interprets the first 8 bytes of b as a little-endian int64.
+// The conversion is done via encoding/binary, so no manual masking or
+// shifting is needed.
+func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
+	return int64(binary.LittleEndian.Uint64(b))
+}
+
+// bytesToUint64 interprets the first 8 bytes of b as a little-endian uint64,
+// again via encoding/binary.
+func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
+	return binary.LittleEndian.Uint64(b)
+}
+
+//
+// type testing and converting
+//
+
+func (p *TCompactProtocol) isBoolType(b byte) bool {
+	return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
+}
+
+// Given a tCompactType constant, convert it to its corresponding
+// TType value.
+func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
+	switch byte(t) & 0x0f {
+	case STOP:
+		return STOP, nil
+	case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
+		return BOOL, nil
+	case COMPACT_BYTE:
+		return BYTE, nil
+	case COMPACT_I16:
+		return I16, nil
+	case COMPACT_I32:
+		return I32, nil
+	case COMPACT_I64:
+		return I64, nil
+	case COMPACT_DOUBLE:
+		return DOUBLE, nil
+	case COMPACT_BINARY:
+		return STRING, nil
+	case COMPACT_LIST:
+		return LIST, nil
+	case COMPACT_SET:
+		return SET, nil
+	case COMPACT_MAP:
+		return MAP, nil
+	case COMPACT_STRUCT:
+		return STRUCT, nil
+	}
+	return STOP, NewTProtocolException(fmt.Errorf("don't know what type: %v", t&0x0f))
+}
+
+// Given a TType value, find the appropriate TCompactProtocol.Types constant.
+func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
+	return ttypeToCompactType[t]
+}
+
+func (p *TCompactProtocol) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(p.trans, conf)
+	PropagateTConfiguration(p.origTransport, conf)
+	p.cfg = conf
+}
+
+var (
+	_ TConfigurationSetter = (*TCompactProtocolFactory)(nil)
+	_ TConfigurationSetter = (*TCompactProtocol)(nil)
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go b/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go
new file mode 100644
index 0000000..454d9f3
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/configuration.go
@@ -0,0 +1,378 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"crypto/tls"
+	"fmt"
+	"time"
+)
+
+// Default TConfiguration values.
+const (
+	DEFAULT_MAX_MESSAGE_SIZE = 100 * 1024 * 1024
+	DEFAULT_MAX_FRAME_SIZE   = 16384000
+
+	DEFAULT_TBINARY_STRICT_READ  = false
+	DEFAULT_TBINARY_STRICT_WRITE = true
+
+	DEFAULT_CONNECT_TIMEOUT = 0
+	DEFAULT_SOCKET_TIMEOUT  = 0
+)
+
+// TConfiguration defines some configurations shared between TTransport,
+// TProtocol, TTransportFactory, TProtocolFactory, and other implementations.
+//
+// When constructing TConfiguration, you only need to specify the non-default
+// fields. All zero values have sane default values.
+//
+// Not all configurations defined are applicable to all implementations.
+// Implementations are free to ignore the configurations not applicable to them.
+//
+// All functions attached to this type are nil-safe.
+//
+// See [1] for spec.
+//
+// NOTE: When using TConfiguration, fill in all the configurations you want to
+// set across the stack, not only the ones you want to set in the immediate
+// TTransport/TProtocol.
+//
+// For example, say you want to migrate this old code into using TConfiguration:
+//
+//     socket := thrift.NewTSocketTimeout("host:port", time.Second)
+//     transFactory := thrift.NewTFramedTransportFactoryMaxLength(
+//         thrift.NewTTransportFactory(),
+//         1024 * 1024 * 256,
+//     )
+//     protoFactory := thrift.NewTBinaryProtocolFactory(true, true)
+//
+// This is the wrong way to do it because in the end the TConfiguration used by
+// socket and transFactory will be overwritten by the one used by protoFactory
+// because of TConfiguration propagation:
+//
+//     // bad example, DO NOT USE
+//     socket := thrift.NewTSocketConf("host:port", &thrift.TConfiguration{
+//         ConnectTimeout: time.Second,
+//         SocketTimeout:  time.Second,
+//     })
+//     transFactory := thrift.NewTFramedTransportFactoryConf(
+//         thrift.NewTTransportFactory(),
+//         &thrift.TConfiguration{
+//             MaxFrameSize: 1024 * 1024 * 256,
+//         },
+//     )
+//     protoFactory := thrift.NewTBinaryProtocolFactoryConf(&thrift.TConfiguration{
+//         TBinaryStrictRead:  thrift.BoolPtr(true),
+//         TBinaryStrictWrite: thrift.BoolPtr(true),
+//     })
+//
+// This is the correct way to do it:
+//
+//     conf := &thrift.TConfiguration{
+//         ConnectTimeout: time.Second,
+//         SocketTimeout:  time.Second,
+//
+//         MaxFrameSize: 1024 * 1024 * 256,
+//
+//         TBinaryStrictRead:  thrift.BoolPtr(true),
+//         TBinaryStrictWrite: thrift.BoolPtr(true),
+//     }
+//     socket := thrift.NewTSocketConf("host:port", conf)
+//     transFactory := thrift.NewTFramedTransportFactoryConf(thrift.NewTTransportFactory(), conf)
+//     protoFactory := thrift.NewTBinaryProtocolFactoryConf(conf)
+//
+// [1]: https://github.com/apache/thrift/blob/master/doc/specs/thrift-tconfiguration.md
+type TConfiguration struct {
+	// If <= 0, DEFAULT_MAX_MESSAGE_SIZE will be used instead.
+	MaxMessageSize int32
+
+	// If <= 0, DEFAULT_MAX_FRAME_SIZE will be used instead.
+	//
+	// Also if MaxMessageSize < MaxFrameSize,
+	// MaxMessageSize will be used instead.
+	MaxFrameSize int32
+
+	// Connect and socket timeouts to be used by TSocket and TSSLSocket.
+	//
+	// 0 means no timeout.
+	//
+	// If <0, DEFAULT_CONNECT_TIMEOUT and DEFAULT_SOCKET_TIMEOUT will be
+	// used.
+	ConnectTimeout time.Duration
+	SocketTimeout  time.Duration
+
+	// TLS config to be used by TSSLSocket.
+	TLSConfig *tls.Config
+
+	// Strict read/write configurations for TBinaryProtocol.
+	//
+	// BoolPtr helper function is available to use literal values.
+	TBinaryStrictRead  *bool
+	TBinaryStrictWrite *bool
+
+	// The wrapped protocol id to be used in THeader transport/protocol.
+	//
+	// THeaderProtocolIDPtr and THeaderProtocolIDPtrMust helper functions
+	// are provided to help fill this value.
+	THeaderProtocolID *THeaderProtocolID
+
+	// Used internally by deprecated constructors, to avoid overriding
+	// underlying TTransport/TProtocol's cfg by accidental propagations.
+	//
+	// For external users this is always false.
+	noPropagation bool
+}
+
+// GetMaxMessageSize returns the max message size an implementation should
+// follow.
+//
+// It's nil-safe. DEFAULT_MAX_MESSAGE_SIZE will be returned if tc is nil.
+func (tc *TConfiguration) GetMaxMessageSize() int32 {
+	if tc == nil || tc.MaxMessageSize <= 0 {
+		return DEFAULT_MAX_MESSAGE_SIZE
+	}
+	return tc.MaxMessageSize
+}
+
+// GetMaxFrameSize returns the max frame size an implementation should follow.
+//
+// It's nil-safe. DEFAULT_MAX_FRAME_SIZE will be returned if tc is nil.
+//
+// If the configured max message size is smaller than the configured max frame
+// size, the smaller one will be returned instead.
+func (tc *TConfiguration) GetMaxFrameSize() int32 {
+	if tc == nil {
+		return DEFAULT_MAX_FRAME_SIZE
+	}
+	maxFrameSize := tc.MaxFrameSize
+	if maxFrameSize <= 0 {
+		maxFrameSize = DEFAULT_MAX_FRAME_SIZE
+	}
+	if maxMessageSize := tc.GetMaxMessageSize(); maxMessageSize < maxFrameSize {
+		return maxMessageSize
+	}
+	return maxFrameSize
+}
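+
+// For example (illustrative): with &TConfiguration{MaxMessageSize: 1024,
+// MaxFrameSize: 4096}, GetMaxFrameSize returns 1024, because the frame size
+// is capped by the max message size.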
+
+// GetConnectTimeout returns the connect timeout that should be used by
+// TSocket and TSSLSocket.
+//
+// It's nil-safe. If tc is nil, DEFAULT_CONNECT_TIMEOUT will be returned instead.
+func (tc *TConfiguration) GetConnectTimeout() time.Duration {
+	if tc == nil || tc.ConnectTimeout < 0 {
+		return DEFAULT_CONNECT_TIMEOUT
+	}
+	return tc.ConnectTimeout
+}
+
+// GetSocketTimeout returns the socket timeout that should be used by
+// TSocket and TSSLSocket.
+//
+// It's nil-safe. If tc is nil, DEFAULT_SOCKET_TIMEOUT will be returned instead.
+func (tc *TConfiguration) GetSocketTimeout() time.Duration {
+	if tc == nil || tc.SocketTimeout < 0 {
+		return DEFAULT_SOCKET_TIMEOUT
+	}
+	return tc.SocketTimeout
+}
+
+// GetTLSConfig returns the TLS config that should be used by TSSLSocket.
+//
+// It's nil-safe. If tc is nil, nil will be returned instead.
+func (tc *TConfiguration) GetTLSConfig() *tls.Config {
+	if tc == nil {
+		return nil
+	}
+	return tc.TLSConfig
+}
+
+// GetTBinaryStrictRead returns the strict read configuration that
+// TBinaryProtocol should follow.
+//
+// It's nil-safe. DEFAULT_TBINARY_STRICT_READ will be returned if either tc or
+// tc.TBinaryStrictRead is nil.
+func (tc *TConfiguration) GetTBinaryStrictRead() bool {
+	if tc == nil || tc.TBinaryStrictRead == nil {
+		return DEFAULT_TBINARY_STRICT_READ
+	}
+	return *tc.TBinaryStrictRead
+}
+
+// GetTBinaryStrictWrite returns the strict write configuration that
+// TBinaryProtocol should follow.
+//
+// It's nil-safe. DEFAULT_TBINARY_STRICT_WRITE will be returned if either tc or
+// tc.TBinaryStrictWrite is nil.
+func (tc *TConfiguration) GetTBinaryStrictWrite() bool {
+	if tc == nil || tc.TBinaryStrictWrite == nil {
+		return DEFAULT_TBINARY_STRICT_WRITE
+	}
+	return *tc.TBinaryStrictWrite
+}
+
+// GetTHeaderProtocolID returns the THeaderProtocolID that should be used by
+// THeaderProtocol clients (for servers, they always use the same one as the
+// client instead).
+//
+// It's nil-safe. If either tc or tc.THeaderProtocolID is nil,
+// THeaderProtocolDefault will be returned instead.
+// THeaderProtocolDefault will also be returned if the configured value is invalid.
+func (tc *TConfiguration) GetTHeaderProtocolID() THeaderProtocolID {
+	if tc == nil || tc.THeaderProtocolID == nil {
+		return THeaderProtocolDefault
+	}
+	protoID := *tc.THeaderProtocolID
+	if err := protoID.Validate(); err != nil {
+		return THeaderProtocolDefault
+	}
+	return protoID
+}
+
+// THeaderProtocolIDPtr validates and returns the pointer to id.
+//
+// If id is not a valid THeaderProtocolID, a pointer to THeaderProtocolDefault
+// and the validation error will be returned.
+func THeaderProtocolIDPtr(id THeaderProtocolID) (*THeaderProtocolID, error) {
+	err := id.Validate()
+	if err != nil {
+		id = THeaderProtocolDefault
+	}
+	return &id, err
+}
+
+// THeaderProtocolIDPtrMust validates and returns the pointer to id.
+//
+// It's similar to THeaderProtocolIDPtr, but it panics on validation errors
+// instead of returning them.
+func THeaderProtocolIDPtrMust(id THeaderProtocolID) *THeaderProtocolID {
+	ptr, err := THeaderProtocolIDPtr(id)
+	if err != nil {
+		panic(err)
+	}
+	return ptr
+}
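+
+// A minimal usage sketch (THeaderProtocolCompact is one of the supported
+// THeaderProtocolID values):
+//
+//     conf := &TConfiguration{
+//         THeaderProtocolID: THeaderProtocolIDPtrMust(THeaderProtocolCompact),
+//     }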
+
+// TConfigurationSetter is an optional interface TProtocol, TTransport,
+// TProtocolFactory, TTransportFactory, and other implementations can implement.
+//
+// It's intended to be called during initialization.
+// The behavior of calling SetTConfiguration on a TTransport/TProtocol in the
+// middle of a message is undefined:
+// It may or may not change the behavior of the current processing message,
+// and it may even cause the current message to fail.
+//
+// Note for implementations: SetTConfiguration might be called multiple times
+// with the same value in quick succession due to the implementation of the
+// propagation. Implementations should make SetTConfiguration as simple as
+// possible (usually just overwrite the stored configuration and propagate it to
+// the wrapped TTransports/TProtocols).
+type TConfigurationSetter interface {
+	SetTConfiguration(*TConfiguration)
+}
+
+// PropagateTConfiguration propagates cfg to impl if impl implements
+// TConfigurationSetter and cfg is non-nil, otherwise it does nothing.
+//
+// NOTE: nil cfg is not propagated. If you want to propagate a TConfiguration
+// with all default values, use &TConfiguration{} explicitly instead.
+func PropagateTConfiguration(impl interface{}, cfg *TConfiguration) {
+	if cfg == nil || cfg.noPropagation {
+		return
+	}
+
+	if setter, ok := impl.(TConfigurationSetter); ok {
+		setter.SetTConfiguration(cfg)
+	}
+}
+
+func checkSizeForProtocol(size int32, cfg *TConfiguration) error {
+	if size < 0 {
+		return NewTProtocolExceptionWithType(
+			NEGATIVE_SIZE,
+			fmt.Errorf("negative size: %d", size),
+		)
+	}
+	if size > cfg.GetMaxMessageSize() {
+		return NewTProtocolExceptionWithType(
+			SIZE_LIMIT,
+			fmt.Errorf("size exceeded max allowed: %d", size),
+		)
+	}
+	return nil
+}
+
+type tTransportFactoryConf struct {
+	delegate TTransportFactory
+	cfg      *TConfiguration
+}
+
+func (f *tTransportFactoryConf) GetTransport(orig TTransport) (TTransport, error) {
+	trans, err := f.delegate.GetTransport(orig)
+	if err == nil {
+		PropagateTConfiguration(orig, f.cfg)
+		PropagateTConfiguration(trans, f.cfg)
+	}
+	return trans, err
+}
+
+func (f *tTransportFactoryConf) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(f.delegate, f.cfg)
+	f.cfg = cfg
+}
+
+// TTransportFactoryConf wraps a TTransportFactory to propagate
+// TConfiguration on the factory's GetTransport calls.
+func TTransportFactoryConf(delegate TTransportFactory, conf *TConfiguration) TTransportFactory {
+	return &tTransportFactoryConf{
+		delegate: delegate,
+		cfg:      conf,
+	}
+}
+
+type tProtocolFactoryConf struct {
+	delegate TProtocolFactory
+	cfg      *TConfiguration
+}
+
+func (f *tProtocolFactoryConf) GetProtocol(trans TTransport) TProtocol {
+	proto := f.delegate.GetProtocol(trans)
+	PropagateTConfiguration(trans, f.cfg)
+	PropagateTConfiguration(proto, f.cfg)
+	return proto
+}
+
+func (f *tProtocolFactoryConf) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(f.delegate, f.cfg)
+	f.cfg = cfg
+}
+
+// TProtocolFactoryConf wraps a TProtocolFactory to propagate
+// TConfiguration on the factory's GetProtocol calls.
+func TProtocolFactoryConf(delegate TProtocolFactory, conf *TConfiguration) TProtocolFactory {
+	return &tProtocolFactoryConf{
+		delegate: delegate,
+		cfg:      conf,
+	}
+}
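+
+// A usage sketch for the two Conf wrappers in this file (the delegate
+// factories below are just examples):
+//
+//     conf := &TConfiguration{MaxFrameSize: 1024 * 1024}
+//     transFactory := TTransportFactoryConf(NewTTransportFactory(), conf)
+//     protoFactory := TProtocolFactoryConf(NewTBinaryProtocolFactoryConf(conf), conf)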
+
+var (
+	_ TConfigurationSetter = (*tTransportFactoryConf)(nil)
+	_ TConfigurationSetter = (*tProtocolFactoryConf)(nil)
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/context.go b/vendor/github.com/uber/jaeger-client-go/thrift/context.go
new file mode 100644
index 0000000..d15c1bc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/context.go
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "context"
+
+var defaultCtx = context.Background()
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
new file mode 100644
index 0000000..53bf862
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/exception.go
@@ -0,0 +1,116 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+)
+
+// Generic Thrift exception
+type TException interface {
+	error
+
+	TExceptionType() TExceptionType
+}
+
+// Prepends additional information to an error without losing the Thrift exception interface
+func PrependError(prepend string, err error) error {
+	msg := prepend + err.Error()
+
+	var te TException
+	if errors.As(err, &te) {
+		switch te.TExceptionType() {
+		case TExceptionTypeTransport:
+			if t, ok := err.(TTransportException); ok {
+				return prependTTransportException(prepend, t)
+			}
+		case TExceptionTypeProtocol:
+			if t, ok := err.(TProtocolException); ok {
+				return prependTProtocolException(prepend, t)
+			}
+		case TExceptionTypeApplication:
+			var t TApplicationException
+			if errors.As(err, &t) {
+				return NewTApplicationException(t.TypeId(), msg)
+			}
+		}
+
+		return wrappedTException{
+			err:            err,
+			msg:            msg,
+			tExceptionType: te.TExceptionType(),
+		}
+	}
+
+	return errors.New(msg)
+}
+
+// TExceptionType is an enum type to categorize different "subclasses" of TExceptions.
+type TExceptionType byte
+
+// TExceptionType values
+const (
+	TExceptionTypeUnknown     TExceptionType = iota
+	TExceptionTypeCompiled                   // TExceptions defined in thrift files and generated by thrift compiler
+	TExceptionTypeApplication                // TApplicationExceptions
+	TExceptionTypeProtocol                   // TProtocolExceptions
+	TExceptionTypeTransport                  // TTransportExceptions
+)
+
+// WrapTException wraps an error into TException.
+//
+// If err is nil or already TException, it's returned as-is.
+// Otherwise it will be wrapped into a TException with TExceptionType() returning
+// TExceptionTypeUnknown, and Unwrap() returning the original error.
+func WrapTException(err error) TException {
+	if err == nil {
+		return nil
+	}
+
+	if te, ok := err.(TException); ok {
+		return te
+	}
+
+	return wrappedTException{
+		err:            err,
+		msg:            err.Error(),
+		tExceptionType: TExceptionTypeUnknown,
+	}
+}
+
+type wrappedTException struct {
+	err            error
+	msg            string
+	tExceptionType TExceptionType
+}
+
+func (w wrappedTException) Error() string {
+	return w.msg
+}
+
+func (w wrappedTException) TExceptionType() TExceptionType {
+	return w.tExceptionType
+}
+
+func (w wrappedTException) Unwrap() error {
+	return w.err
+}
+
+var _ TException = wrappedTException{}
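+
+// Sketch of how the helpers above compose (the error values are placeholders):
+//
+//     err := WrapTException(errors.New("boom"))
+//     err = PrependError("while reading frame: ", err)
+//     // The result still reports TExceptionTypeUnknown and wraps the
+//     // original error.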
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
new file mode 100644
index 0000000..ac9bd48
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_context.go
@@ -0,0 +1,110 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+)
+
+// See https://godoc.org/context#WithValue on why we need the unexported typedefs.
+type (
+	headerKey     string
+	headerKeyList int
+)
+
+// Values for headerKeyList.
+const (
+	headerKeyListRead headerKeyList = iota
+	headerKeyListWrite
+)
+
+// SetHeader sets a header in the context.
+func SetHeader(ctx context.Context, key, value string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKey(key),
+		value,
+	)
+}
+
+// UnsetHeader unsets a previously set header in the context.
+func UnsetHeader(ctx context.Context, key string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKey(key),
+		nil,
+	)
+}
+
+// GetHeader returns a value of the given header from the context.
+func GetHeader(ctx context.Context, key string) (value string, ok bool) {
+	if v := ctx.Value(headerKey(key)); v != nil {
+		value, ok = v.(string)
+	}
+	return
+}
+
+// SetReadHeaderList sets the key list of read THeaders in the context.
+func SetReadHeaderList(ctx context.Context, keys []string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKeyListRead,
+		keys,
+	)
+}
+
+// GetReadHeaderList returns the key list of read THeaders from the context.
+func GetReadHeaderList(ctx context.Context) []string {
+	if v := ctx.Value(headerKeyListRead); v != nil {
+		if value, ok := v.([]string); ok {
+			return value
+		}
+	}
+	return nil
+}
+
+// SetWriteHeaderList sets the key list of THeaders to write in the context.
+func SetWriteHeaderList(ctx context.Context, keys []string) context.Context {
+	return context.WithValue(
+		ctx,
+		headerKeyListWrite,
+		keys,
+	)
+}
+
+// GetWriteHeaderList returns the key list of THeaders to write from the context.
+func GetWriteHeaderList(ctx context.Context) []string {
+	if v := ctx.Value(headerKeyListWrite); v != nil {
+		if value, ok := v.([]string); ok {
+			return value
+		}
+	}
+	return nil
+}
+
+// AddReadTHeaderToContext adds all the THeader headers into the context.
+func AddReadTHeaderToContext(ctx context.Context, headers THeaderMap) context.Context {
+	keys := make([]string, 0, len(headers))
+	for key, value := range headers {
+		ctx = SetHeader(ctx, key, value)
+		keys = append(keys, key)
+	}
+	return SetReadHeaderList(ctx, keys)
+}
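+
+// Illustrative usage of the helpers above (the key and value are arbitrary):
+//
+//     ctx := SetHeader(context.Background(), "trace-id", "abc123")
+//     if v, ok := GetHeader(ctx, "trace-id"); ok {
+//         _ = v // "abc123"
+//     }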
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go
new file mode 100644
index 0000000..878041f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_protocol.go
@@ -0,0 +1,351 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"errors"
+)
+
+// THeaderProtocol is a thrift protocol that implements THeader:
+// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
+//
+// It supports either binary or compact protocol as the wrapped protocol.
+//
+// Most of the THeader handling happens inside THeaderTransport.
+type THeaderProtocol struct {
+	transport *THeaderTransport
+
+	// Will be initialized on first read/write.
+	protocol TProtocol
+
+	cfg *TConfiguration
+}
+
+// Deprecated: Use NewTHeaderProtocolConf instead.
+func NewTHeaderProtocol(trans TTransport) *THeaderProtocol {
+	return newTHeaderProtocolConf(trans, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderProtocolConf creates a new THeaderProtocol from the underlying
+// transport with given TConfiguration.
+//
+// The passed in transport will be wrapped with THeaderTransport.
+//
+// Note that THeaderTransport handles framing and zlib by itself,
+// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
+// instead of a rich transport like TZlibTransport or TFramedTransport.
+func NewTHeaderProtocolConf(trans TTransport, conf *TConfiguration) *THeaderProtocol {
+	return newTHeaderProtocolConf(trans, conf)
+}
+
+func newTHeaderProtocolConf(trans TTransport, cfg *TConfiguration) *THeaderProtocol {
+	t := NewTHeaderTransportConf(trans, cfg)
+	p, _ := t.cfg.GetTHeaderProtocolID().GetProtocol(t)
+	PropagateTConfiguration(p, cfg)
+	return &THeaderProtocol{
+		transport: t,
+		protocol:  p,
+		cfg:       cfg,
+	}
+}
+
+type tHeaderProtocolFactory struct {
+	cfg *TConfiguration
+}
+
+func (f tHeaderProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return newTHeaderProtocolConf(trans, f.cfg)
+}
+
+func (f *tHeaderProtocolFactory) SetTConfiguration(cfg *TConfiguration) {
+	f.cfg = cfg
+}
+
+// Deprecated: Use NewTHeaderProtocolFactoryConf instead.
+func NewTHeaderProtocolFactory() TProtocolFactory {
+	return NewTHeaderProtocolFactoryConf(&TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderProtocolFactoryConf creates a factory for THeader with given
+// TConfiguration.
+func NewTHeaderProtocolFactoryConf(conf *TConfiguration) TProtocolFactory {
+	return tHeaderProtocolFactory{
+		cfg: conf,
+	}
+}
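+
+// A minimal client-side sketch (NewTSocketConf is the socket constructor used
+// in the configuration.go examples; error handling elided):
+//
+//     conf := &TConfiguration{ConnectTimeout: time.Second, SocketTimeout: time.Second}
+//     sock := NewTSocketConf("host:port", conf)
+//     proto := NewTHeaderProtocolConf(sock, conf)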
+
+// Transport returns the underlying transport.
+//
+// It's guaranteed to be of type *THeaderTransport.
+func (p *THeaderProtocol) Transport() TTransport {
+	return p.transport
+}
+
+// GetReadHeaders returns the THeaderMap read from transport.
+func (p *THeaderProtocol) GetReadHeaders() THeaderMap {
+	return p.transport.GetReadHeaders()
+}
+
+// SetWriteHeader sets a header for write.
+func (p *THeaderProtocol) SetWriteHeader(key, value string) {
+	p.transport.SetWriteHeader(key, value)
+}
+
+// ClearWriteHeaders clears all write headers previously set.
+func (p *THeaderProtocol) ClearWriteHeaders() {
+	p.transport.ClearWriteHeaders()
+}
+
+// AddTransform adds a transform for writing.
+func (p *THeaderProtocol) AddTransform(transform THeaderTransformID) error {
+	return p.transport.AddTransform(transform)
+}
+
+func (p *THeaderProtocol) Flush(ctx context.Context) error {
+	return p.transport.Flush(ctx)
+}
+
+func (p *THeaderProtocol) WriteMessageBegin(ctx context.Context, name string, typeID TMessageType, seqID int32) error {
+	newProto, err := p.transport.Protocol().GetProtocol(p.transport)
+	if err != nil {
+		return err
+	}
+	PropagateTConfiguration(newProto, p.cfg)
+	p.protocol = newProto
+	p.transport.SequenceID = seqID
+	return p.protocol.WriteMessageBegin(ctx, name, typeID, seqID)
+}
+
+func (p *THeaderProtocol) WriteMessageEnd(ctx context.Context) error {
+	if err := p.protocol.WriteMessageEnd(ctx); err != nil {
+		return err
+	}
+	return p.transport.Flush(ctx)
+}
+
+func (p *THeaderProtocol) WriteStructBegin(ctx context.Context, name string) error {
+	return p.protocol.WriteStructBegin(ctx, name)
+}
+
+func (p *THeaderProtocol) WriteStructEnd(ctx context.Context) error {
+	return p.protocol.WriteStructEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteFieldBegin(ctx context.Context, name string, typeID TType, id int16) error {
+	return p.protocol.WriteFieldBegin(ctx, name, typeID, id)
+}
+
+func (p *THeaderProtocol) WriteFieldEnd(ctx context.Context) error {
+	return p.protocol.WriteFieldEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteFieldStop(ctx context.Context) error {
+	return p.protocol.WriteFieldStop(ctx)
+}
+
+func (p *THeaderProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+	return p.protocol.WriteMapBegin(ctx, keyType, valueType, size)
+}
+
+func (p *THeaderProtocol) WriteMapEnd(ctx context.Context) error {
+	return p.protocol.WriteMapEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+	return p.protocol.WriteListBegin(ctx, elemType, size)
+}
+
+func (p *THeaderProtocol) WriteListEnd(ctx context.Context) error {
+	return p.protocol.WriteListEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+	return p.protocol.WriteSetBegin(ctx, elemType, size)
+}
+
+func (p *THeaderProtocol) WriteSetEnd(ctx context.Context) error {
+	return p.protocol.WriteSetEnd(ctx)
+}
+
+func (p *THeaderProtocol) WriteBool(ctx context.Context, value bool) error {
+	return p.protocol.WriteBool(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteByte(ctx context.Context, value int8) error {
+	return p.protocol.WriteByte(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI16(ctx context.Context, value int16) error {
+	return p.protocol.WriteI16(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI32(ctx context.Context, value int32) error {
+	return p.protocol.WriteI32(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteI64(ctx context.Context, value int64) error {
+	return p.protocol.WriteI64(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteDouble(ctx context.Context, value float64) error {
+	return p.protocol.WriteDouble(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteString(ctx context.Context, value string) error {
+	return p.protocol.WriteString(ctx, value)
+}
+
+func (p *THeaderProtocol) WriteBinary(ctx context.Context, value []byte) error {
+	return p.protocol.WriteBinary(ctx, value)
+}
+
+// ReadFrame calls the underlying THeaderTransport's ReadFrame function.
+func (p *THeaderProtocol) ReadFrame(ctx context.Context) error {
+	return p.transport.ReadFrame(ctx)
+}
+
+func (p *THeaderProtocol) ReadMessageBegin(ctx context.Context) (name string, typeID TMessageType, seqID int32, err error) {
+	if err = p.transport.ReadFrame(ctx); err != nil {
+		return
+	}
+
+	var newProto TProtocol
+	newProto, err = p.transport.Protocol().GetProtocol(p.transport)
+	if err != nil {
+		var tAppExc TApplicationException
+		if !errors.As(err, &tAppExc) {
+			return
+		}
+		if e := p.protocol.WriteMessageBegin(ctx, "", EXCEPTION, seqID); e != nil {
+			return
+		}
+		if e := tAppExc.Write(ctx, p.protocol); e != nil {
+			return
+		}
+		if e := p.protocol.WriteMessageEnd(ctx); e != nil {
+			return
+		}
+		if e := p.transport.Flush(ctx); e != nil {
+			return
+		}
+		return
+	}
+	PropagateTConfiguration(newProto, p.cfg)
+	p.protocol = newProto
+
+	return p.protocol.ReadMessageBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadMessageEnd(ctx context.Context) error {
+	return p.protocol.ReadMessageEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+	return p.protocol.ReadStructBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadStructEnd(ctx context.Context) error {
+	return p.protocol.ReadStructEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadFieldBegin(ctx context.Context) (name string, typeID TType, id int16, err error) {
+	return p.protocol.ReadFieldBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadFieldEnd(ctx context.Context) error {
+	return p.protocol.ReadFieldEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error) {
+	return p.protocol.ReadMapBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadMapEnd(ctx context.Context) error {
+	return p.protocol.ReadMapEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, err error) {
+	return p.protocol.ReadListBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadListEnd(ctx context.Context) error {
+	return p.protocol.ReadListEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, err error) {
+	return p.protocol.ReadSetBegin(ctx)
+}
+
+func (p *THeaderProtocol) ReadSetEnd(ctx context.Context) error {
+	return p.protocol.ReadSetEnd(ctx)
+}
+
+func (p *THeaderProtocol) ReadBool(ctx context.Context) (value bool, err error) {
+	return p.protocol.ReadBool(ctx)
+}
+
+func (p *THeaderProtocol) ReadByte(ctx context.Context) (value int8, err error) {
+	return p.protocol.ReadByte(ctx)
+}
+
+func (p *THeaderProtocol) ReadI16(ctx context.Context) (value int16, err error) {
+	return p.protocol.ReadI16(ctx)
+}
+
+func (p *THeaderProtocol) ReadI32(ctx context.Context) (value int32, err error) {
+	return p.protocol.ReadI32(ctx)
+}
+
+func (p *THeaderProtocol) ReadI64(ctx context.Context) (value int64, err error) {
+	return p.protocol.ReadI64(ctx)
+}
+
+func (p *THeaderProtocol) ReadDouble(ctx context.Context) (value float64, err error) {
+	return p.protocol.ReadDouble(ctx)
+}
+
+func (p *THeaderProtocol) ReadString(ctx context.Context) (value string, err error) {
+	return p.protocol.ReadString(ctx)
+}
+
+func (p *THeaderProtocol) ReadBinary(ctx context.Context) (value []byte, err error) {
+	return p.protocol.ReadBinary(ctx)
+}
+
+func (p *THeaderProtocol) Skip(ctx context.Context, fieldType TType) error {
+	return p.protocol.Skip(ctx, fieldType)
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (p *THeaderProtocol) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(p.transport, cfg)
+	PropagateTConfiguration(p.protocol, cfg)
+	p.cfg = cfg
+}
+
+var (
+	_ TConfigurationSetter = (*tHeaderProtocolFactory)(nil)
+	_ TConfigurationSetter = (*THeaderProtocol)(nil)
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go
new file mode 100644
index 0000000..f5736df
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/header_transport.go
@@ -0,0 +1,810 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bufio"
+	"bytes"
+	"compress/zlib"
+	"context"
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+)
+
+// Size in bytes for 32-bit ints.
+const size32 = 4
+
+type headerMeta struct {
+	MagicFlags   uint32
+	SequenceID   int32
+	HeaderLength uint16
+}
+
+const headerMetaSize = 10
+
+type clientType int
+
+const (
+	clientUnknown clientType = iota
+	clientHeaders
+	clientFramedBinary
+	clientUnframedBinary
+	clientFramedCompact
+	clientUnframedCompact
+)
+
+// Constants defined in THeader format:
+// https://github.com/apache/thrift/blob/master/doc/specs/HeaderFormat.md
+const (
+	THeaderHeaderMagic  uint32 = 0x0fff0000
+	THeaderHeaderMask   uint32 = 0xffff0000
+	THeaderFlagsMask    uint32 = 0x0000ffff
+	THeaderMaxFrameSize uint32 = 0x3fffffff
+)
+
+// THeaderMap is the type of the header map in THeader transport.
+type THeaderMap map[string]string
+
+// THeaderProtocolID is the wrapped protocol id used in THeader.
+type THeaderProtocolID int32
+
+// Supported THeaderProtocolID values.
+const (
+	THeaderProtocolBinary  THeaderProtocolID = 0x00
+	THeaderProtocolCompact THeaderProtocolID = 0x02
+	THeaderProtocolDefault                   = THeaderProtocolBinary
+)
+
+// Declared globally to avoid repetitive allocations, not really used.
+var globalMemoryBuffer = NewTMemoryBuffer()
+
+// Validate checks whether the THeaderProtocolID is a valid/supported one.
+func (id THeaderProtocolID) Validate() error {
+	_, err := id.GetProtocol(globalMemoryBuffer)
+	return err
+}
+
+// GetProtocol gets the corresponding TProtocol from the wrapped protocol id.
+func (id THeaderProtocolID) GetProtocol(trans TTransport) (TProtocol, error) {
+	switch id {
+	default:
+		return nil, NewTApplicationException(
+			INVALID_PROTOCOL,
+			fmt.Sprintf("THeader protocol id %d not supported", id),
+		)
+	case THeaderProtocolBinary:
+		return NewTBinaryProtocolTransport(trans), nil
+	case THeaderProtocolCompact:
+		return NewTCompactProtocol(trans), nil
+	}
+}
+
+// THeaderTransformID defines the numeric id of the transform used.
+type THeaderTransformID int32
+
+// THeaderTransformID values.
+//
+// Values not defined here are not currently supported, namely HMAC and Snappy.
+const (
+	TransformNone THeaderTransformID = iota // 0, no special handling
+	TransformZlib                           // 1, zlib
+)
+
+var supportedTransformIDs = map[THeaderTransformID]bool{
+	TransformNone: true,
+	TransformZlib: true,
+}
+
+// TransformReader is an io.ReadCloser that handles transforms reading.
+type TransformReader struct {
+	io.Reader
+
+	closers []io.Closer
+}
+
+var _ io.ReadCloser = (*TransformReader)(nil)
+
+// NewTransformReaderWithCapacity initializes a TransformReader with expected
+// closers capacity.
+//
+// If you don't know the closers capacity beforehand, using
+//
+//     &TransformReader{Reader: baseReader}
+//
+// directly is sufficient.
+func NewTransformReaderWithCapacity(baseReader io.Reader, capacity int) *TransformReader {
+	return &TransformReader{
+		Reader:  baseReader,
+		closers: make([]io.Closer, 0, capacity),
+	}
+}
+
+// Close calls the underlying closers in appropriate order,
+// stops at and returns the first error encountered.
+func (tr *TransformReader) Close() error {
+	// Call closers in reversed order
+	for i := len(tr.closers) - 1; i >= 0; i-- {
+		if err := tr.closers[i].Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// AddTransform adds a transform.
+func (tr *TransformReader) AddTransform(id THeaderTransformID) error {
+	switch id {
+	default:
+		return NewTApplicationException(
+			INVALID_TRANSFORM,
+			fmt.Sprintf("THeaderTransformID %d not supported", id),
+		)
+	case TransformNone:
+		// no-op
+	case TransformZlib:
+		readCloser, err := zlib.NewReader(tr.Reader)
+		if err != nil {
+			return err
+		}
+		tr.Reader = readCloser
+		tr.closers = append(tr.closers, readCloser)
+	}
+	return nil
+}
+
+// TransformWriter is an io.WriteCloser that handles transforms writing.
+type TransformWriter struct {
+	io.Writer
+
+	closers []io.Closer
+}
+
+var _ io.WriteCloser = (*TransformWriter)(nil)
+
+// NewTransformWriter creates a new TransformWriter with base writer and transforms.
+func NewTransformWriter(baseWriter io.Writer, transforms []THeaderTransformID) (io.WriteCloser, error) {
+	writer := &TransformWriter{
+		Writer:  baseWriter,
+		closers: make([]io.Closer, 0, len(transforms)),
+	}
+	for _, id := range transforms {
+		if err := writer.AddTransform(id); err != nil {
+			return nil, err
+		}
+	}
+	return writer, nil
+}
+
+// Close calls the underlying closers in appropriate order,
+// stops at and returns the first error encountered.
+func (tw *TransformWriter) Close() error {
+	// Call closers in reversed order
+	for i := len(tw.closers) - 1; i >= 0; i-- {
+		if err := tw.closers[i].Close(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// AddTransform adds a transform.
+func (tw *TransformWriter) AddTransform(id THeaderTransformID) error {
+	switch id {
+	default:
+		return NewTApplicationException(
+			INVALID_TRANSFORM,
+			fmt.Sprintf("THeaderTransformID %d not supported", id),
+		)
+	case TransformNone:
+		// no-op
+	case TransformZlib:
+		writeCloser := zlib.NewWriter(tw.Writer)
+		tw.Writer = writeCloser
+		tw.closers = append(tw.closers, writeCloser)
+	}
+	return nil
+}
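+
+// Illustrative only: wrapping a bytes.Buffer with the zlib transform.
+//
+//     var compressed bytes.Buffer
+//     w, err := NewTransformWriter(&compressed, []THeaderTransformID{TransformZlib})
+//     if err == nil {
+//         w.Write([]byte("payload"))
+//         w.Close() // flushes the zlib stream into compressed
+//     }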
+
+// THeaderInfoType is the type id of the info headers.
+type THeaderInfoType int32
+
+// Supported THeaderInfoType values.
+const (
+	_            THeaderInfoType = iota // Skip 0
+	InfoKeyValue                        // 1
+	// Rest of the info types are not supported.
+)
+
+// THeaderTransport is a Transport mode that implements THeader.
+//
+// Note that THeaderTransport handles framing and zlib by itself,
+// so the underlying transport should be a raw socket transport (TSocket or TSSLSocket),
+// instead of a rich transport like TZlibTransport or TFramedTransport.
+type THeaderTransport struct {
+	SequenceID int32
+	Flags      uint32
+
+	transport TTransport
+
+	// THeaderMap for read and write
+	readHeaders  THeaderMap
+	writeHeaders THeaderMap
+
+	// Reading related variables.
+	reader *bufio.Reader
+	// When a frame is detected, we read the frame fully into frameBuffer.
+	frameBuffer bytes.Buffer
+	// When it's non-nil, Read should read from frameReader instead of
+	// reader, and an EOF error indicates the end of the frame instead of
+	// the end of the whole transport.
+	frameReader io.ReadCloser
+
+	// Writing related variables
+	writeBuffer     bytes.Buffer
+	writeTransforms []THeaderTransformID
+
+	clientType clientType
+	protocolID THeaderProtocolID
+	cfg        *TConfiguration
+
+	// buffer is used in the following scenarios to avoid repetitive
+	// allocations, while 4 is big enough for all those scenarios:
+	//
+	// * header padding (max size 4)
+	// * write the frame size (size 4)
+	buffer [4]byte
+}
+
+var _ TTransport = (*THeaderTransport)(nil)
+
+// Deprecated: Use NewTHeaderTransportConf instead.
+func NewTHeaderTransport(trans TTransport) *THeaderTransport {
+	return NewTHeaderTransportConf(trans, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderTransportConf creates THeaderTransport from the
+// underlying transport, with given TConfiguration attached.
+//
+// If trans is already a *THeaderTransport, it will be returned as is,
+// but with TConfiguration overridden by the value passed in.
+//
+// The protocol ID in TConfiguration is only useful for client transports.
+// For servers,
+// the protocol ID will be overridden again to the one set by the client,
+// to ensure that servers always speak the same dialect as the client.
+func NewTHeaderTransportConf(trans TTransport, conf *TConfiguration) *THeaderTransport {
+	if ht, ok := trans.(*THeaderTransport); ok {
+		ht.SetTConfiguration(conf)
+		return ht
+	}
+	PropagateTConfiguration(trans, conf)
+	return &THeaderTransport{
+		transport:    trans,
+		reader:       bufio.NewReader(trans),
+		writeHeaders: make(THeaderMap),
+		protocolID:   conf.GetTHeaderProtocolID(),
+		cfg:          conf,
+	}
+}
+
+// Open calls the underlying transport's Open function.
+func (t *THeaderTransport) Open() error {
+	return t.transport.Open()
+}
+
+// IsOpen calls the underlying transport's IsOpen function.
+func (t *THeaderTransport) IsOpen() bool {
+	return t.transport.IsOpen()
+}
+
+// ReadFrame tries to read the frame header, guess the client type, and handle
+// unframed clients.
+func (t *THeaderTransport) ReadFrame(ctx context.Context) error {
+	if !t.needReadFrame() {
+		// No need to read frame, skipping.
+		return nil
+	}
+
+	// Peek and handle the first 32 bits.
+	// They could either be the length field of a framed message,
+	// or the first bytes of an unframed message.
+	var buf []byte
+	var err error
+	// This is also usually the first read from a connection,
+	// so handle retries around socket timeouts.
+	_, deadlineSet := ctx.Deadline()
+	for {
+		buf, err = t.reader.Peek(size32)
+		if deadlineSet && isTimeoutError(err) && ctx.Err() == nil {
+			// This is I/O timeout and we still have time,
+			// continue trying
+			continue
+		}
+		// For anything else, do not retry
+		break
+	}
+	if err != nil {
+		return err
+	}
+
+	frameSize := binary.BigEndian.Uint32(buf)
+	if frameSize&VERSION_MASK == VERSION_1 {
+		t.clientType = clientUnframedBinary
+		return nil
+	}
+	if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
+		t.clientType = clientUnframedCompact
+		return nil
+	}
+
+	// At this point it should be a framed message,
+	// sanity check on frameSize then discard the peeked part.
+	if frameSize > THeaderMaxFrameSize || frameSize > uint32(t.cfg.GetMaxFrameSize()) {
+		return NewTProtocolExceptionWithType(
+			SIZE_LIMIT,
+			errors.New("frame too large"),
+		)
+	}
+	t.reader.Discard(size32)
+
+	// Read the frame fully into frameBuffer.
+	_, err = io.CopyN(&t.frameBuffer, t.reader, int64(frameSize))
+	if err != nil {
+		return err
+	}
+	t.frameReader = ioutil.NopCloser(&t.frameBuffer)
+
+	// Peek and handle the next 32 bits.
+	buf = t.frameBuffer.Bytes()[:size32]
+	version := binary.BigEndian.Uint32(buf)
+	if version&THeaderHeaderMask == THeaderHeaderMagic {
+		t.clientType = clientHeaders
+		return t.parseHeaders(ctx, frameSize)
+	}
+	if version&VERSION_MASK == VERSION_1 {
+		t.clientType = clientFramedBinary
+		return nil
+	}
+	if buf[0] == COMPACT_PROTOCOL_ID && buf[1]&COMPACT_VERSION_MASK == COMPACT_VERSION {
+		t.clientType = clientFramedCompact
+		return nil
+	}
+	if err := t.endOfFrame(); err != nil {
+		return err
+	}
+	return NewTProtocolExceptionWithType(
+		NOT_IMPLEMENTED,
+		errors.New("unsupported client transport type"),
+	)
+}
+
+// endOfFrame does end of frame handling.
+//
+// It closes frameReader, and also resets frame related states.
+func (t *THeaderTransport) endOfFrame() error {
+	defer func() {
+		t.frameBuffer.Reset()
+		t.frameReader = nil
+	}()
+	return t.frameReader.Close()
+}
+
+func (t *THeaderTransport) parseHeaders(ctx context.Context, frameSize uint32) error {
+	if t.clientType != clientHeaders {
+		return nil
+	}
+
+	var err error
+	var meta headerMeta
+	if err = binary.Read(&t.frameBuffer, binary.BigEndian, &meta); err != nil {
+		return err
+	}
+	frameSize -= headerMetaSize
+	t.Flags = meta.MagicFlags & THeaderFlagsMask
+	t.SequenceID = meta.SequenceID
+	headerLength := int64(meta.HeaderLength) * 4
+	if int64(frameSize) < headerLength {
+		return NewTProtocolExceptionWithType(
+			SIZE_LIMIT,
+			errors.New("header size is larger than the whole frame"),
+		)
+	}
+	headerBuf := NewTMemoryBuffer()
+	_, err = io.CopyN(headerBuf, &t.frameBuffer, headerLength)
+	if err != nil {
+		return err
+	}
+	hp := NewTCompactProtocol(headerBuf)
+	hp.SetTConfiguration(t.cfg)
+
+	// At this point the header is already read into headerBuf,
+	// and t.frameBuffer starts from the actual payload.
+	protoID, err := hp.readVarint32()
+	if err != nil {
+		return err
+	}
+	t.protocolID = THeaderProtocolID(protoID)
+
+	var transformCount int32
+	transformCount, err = hp.readVarint32()
+	if err != nil {
+		return err
+	}
+	if transformCount > 0 {
+		reader := NewTransformReaderWithCapacity(
+			&t.frameBuffer,
+			int(transformCount),
+		)
+		t.frameReader = reader
+		transformIDs := make([]THeaderTransformID, transformCount)
+		for i := 0; i < int(transformCount); i++ {
+			id, err := hp.readVarint32()
+			if err != nil {
+				return err
+			}
+			transformIDs[i] = THeaderTransformID(id)
+		}
+		// The transform IDs on the wire were added in the order of writing,
+		// so on the reading side we need to reverse the order.
+		for i := transformCount - 1; i >= 0; i-- {
+			id := transformIDs[i]
+			if err := reader.AddTransform(id); err != nil {
+				return err
+			}
+		}
+	}
+
+	// The info part does not use the transforms yet, so it's
+	// important to continue using headerBuf.
+	headers := make(THeaderMap)
+	for {
+		infoType, err := hp.readVarint32()
+		if errors.Is(err, io.EOF) {
+			break
+		}
+		if err != nil {
+			return err
+		}
+		if THeaderInfoType(infoType) == InfoKeyValue {
+			count, err := hp.readVarint32()
+			if err != nil {
+				return err
+			}
+			for i := 0; i < int(count); i++ {
+				key, err := hp.ReadString(ctx)
+				if err != nil {
+					return err
+				}
+				value, err := hp.ReadString(ctx)
+				if err != nil {
+					return err
+				}
+				headers[key] = value
+			}
+		} else {
+			// Skip reading info section on the first
+			// unsupported info type.
+			break
+		}
+	}
+	t.readHeaders = headers
+
+	return nil
+}
+
+func (t *THeaderTransport) needReadFrame() bool {
+	if t.clientType == clientUnknown {
+		// This is a new connection that's never read before.
+		return true
+	}
+	if t.isFramed() && t.frameReader == nil {
+		// We just finished the last frame.
+		return true
+	}
+	return false
+}
+
+func (t *THeaderTransport) Read(p []byte) (read int, err error) {
+	// Using context.Background here instead of a passed-in context is safe.
+	// First, there's no way to pass a context into this function.
+	// Second, in the vast majority of calls the frame has already been read
+	// into frameReader; calling ReadFrame here mainly guards against callers
+	// that invoke Read without calling ReadFrame first.
+	err = t.ReadFrame(context.Background())
+	if err != nil {
+		return
+	}
+	if t.frameReader != nil {
+		read, err = t.frameReader.Read(p)
+		if err == nil && t.frameBuffer.Len() <= 0 {
+			// the last Read finished the frame, do endOfFrame
+			// handling here.
+			err = t.endOfFrame()
+		} else if err == io.EOF {
+			err = t.endOfFrame()
+			if err != nil {
+				return
+			}
+			if read == 0 {
+				// Try to read the next frame when we hit EOF
+				// (end of frame) immediately.
+				// When we got here, it means the last read
+				// finished the previous frame, but didn't
+				// do endOfFrame handling yet.
+				// We have to read the next frame here,
+				// as otherwise we would return 0 and nil,
+				// which is a case not handled well by most
+				// protocol implementations.
+				return t.Read(p)
+			}
+		}
+		return
+	}
+	return t.reader.Read(p)
+}
+
+// Write writes data to the write buffer.
+//
+// You need to call Flush to actually write them to the transport.
+func (t *THeaderTransport) Write(p []byte) (int, error) {
+	return t.writeBuffer.Write(p)
+}
+
+// Flush writes the appropriate header and the write buffer to the underlying transport.
+func (t *THeaderTransport) Flush(ctx context.Context) error {
+	if t.writeBuffer.Len() == 0 {
+		return nil
+	}
+
+	defer t.writeBuffer.Reset()
+
+	switch t.clientType {
+	default:
+		fallthrough
+	case clientUnknown:
+		t.clientType = clientHeaders
+		fallthrough
+	case clientHeaders:
+		headers := NewTMemoryBuffer()
+		hp := NewTCompactProtocol(headers)
+		hp.SetTConfiguration(t.cfg)
+		if _, err := hp.writeVarint32(int32(t.protocolID)); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		if _, err := hp.writeVarint32(int32(len(t.writeTransforms))); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		for _, transform := range t.writeTransforms {
+			if _, err := hp.writeVarint32(int32(transform)); err != nil {
+				return NewTTransportExceptionFromError(err)
+			}
+		}
+		if len(t.writeHeaders) > 0 {
+			if _, err := hp.writeVarint32(int32(InfoKeyValue)); err != nil {
+				return NewTTransportExceptionFromError(err)
+			}
+			if _, err := hp.writeVarint32(int32(len(t.writeHeaders))); err != nil {
+				return NewTTransportExceptionFromError(err)
+			}
+			for key, value := range t.writeHeaders {
+				if err := hp.WriteString(ctx, key); err != nil {
+					return NewTTransportExceptionFromError(err)
+				}
+				if err := hp.WriteString(ctx, value); err != nil {
+					return NewTTransportExceptionFromError(err)
+				}
+			}
+		}
+		padding := 4 - headers.Len()%4
+		if padding < 4 {
+			buf := t.buffer[:padding]
+			for i := range buf {
+				buf[i] = 0
+			}
+			if _, err := headers.Write(buf); err != nil {
+				return NewTTransportExceptionFromError(err)
+			}
+		}
+
+		var payload bytes.Buffer
+		meta := headerMeta{
+			MagicFlags:   THeaderHeaderMagic + t.Flags&THeaderFlagsMask,
+			SequenceID:   t.SequenceID,
+			HeaderLength: uint16(headers.Len() / 4),
+		}
+		if err := binary.Write(&payload, binary.BigEndian, meta); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		if _, err := io.Copy(&payload, headers); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+
+		writer, err := NewTransformWriter(&payload, t.writeTransforms)
+		if err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		if _, err := io.Copy(writer, &t.writeBuffer); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		if err := writer.Close(); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+
+		// First write frame length
+		buf := t.buffer[:size32]
+		binary.BigEndian.PutUint32(buf, uint32(payload.Len()))
+		if _, err := t.transport.Write(buf); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		// Then write the payload
+		if _, err := io.Copy(t.transport, &payload); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+
+	case clientFramedBinary, clientFramedCompact:
+		buf := t.buffer[:size32]
+		binary.BigEndian.PutUint32(buf, uint32(t.writeBuffer.Len()))
+		if _, err := t.transport.Write(buf); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+		fallthrough
+	case clientUnframedBinary, clientUnframedCompact:
+		if _, err := io.Copy(t.transport, &t.writeBuffer); err != nil {
+			return NewTTransportExceptionFromError(err)
+		}
+	}
+
+	select {
+	default:
+	case <-ctx.Done():
+		return NewTTransportExceptionFromError(ctx.Err())
+	}
+
+	return t.transport.Flush(ctx)
+}
+
+// Close closes the transport, along with its underlying transport.
+func (t *THeaderTransport) Close() error {
+	if err := t.Flush(context.Background()); err != nil {
+		return err
+	}
+	return t.transport.Close()
+}
+
+// RemainingBytes calls underlying transport's RemainingBytes.
+//
+// Even in framed cases, because of all the possible compression transforms
+// involved, the remaining frame size is likely to be different from the actual
+// remaining readable bytes, so we don't bother to keep tracking the remaining
+// frame size by ourselves and just use the underlying transport's
+// RemainingBytes directly.
+func (t *THeaderTransport) RemainingBytes() uint64 {
+	return t.transport.RemainingBytes()
+}
+
+// GetReadHeaders returns the THeaderMap read from transport.
+func (t *THeaderTransport) GetReadHeaders() THeaderMap {
+	return t.readHeaders
+}
+
+// SetWriteHeader sets a header for write.
+func (t *THeaderTransport) SetWriteHeader(key, value string) {
+	t.writeHeaders[key] = value
+}
+
+// ClearWriteHeaders clears all write headers previously set.
+func (t *THeaderTransport) ClearWriteHeaders() {
+	t.writeHeaders = make(THeaderMap)
+}
+
+// AddTransform adds a transform for writing.
+func (t *THeaderTransport) AddTransform(transform THeaderTransformID) error {
+	if !supportedTransformIDs[transform] {
+		return NewTProtocolExceptionWithType(
+			NOT_IMPLEMENTED,
+			fmt.Errorf("THeaderTransformID %d not supported", transform),
+		)
+	}
+	t.writeTransforms = append(t.writeTransforms, transform)
+	return nil
+}
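+
+// The following is an illustrative sketch, not part of the upstream Apache
+// Thrift code, showing how write headers, transforms, Write and Flush fit
+// together on a THeaderTransport. It assumes a TMemoryBuffer as the underlying
+// transport, that a nil *TConfiguration falls back to default settings, and
+// that TransformZlib is one of the supported transform IDs:
+//
+//	underlying := NewTMemoryBuffer()
+//	trans := NewTHeaderTransportConf(underlying, nil)
+//	trans.SetWriteHeader("client-id", "example")
+//	if err := trans.AddTransform(TransformZlib); err != nil {
+//		return err // unsupported IDs yield a NOT_IMPLEMENTED protocol exception
+//	}
+//	if _, err := trans.Write([]byte("payload")); err != nil {
+//		return err // Write only buffers; nothing is sent yet
+//	}
+//	return trans.Flush(context.Background()) // writes frame size, headers and payload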
+
+// Protocol returns the wrapped protocol id used in this THeaderTransport.
+func (t *THeaderTransport) Protocol() THeaderProtocolID {
+	switch t.clientType {
+	default:
+		return t.protocolID
+	case clientFramedBinary, clientUnframedBinary:
+		return THeaderProtocolBinary
+	case clientFramedCompact, clientUnframedCompact:
+		return THeaderProtocolCompact
+	}
+}
+
+func (t *THeaderTransport) isFramed() bool {
+	switch t.clientType {
+	default:
+		return false
+	case clientHeaders, clientFramedBinary, clientFramedCompact:
+		return true
+	}
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (t *THeaderTransport) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(t.transport, cfg)
+	t.cfg = cfg
+}
+
+// THeaderTransportFactory is a TTransportFactory implementation to create
+// THeaderTransport.
+//
+// It also implements TConfigurationSetter.
+type THeaderTransportFactory struct {
+	// The underlying factory; it could be nil.
+	Factory TTransportFactory
+
+	cfg *TConfiguration
+}
+
+// Deprecated: Use NewTHeaderTransportFactoryConf instead.
+func NewTHeaderTransportFactory(factory TTransportFactory) TTransportFactory {
+	return NewTHeaderTransportFactoryConf(factory, &TConfiguration{
+		noPropagation: true,
+	})
+}
+
+// NewTHeaderTransportFactoryConf creates a new *THeaderTransportFactory with
+// the given *TConfiguration.
+func NewTHeaderTransportFactoryConf(factory TTransportFactory, conf *TConfiguration) TTransportFactory {
+	return &THeaderTransportFactory{
+		Factory: factory,
+
+		cfg: conf,
+	}
+}
+
+// GetTransport implements TTransportFactory.
+func (f *THeaderTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	if f.Factory != nil {
+		t, err := f.Factory.GetTransport(trans)
+		if err != nil {
+			return nil, err
+		}
+		return NewTHeaderTransportConf(t, f.cfg), nil
+	}
+	return NewTHeaderTransportConf(trans, f.cfg), nil
+}
+
+// SetTConfiguration implements TConfigurationSetter.
+func (f *THeaderTransportFactory) SetTConfiguration(cfg *TConfiguration) {
+	PropagateTConfiguration(f.Factory, f.cfg)
+	f.cfg = cfg
+}
+
+var (
+	_ TConfigurationSetter = (*THeaderTransportFactory)(nil)
+	_ TConfigurationSetter = (*THeaderTransport)(nil)
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/logger.go b/vendor/github.com/uber/jaeger-client-go/thrift/logger.go
new file mode 100644
index 0000000..50d44ec
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/logger.go
@@ -0,0 +1,59 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"log"
+	"os"
+)
+
+// Logger is a simple wrapper of a logging function.
+//
+// In reality users might use different logging libraries, and those libraries
+// are not always compatible with each other.
+//
+// Logger is meant to be a simple common ground that whatever logging library
+// the user chooses can easily be wrapped into.
+//
+// See https://issues.apache.org/jira/browse/THRIFT-4985 for the design
+// discussion behind it.
+type Logger func(msg string)
+
+// NopLogger is a Logger implementation that does nothing.
+func NopLogger(msg string) {}
+
+// StdLogger wraps stdlib log package into a Logger.
+//
+// If the logger passed in is nil, it falls back to stderr with default flags.
+func StdLogger(logger *log.Logger) Logger {
+	if logger == nil {
+		logger = log.New(os.Stderr, "", log.LstdFlags)
+	}
+	return func(msg string) {
+		logger.Print(msg)
+	}
+}
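+
+// A minimal usage sketch (not part of the upstream code) showing how either a
+// custom *log.Logger or no logging at all can be plugged in through Logger:
+//
+//	// Route messages to a prefixed stdlib logger.
+//	var l Logger = StdLogger(log.New(os.Stderr, "thrift: ", log.LstdFlags))
+//	l("transport closed unexpectedly")
+//
+//	// Or drop messages entirely.
+//	var quiet Logger = NopLogger
+//	quiet("this message goes nowhere")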
+
+func fallbackLogger(logger Logger) Logger {
+	if logger == nil {
+		return StdLogger(nil)
+	}
+	return logger
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
new file mode 100644
index 0000000..5936d27
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/memory_buffer.go
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bytes"
+	"context"
+)
+
+// Memory buffer-based implementation of the TTransport interface.
+type TMemoryBuffer struct {
+	*bytes.Buffer
+	size int
+}
+
+type TMemoryBufferTransportFactory struct {
+	size int
+}
+
+func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	if trans != nil {
+		t, ok := trans.(*TMemoryBuffer)
+		if ok && t.size > 0 {
+			return NewTMemoryBufferLen(t.size), nil
+		}
+	}
+	return NewTMemoryBufferLen(p.size), nil
+}
+
+func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
+	return &TMemoryBufferTransportFactory{size: size}
+}
+
+func NewTMemoryBuffer() *TMemoryBuffer {
+	return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
+}
+
+func NewTMemoryBufferLen(size int) *TMemoryBuffer {
+	buf := make([]byte, 0, size)
+	return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
+}
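+
+// An illustrative sketch (not upstream code) of TMemoryBuffer acting as an
+// in-memory TTransport that can be both written to and read from:
+//
+//	buf := NewTMemoryBufferLen(64)   // 64 bytes of initial capacity
+//	buf.WriteString(`{"hello":1}`)   // *bytes.Buffer methods are promoted
+//	n := buf.RemainingBytes()        // == 11, the number of unread bytes
+//	out := make([]byte, n)
+//	buf.Read(out)                    // drains the buffer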
+
+func (p *TMemoryBuffer) IsOpen() bool {
+	return true
+}
+
+func (p *TMemoryBuffer) Open() error {
+	return nil
+}
+
+func (p *TMemoryBuffer) Close() error {
+	p.Buffer.Reset()
+	return nil
+}
+
+// Flushing a memory buffer is a no-op
+func (p *TMemoryBuffer) Flush(ctx context.Context) error {
+	return nil
+}
+
+func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
+	return uint64(p.Buffer.Len())
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go
new file mode 100644
index 0000000..25ab2e9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/messagetype.go
@@ -0,0 +1,31 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Message type constants in the Thrift protocol.
+type TMessageType int32
+
+const (
+	INVALID_TMESSAGE_TYPE TMessageType = 0
+	CALL                  TMessageType = 1
+	REPLY                 TMessageType = 2
+	EXCEPTION             TMessageType = 3
+	ONEWAY                TMessageType = 4
+)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
new file mode 100644
index 0000000..e4512d2
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/numeric.go
@@ -0,0 +1,164 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"math"
+	"strconv"
+)
+
+type Numeric interface {
+	Int64() int64
+	Int32() int32
+	Int16() int16
+	Byte() byte
+	Int() int
+	Float64() float64
+	Float32() float32
+	String() string
+	isNull() bool
+}
+
+type numeric struct {
+	iValue int64
+	dValue float64
+	sValue string
+	isNil  bool
+}
+
+var (
+	INFINITY          Numeric
+	NEGATIVE_INFINITY Numeric
+	NAN               Numeric
+	ZERO              Numeric
+	NUMERIC_NULL      Numeric
+)
+
+func NewNumericFromDouble(dValue float64) Numeric {
+	if math.IsInf(dValue, 1) {
+		return INFINITY
+	}
+	if math.IsInf(dValue, -1) {
+		return NEGATIVE_INFINITY
+	}
+	if math.IsNaN(dValue) {
+		return NAN
+	}
+	iValue := int64(dValue)
+	sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
+	isNil := false
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI64(iValue int64) Numeric {
+	dValue := float64(iValue)
+	sValue := strconv.FormatInt(iValue, 10)
+	isNil := false
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromI32(iValue int32) Numeric {
+	dValue := float64(iValue)
+	sValue := strconv.FormatInt(int64(iValue), 10)
+	isNil := false
+	return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromString(sValue string) Numeric {
+	if sValue == INFINITY.String() {
+		return INFINITY
+	}
+	if sValue == NEGATIVE_INFINITY.String() {
+		return NEGATIVE_INFINITY
+	}
+	if sValue == NAN.String() {
+		return NAN
+	}
+	iValue, _ := strconv.ParseInt(sValue, 10, 64)
+	dValue, _ := strconv.ParseFloat(sValue, 64)
+	isNil := len(sValue) == 0
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
+}
+
+func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
+	if isNull {
+		return NewNullNumeric()
+	}
+	if sValue == JSON_INFINITY {
+		return INFINITY
+	}
+	if sValue == JSON_NEGATIVE_INFINITY {
+		return NEGATIVE_INFINITY
+	}
+	if sValue == JSON_NAN {
+		return NAN
+	}
+	iValue, _ := strconv.ParseInt(sValue, 10, 64)
+	dValue, _ := strconv.ParseFloat(sValue, 64)
+	return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
+}
+
+func NewNullNumeric() Numeric {
+	return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
+}
+
+func (p *numeric) Int64() int64 {
+	return p.iValue
+}
+
+func (p *numeric) Int32() int32 {
+	return int32(p.iValue)
+}
+
+func (p *numeric) Int16() int16 {
+	return int16(p.iValue)
+}
+
+func (p *numeric) Byte() byte {
+	return byte(p.iValue)
+}
+
+func (p *numeric) Int() int {
+	return int(p.iValue)
+}
+
+func (p *numeric) Float64() float64 {
+	return p.dValue
+}
+
+func (p *numeric) Float32() float32 {
+	return float32(p.dValue)
+}
+
+func (p *numeric) String() string {
+	return p.sValue
+}
+
+func (p *numeric) isNull() bool {
+	return p.isNil
+}
+
+func init() {
+	INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
+	NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
+	NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
+	ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
+	NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go
new file mode 100644
index 0000000..245a3cc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/processor_factory.go
@@ -0,0 +1,80 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import "context"
+
+// A processor is a generic object which operates upon an input stream and
+// writes to some output stream.
+type TProcessor interface {
+	Process(ctx context.Context, in, out TProtocol) (bool, TException)
+
+	// ProcessorMap returns a map of thrift method names to TProcessorFunctions.
+	ProcessorMap() map[string]TProcessorFunction
+
+	// AddToProcessorMap adds the given TProcessorFunction to the internal
+	// processor map at the given key.
+	//
+	// If one is already set at the given key, it will be replaced with the new
+	// TProcessorFunction.
+	AddToProcessorMap(string, TProcessorFunction)
+}
+
+type TProcessorFunction interface {
+	Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
+}
+
+// The default processor factory just returns a singleton
+// instance.
+type TProcessorFactory interface {
+	GetProcessor(trans TTransport) TProcessor
+}
+
+type tProcessorFactory struct {
+	processor TProcessor
+}
+
+func NewTProcessorFactory(p TProcessor) TProcessorFactory {
+	return &tProcessorFactory{processor: p}
+}
+
+func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
+	return p.processor
+}
+
+// The default processor function factory just returns a singleton
+// instance.
+type TProcessorFunctionFactory interface {
+	GetProcessorFunction(trans TTransport) TProcessorFunction
+}
+
+type tProcessorFunctionFactory struct {
+	processor TProcessorFunction
+}
+
+func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
+	return &tProcessorFunctionFactory{processor: p}
+}
+
+func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
+	return p.processor
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
new file mode 100644
index 0000000..0a69bd4
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol.go
@@ -0,0 +1,177 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"errors"
+	"fmt"
+)
+
+const (
+	VERSION_MASK = 0xffff0000
+	VERSION_1    = 0x80010000
+)
+
+type TProtocol interface {
+	WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqid int32) error
+	WriteMessageEnd(ctx context.Context) error
+	WriteStructBegin(ctx context.Context, name string) error
+	WriteStructEnd(ctx context.Context) error
+	WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error
+	WriteFieldEnd(ctx context.Context) error
+	WriteFieldStop(ctx context.Context) error
+	WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error
+	WriteMapEnd(ctx context.Context) error
+	WriteListBegin(ctx context.Context, elemType TType, size int) error
+	WriteListEnd(ctx context.Context) error
+	WriteSetBegin(ctx context.Context, elemType TType, size int) error
+	WriteSetEnd(ctx context.Context) error
+	WriteBool(ctx context.Context, value bool) error
+	WriteByte(ctx context.Context, value int8) error
+	WriteI16(ctx context.Context, value int16) error
+	WriteI32(ctx context.Context, value int32) error
+	WriteI64(ctx context.Context, value int64) error
+	WriteDouble(ctx context.Context, value float64) error
+	WriteString(ctx context.Context, value string) error
+	WriteBinary(ctx context.Context, value []byte) error
+
+	ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqid int32, err error)
+	ReadMessageEnd(ctx context.Context) error
+	ReadStructBegin(ctx context.Context) (name string, err error)
+	ReadStructEnd(ctx context.Context) error
+	ReadFieldBegin(ctx context.Context) (name string, typeId TType, id int16, err error)
+	ReadFieldEnd(ctx context.Context) error
+	ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, err error)
+	ReadMapEnd(ctx context.Context) error
+	ReadListBegin(ctx context.Context) (elemType TType, size int, err error)
+	ReadListEnd(ctx context.Context) error
+	ReadSetBegin(ctx context.Context) (elemType TType, size int, err error)
+	ReadSetEnd(ctx context.Context) error
+	ReadBool(ctx context.Context) (value bool, err error)
+	ReadByte(ctx context.Context) (value int8, err error)
+	ReadI16(ctx context.Context) (value int16, err error)
+	ReadI32(ctx context.Context) (value int32, err error)
+	ReadI64(ctx context.Context) (value int64, err error)
+	ReadDouble(ctx context.Context) (value float64, err error)
+	ReadString(ctx context.Context) (value string, err error)
+	ReadBinary(ctx context.Context) (value []byte, err error)
+
+	Skip(ctx context.Context, fieldType TType) (err error)
+	Flush(ctx context.Context) (err error)
+
+	Transport() TTransport
+}
+
+// DEFAULT_RECURSION_DEPTH is the maximum recursion depth the Skip functions
+// will traverse by default.
+const DEFAULT_RECURSION_DEPTH = 64
+
+// SkipDefaultDepth skips over the next data element from the provided input
+// TProtocol object, using DEFAULT_RECURSION_DEPTH as the depth limit.
+func SkipDefaultDepth(ctx context.Context, prot TProtocol, typeId TType) (err error) {
+	return Skip(ctx, prot, typeId, DEFAULT_RECURSION_DEPTH)
+}
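+
+// An illustrative sketch (not upstream code) of the typical use of
+// SkipDefaultDepth: discarding a field of an unknown type while decoding a
+// struct, so forward-compatible readers can move past it. Here prot is any
+// TProtocol implementation, and ctx and expectedID come from the caller:
+//
+//	_, fieldType, fieldID, err := prot.ReadFieldBegin(ctx)
+//	if err != nil {
+//		return err
+//	}
+//	if fieldID != expectedID {
+//		// Not a field we know about; consume and discard its value.
+//		if err := SkipDefaultDepth(ctx, prot, fieldType); err != nil {
+//			return err
+//		}
+//	}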
+
+// Skip skips over the next data element from the provided input TProtocol
+// object, recursing at most maxDepth levels into nested containers.
+func Skip(ctx context.Context, self TProtocol, fieldType TType, maxDepth int) (err error) {
+
+	if maxDepth <= 0 {
+		return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
+	}
+
+	switch fieldType {
+	case BOOL:
+		_, err = self.ReadBool(ctx)
+		return
+	case BYTE:
+		_, err = self.ReadByte(ctx)
+		return
+	case I16:
+		_, err = self.ReadI16(ctx)
+		return
+	case I32:
+		_, err = self.ReadI32(ctx)
+		return
+	case I64:
+		_, err = self.ReadI64(ctx)
+		return
+	case DOUBLE:
+		_, err = self.ReadDouble(ctx)
+		return
+	case STRING:
+		_, err = self.ReadString(ctx)
+		return
+	case STRUCT:
+		if _, err = self.ReadStructBegin(ctx); err != nil {
+			return err
+		}
+		for {
+			_, typeId, _, err := self.ReadFieldBegin(ctx)
+			if err != nil {
+				return err
+			}
+			if typeId == STOP {
+				break
+			}
+			if err := Skip(ctx, self, typeId, maxDepth-1); err != nil {
+				return err
+			}
+			if err := self.ReadFieldEnd(ctx); err != nil {
+				return err
+			}
+		}
+		return self.ReadStructEnd(ctx)
+	case MAP:
+		keyType, valueType, size, err := self.ReadMapBegin(ctx)
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			err := Skip(ctx, self, keyType, maxDepth-1)
+			if err != nil {
+				return err
+			}
+			if err := Skip(ctx, self, valueType, maxDepth-1); err != nil {
+				return err
+			}
+		}
+		return self.ReadMapEnd(ctx)
+	case SET:
+		elemType, size, err := self.ReadSetBegin(ctx)
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			err := Skip(ctx, self, elemType, maxDepth-1)
+			if err != nil {
+				return err
+			}
+		}
+		return self.ReadSetEnd(ctx)
+	case LIST:
+		elemType, size, err := self.ReadListBegin(ctx)
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			err := Skip(ctx, self, elemType, maxDepth-1)
+			if err != nil {
+				return err
+			}
+		}
+		return self.ReadListEnd(ctx)
+	default:
+		return NewTProtocolExceptionWithType(INVALID_DATA, fmt.Errorf("Unknown data type %d", fieldType))
+	}
+	return nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
new file mode 100644
index 0000000..9dcf4bf
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_exception.go
@@ -0,0 +1,104 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"encoding/base64"
+	"errors"
+)
+
+// Thrift Protocol exception
+type TProtocolException interface {
+	TException
+	TypeId() int
+}
+
+const (
+	UNKNOWN_PROTOCOL_EXCEPTION = 0
+	INVALID_DATA               = 1
+	NEGATIVE_SIZE              = 2
+	SIZE_LIMIT                 = 3
+	BAD_VERSION                = 4
+	NOT_IMPLEMENTED            = 5
+	DEPTH_LIMIT                = 6
+)
+
+type tProtocolException struct {
+	typeId int
+	err    error
+	msg    string
+}
+
+var _ TProtocolException = (*tProtocolException)(nil)
+
+func (tProtocolException) TExceptionType() TExceptionType {
+	return TExceptionTypeProtocol
+}
+
+func (p *tProtocolException) TypeId() int {
+	return p.typeId
+}
+
+func (p *tProtocolException) String() string {
+	return p.msg
+}
+
+func (p *tProtocolException) Error() string {
+	return p.msg
+}
+
+func (p *tProtocolException) Unwrap() error {
+	return p.err
+}
+
+func NewTProtocolException(err error) TProtocolException {
+	if err == nil {
+		return nil
+	}
+
+	if e, ok := err.(TProtocolException); ok {
+		return e
+	}
+
+	if errors.As(err, new(base64.CorruptInputError)) {
+		return NewTProtocolExceptionWithType(INVALID_DATA, err)
+	}
+
+	return NewTProtocolExceptionWithType(UNKNOWN_PROTOCOL_EXCEPTION, err)
+}
+
+func NewTProtocolExceptionWithType(errType int, err error) TProtocolException {
+	if err == nil {
+		return nil
+	}
+	return &tProtocolException{
+		typeId: errType,
+		err:    err,
+		msg:    err.Error(),
+	}
+}
+
+func prependTProtocolException(prepend string, err TProtocolException) TProtocolException {
+	return &tProtocolException{
+		typeId: err.TypeId(),
+		err:    err,
+		msg:    prepend + err.Error(),
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go
new file mode 100644
index 0000000..c40f796
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/protocol_factory.go
@@ -0,0 +1,25 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Factory interface for constructing protocol instances.
+type TProtocolFactory interface {
+	GetProtocol(trans TTransport) TProtocol
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
new file mode 100644
index 0000000..d884c6a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/response_helper.go
@@ -0,0 +1,94 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+)
+
+// See https://godoc.org/context#WithValue on why we need the unexported typedefs.
+type responseHelperKey struct{}
+
+// TResponseHelper defines an object with a set of helper functions that can be
+// retrieved from the context object passed into server handler functions.
+//
+// Use GetResponseHelper to retrieve the injected TResponseHelper implementation
+// from the context object.
+//
+// The zero value of TResponseHelper is valid with all helper functions being
+// no-op.
+type TResponseHelper struct {
+	// THeader related functions
+	*THeaderResponseHelper
+}
+
+// THeaderResponseHelper defines THeader related TResponseHelper functions.
+//
+// The zero value of *THeaderResponseHelper is valid with all helper functions
+// being no-op.
+type THeaderResponseHelper struct {
+	proto *THeaderProtocol
+}
+
+// NewTHeaderResponseHelper creates a new THeaderResponseHelper from the
+// underlying TProtocol.
+func NewTHeaderResponseHelper(proto TProtocol) *THeaderResponseHelper {
+	if hp, ok := proto.(*THeaderProtocol); ok {
+		return &THeaderResponseHelper{
+			proto: hp,
+		}
+	}
+	return nil
+}
+
+// SetHeader sets a response header.
+//
+// It's a no-op if the underlying protocol/transport does not support THeader.
+func (h *THeaderResponseHelper) SetHeader(key, value string) {
+	if h != nil && h.proto != nil {
+		h.proto.SetWriteHeader(key, value)
+	}
+}
+
+// ClearHeaders clears all the response headers previously set.
+//
+// It's a no-op if the underlying protocol/transport does not support THeader.
+func (h *THeaderResponseHelper) ClearHeaders() {
+	if h != nil && h.proto != nil {
+		h.proto.ClearWriteHeaders()
+	}
+}
+
+// GetResponseHelper retrieves the TResponseHelper implementation injected into
+// the context object.
+//
+// If no helper was found in the context object, a nop helper with ok == false
+// will be returned.
+func GetResponseHelper(ctx context.Context) (helper TResponseHelper, ok bool) {
+	if v := ctx.Value(responseHelperKey{}); v != nil {
+		helper, ok = v.(TResponseHelper)
+	}
+	return
+}
+
+// SetResponseHelper injects TResponseHelper into the context object.
+func SetResponseHelper(ctx context.Context, helper TResponseHelper) context.Context {
+	return context.WithValue(ctx, responseHelperKey{}, helper)
+}
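+
+// An illustrative sketch (not upstream code) of a server handler attaching a
+// response header through the helper injected into its context by a
+// THeader-aware server; the handler type is hypothetical:
+//
+//	func (h *pingHandler) Ping(ctx context.Context) error {
+//		if helper, ok := GetResponseHelper(ctx); ok {
+//			helper.SetHeader("trace-id", "abc123")
+//		}
+//		return nil
+//	}
+//
+// Because the zero value of TResponseHelper is a valid no-op, calling SetHeader
+// on the returned helper is safe even when ok is false.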
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
new file mode 100644
index 0000000..83fdf29
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/rich_transport.go
@@ -0,0 +1,71 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+	"io"
+)
+
+type RichTransport struct {
+	TTransport
+}
+
+// NewTRichTransport wraps a TTransport to provide the TRichTransport interface.
+func NewTRichTransport(trans TTransport) *RichTransport {
+	return &RichTransport{trans}
+}
+
+func (r *RichTransport) ReadByte() (c byte, err error) {
+	return readByte(r.TTransport)
+}
+
+func (r *RichTransport) WriteByte(c byte) error {
+	return writeByte(r.TTransport, c)
+}
+
+func (r *RichTransport) WriteString(s string) (n int, err error) {
+	return r.Write([]byte(s))
+}
+
+func (r *RichTransport) RemainingBytes() (num_bytes uint64) {
+	return r.TTransport.RemainingBytes()
+}
+
+func readByte(r io.Reader) (c byte, err error) {
+	v := [1]byte{0}
+	n, err := r.Read(v[0:1])
+	if n > 0 && (err == nil || errors.Is(err, io.EOF)) {
+		return v[0], nil
+	}
+	if n > 0 && err != nil {
+		return v[0], err
+	}
+	if err != nil {
+		return 0, err
+	}
+	return v[0], nil
+}
+
+func writeByte(w io.Writer, c byte) error {
+	v := [1]byte{c}
+	_, err := w.Write(v[0:1])
+	return err
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
new file mode 100644
index 0000000..c449790
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/serializer.go
@@ -0,0 +1,136 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"sync"
+)
+
+type TSerializer struct {
+	Transport *TMemoryBuffer
+	Protocol  TProtocol
+}
+
+type TStruct interface {
+	Write(ctx context.Context, p TProtocol) error
+	Read(ctx context.Context, p TProtocol) error
+}
+
+func NewTSerializer() *TSerializer {
+	transport := NewTMemoryBufferLen(1024)
+	protocol := NewTBinaryProtocolTransport(transport)
+
+	return &TSerializer{
+		Transport: transport,
+		Protocol:  protocol,
+	}
+}
+
+func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) {
+	t.Transport.Reset()
+
+	if err = msg.Write(ctx, t.Protocol); err != nil {
+		return
+	}
+
+	if err = t.Protocol.Flush(ctx); err != nil {
+		return
+	}
+	if err = t.Transport.Flush(ctx); err != nil {
+		return
+	}
+
+	return t.Transport.String(), nil
+}
+
+func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) {
+	t.Transport.Reset()
+
+	if err = msg.Write(ctx, t.Protocol); err != nil {
+		return
+	}
+
+	if err = t.Protocol.Flush(ctx); err != nil {
+		return
+	}
+
+	if err = t.Transport.Flush(ctx); err != nil {
+		return
+	}
+
+	b = append(b, t.Transport.Bytes()...)
+	return
+}
+
+// TSerializerPool is the thread-safe version of TSerializer; it uses a
+// resource pool of TSerializer objects under the hood.
+//
+// It must be initialized with either NewTSerializerPool or
+// NewTSerializerPoolSizeFactory.
+type TSerializerPool struct {
+	pool sync.Pool
+}
+
+// NewTSerializerPool creates a new TSerializerPool.
+//
+// NewTSerializer can be used as the arg here.
+func NewTSerializerPool(f func() *TSerializer) *TSerializerPool {
+	return &TSerializerPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				return f()
+			},
+		},
+	}
+}
+
+// NewTSerializerPoolSizeFactory creates a new TSerializerPool with the given
+// size and protocol factory.
+//
+// Note that size is not a limit: it only dictates the initial size of the
+// underlying TMemoryBuffer, which can still grow larger than that.
+func NewTSerializerPoolSizeFactory(size int, factory TProtocolFactory) *TSerializerPool {
+	return &TSerializerPool{
+		pool: sync.Pool{
+			New: func() interface{} {
+				transport := NewTMemoryBufferLen(size)
+				protocol := factory.GetProtocol(transport)
+
+				return &TSerializer{
+					Transport: transport,
+					Protocol:  protocol,
+				}
+			},
+		},
+	}
+}
+
+func (t *TSerializerPool) WriteString(ctx context.Context, msg TStruct) (string, error) {
+	s := t.pool.Get().(*TSerializer)
+	defer t.pool.Put(s)
+	return s.WriteString(ctx, msg)
+}
+
+func (t *TSerializerPool) Write(ctx context.Context, msg TStruct) ([]byte, error) {
+	s := t.pool.Get().(*TSerializer)
+	defer t.pool.Put(s)
+	return s.Write(ctx, msg)
+}
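+
+// An illustrative sketch (not upstream code) of sharing one TSerializerPool
+// across goroutines; newExampleStruct stands in for a hypothetical generated
+// type implementing TStruct:
+//
+//	pool := NewTSerializerPoolSizeFactory(1024, NewTSimpleJSONProtocolFactory())
+//	var msg TStruct = newExampleStruct()
+//	b, err := pool.Write(context.Background(), msg)
+//	if err != nil {
+//		return nil, err
+//	}
+//	return b, nil // b holds msg encoded with the simple JSON protocol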
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go
new file mode 100644
index 0000000..51c40b6
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/server_transport.go
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// TServerTransport is the server transport: an object that provides client transports.
+type TServerTransport interface {
+	Listen() error
+	Accept() (TTransport, error)
+	Close() error
+
+	// Optional method implementation. This signals to the server transport
+	// that it should break out of any accept() or listen() that it is currently
+	// blocked on. This method, if implemented, MUST be thread safe, as it may
+	// be called from a different thread context than the other TServerTransport
+	// methods.
+	Interrupt() error
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
new file mode 100644
index 0000000..d1a8154
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/simple_json_protocol.go
@@ -0,0 +1,1373 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"bufio"
+	"bytes"
+	"context"
+	"encoding/base64"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"strconv"
+)
+
+type _ParseContext int
+
+const (
+	_CONTEXT_INVALID              _ParseContext = iota
+	_CONTEXT_IN_TOPLEVEL                        // 1
+	_CONTEXT_IN_LIST_FIRST                      // 2
+	_CONTEXT_IN_LIST                            // 3
+	_CONTEXT_IN_OBJECT_FIRST                    // 4
+	_CONTEXT_IN_OBJECT_NEXT_KEY                 // 5
+	_CONTEXT_IN_OBJECT_NEXT_VALUE               // 6
+)
+
+func (p _ParseContext) String() string {
+	switch p {
+	case _CONTEXT_IN_TOPLEVEL:
+		return "TOPLEVEL"
+	case _CONTEXT_IN_LIST_FIRST:
+		return "LIST-FIRST"
+	case _CONTEXT_IN_LIST:
+		return "LIST"
+	case _CONTEXT_IN_OBJECT_FIRST:
+		return "OBJECT-FIRST"
+	case _CONTEXT_IN_OBJECT_NEXT_KEY:
+		return "OBJECT-NEXT-KEY"
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		return "OBJECT-NEXT-VALUE"
+	}
+	return "UNKNOWN-PARSE-CONTEXT"
+}
+
+type jsonContextStack []_ParseContext
+
+func (s *jsonContextStack) push(v _ParseContext) {
+	*s = append(*s, v)
+}
+
+func (s jsonContextStack) peek() (v _ParseContext, ok bool) {
+	l := len(s)
+	if l <= 0 {
+		return
+	}
+	return s[l-1], true
+}
+
+func (s *jsonContextStack) pop() (v _ParseContext, ok bool) {
+	l := len(*s)
+	if l <= 0 {
+		return
+	}
+	v = (*s)[l-1]
+	*s = (*s)[0 : l-1]
+	return v, true
+}
+
+var errEmptyJSONContextStack = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Unexpected empty json protocol context stack"))
+
+// Simple JSON protocol implementation for thrift.
+//
+// This protocol produces/consumes a simple output format
+// suitable for parsing by scripting languages.  It should not be
+// confused with the full-featured TJSONProtocol.
+//
+type TSimpleJSONProtocol struct {
+	trans TTransport
+
+	parseContextStack jsonContextStack
+	dumpContext       jsonContextStack
+
+	writer *bufio.Writer
+	reader *bufio.Reader
+}
+
+// Constructor
+func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol {
+	v := &TSimpleJSONProtocol{trans: t,
+		writer: bufio.NewWriter(t),
+		reader: bufio.NewReader(t),
+	}
+	v.parseContextStack.push(_CONTEXT_IN_TOPLEVEL)
+	v.dumpContext.push(_CONTEXT_IN_TOPLEVEL)
+	return v
+}
+
+// Factory
+type TSimpleJSONProtocolFactory struct{}
+
+func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+	return NewTSimpleJSONProtocol(trans)
+}
+
+func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory {
+	return &TSimpleJSONProtocolFactory{}
+}
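+
+// An illustrative sketch (not upstream code) of emitting simple JSON with this
+// protocol on top of a TMemoryBuffer:
+//
+//	buf := NewTMemoryBuffer()
+//	prot := NewTSimpleJSONProtocol(buf)
+//	ctx := context.Background()
+//	prot.WriteString(ctx, "hello")
+//	prot.Flush(ctx) // flush the buffered writer into buf
+//	// buf.String() now returns `"hello"` (quoted by the protocol)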
+
+var (
+	JSON_COMMA                   []byte
+	JSON_COLON                   []byte
+	JSON_LBRACE                  []byte
+	JSON_RBRACE                  []byte
+	JSON_LBRACKET                []byte
+	JSON_RBRACKET                []byte
+	JSON_QUOTE                   byte
+	JSON_QUOTE_BYTES             []byte
+	JSON_NULL                    []byte
+	JSON_TRUE                    []byte
+	JSON_FALSE                   []byte
+	JSON_INFINITY                string
+	JSON_NEGATIVE_INFINITY       string
+	JSON_NAN                     string
+	JSON_INFINITY_BYTES          []byte
+	JSON_NEGATIVE_INFINITY_BYTES []byte
+	JSON_NAN_BYTES               []byte
+	json_nonbase_map_elem_bytes  []byte
+)
+
+func init() {
+	JSON_COMMA = []byte{','}
+	JSON_COLON = []byte{':'}
+	JSON_LBRACE = []byte{'{'}
+	JSON_RBRACE = []byte{'}'}
+	JSON_LBRACKET = []byte{'['}
+	JSON_RBRACKET = []byte{']'}
+	JSON_QUOTE = '"'
+	JSON_QUOTE_BYTES = []byte{'"'}
+	JSON_NULL = []byte{'n', 'u', 'l', 'l'}
+	JSON_TRUE = []byte{'t', 'r', 'u', 'e'}
+	JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'}
+	JSON_INFINITY = "Infinity"
+	JSON_NEGATIVE_INFINITY = "-Infinity"
+	JSON_NAN = "NaN"
+	JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+	JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
+	JSON_NAN_BYTES = []byte{'N', 'a', 'N'}
+	json_nonbase_map_elem_bytes = []byte{']', ',', '['}
+}
+
+func jsonQuote(s string) string {
+	b, _ := json.Marshal(s)
+	s1 := string(b)
+	return s1
+}
+
+func jsonUnquote(s string) (string, bool) {
+	s1 := new(string)
+	err := json.Unmarshal([]byte(s), s1)
+	return *s1, err == nil
+}
+
+func mismatch(expected, actual string) error {
+	return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual)
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageBegin(ctx context.Context, name string, typeId TMessageType, seqId int32) error {
+	p.resetContextStack() // THRIFT-3735
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.WriteString(ctx, name); e != nil {
+		return e
+	}
+	if e := p.WriteByte(ctx, int8(typeId)); e != nil {
+		return e
+	}
+	if e := p.WriteI32(ctx, seqId); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteMessageEnd(ctx context.Context) error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteStructBegin(ctx context.Context, name string) error {
+	if e := p.OutputObjectBegin(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteStructEnd(ctx context.Context) error {
+	return p.OutputObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldBegin(ctx context.Context, name string, typeId TType, id int16) error {
+	if e := p.WriteString(ctx, name); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) WriteFieldStop(ctx context.Context) error { return nil }
+
+func (p *TSimpleJSONProtocol) WriteMapBegin(ctx context.Context, keyType TType, valueType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.WriteByte(ctx, int8(keyType)); e != nil {
+		return e
+	}
+	if e := p.WriteByte(ctx, int8(valueType)); e != nil {
+		return e
+	}
+	return p.WriteI32(ctx, int32(size))
+}
+
+func (p *TSimpleJSONProtocol) WriteMapEnd(ctx context.Context) error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteListBegin(ctx context.Context, elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteListEnd(ctx context.Context) error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteSetBegin(ctx context.Context, elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteSetEnd(ctx context.Context) error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteBool(ctx context.Context, b bool) error {
+	return p.OutputBool(b)
+}
+
+func (p *TSimpleJSONProtocol) WriteByte(ctx context.Context, b int8) error {
+	return p.WriteI32(ctx, int32(b))
+}
+
+func (p *TSimpleJSONProtocol) WriteI16(ctx context.Context, v int16) error {
+	return p.WriteI32(ctx, int32(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI32(ctx context.Context, v int32) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI64(ctx context.Context, v int64) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteDouble(ctx context.Context, v float64) error {
+	return p.OutputF64(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteString(ctx context.Context, v string) error {
+	return p.OutputString(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteBinary(ctx context.Context, v []byte) error {
+	// The JSON library only takes in a string, not an arbitrary byte array.
+	// To transmit bytes efficiently we must convert them into a valid JSON
+	// string, so we use base64 encoding to avoid excessive escaping/quoting.
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	writer := base64.NewEncoder(base64.StdEncoding, p.writer)
+	if _, e := writer.Write(v); e != nil {
+		p.writer.Reset(p.trans) // THRIFT-3735
+		return NewTProtocolException(e)
+	}
+	if e := writer.Close(); e != nil {
+		return NewTProtocolException(e)
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	return p.OutputPostValue()
+}
+
+// Reading methods.
+func (p *TSimpleJSONProtocol) ReadMessageBegin(ctx context.Context) (name string, typeId TMessageType, seqId int32, err error) {
+	p.resetContextStack() // THRIFT-3735
+	if isNull, err := p.ParseListBegin(); isNull || err != nil {
+		return name, typeId, seqId, err
+	}
+	if name, err = p.ReadString(ctx); err != nil {
+		return name, typeId, seqId, err
+	}
+	bTypeId, err := p.ReadByte(ctx)
+	typeId = TMessageType(bTypeId)
+	if err != nil {
+		return name, typeId, seqId, err
+	}
+	if seqId, err = p.ReadI32(ctx); err != nil {
+		return name, typeId, seqId, err
+	}
+	return name, typeId, seqId, nil
+}
+
+func (p *TSimpleJSONProtocol) ReadMessageEnd(ctx context.Context) error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadStructBegin(ctx context.Context) (name string, err error) {
+	_, err = p.ParseObjectStart()
+	return "", err
+}
+
+func (p *TSimpleJSONProtocol) ReadStructEnd(ctx context.Context) error {
+	return p.ParseObjectEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldBegin(ctx context.Context) (string, TType, int16, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return "", STOP, 0, err
+	}
+	b, _ := p.reader.Peek(1)
+	if len(b) > 0 {
+		switch b[0] {
+		case JSON_RBRACE[0]:
+			return "", STOP, 0, nil
+		case JSON_QUOTE:
+			p.reader.ReadByte()
+			name, err := p.ParseStringBody()
+			// simplejson is not meant to be read back into thrift
+			// - see http://wiki.apache.org/thrift/ThriftUsageJava
+			// - use JSON instead
+			if err != nil {
+				return name, STOP, 0, err
+			}
+			return name, STOP, -1, p.ParsePostValue()
+		}
+		e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b))
+		return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return "", STOP, 0, NewTProtocolException(io.EOF)
+}
+
+func (p *TSimpleJSONProtocol) ReadFieldEnd(ctx context.Context) error {
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ReadMapBegin(ctx context.Context) (keyType TType, valueType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, VOID, 0, e
+	}
+
+	// read keyType
+	bKeyType, e := p.ReadByte(ctx)
+	keyType = TType(bKeyType)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+
+	// read valueType
+	bValueType, e := p.ReadByte(ctx)
+	valueType = TType(bValueType)
+	if e != nil {
+		return keyType, valueType, size, e
+	}
+
+	// read size
+	iSize, err := p.ReadI64(ctx)
+	size = int(iSize)
+	return keyType, valueType, size, err
+}
+
+func (p *TSimpleJSONProtocol) ReadMapEnd(ctx context.Context) error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadListBegin(ctx context.Context) (elemType TType, size int, e error) {
+	return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadListEnd(ctx context.Context) error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetBegin(ctx context.Context) (elemType TType, size int, e error) {
+	return p.ParseElemListBegin()
+}
+
+func (p *TSimpleJSONProtocol) ReadSetEnd(ctx context.Context) error {
+	return p.ParseListEnd()
+}
+
+func (p *TSimpleJSONProtocol) ReadBool(ctx context.Context) (bool, error) {
+	var value bool
+
+	if err := p.ParsePreValue(); err != nil {
+		return value, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 {
+		switch f[0] {
+		case JSON_TRUE[0]:
+			b := make([]byte, len(JSON_TRUE))
+			_, err := p.reader.Read(b)
+			if err != nil {
+				return false, NewTProtocolException(err)
+			}
+			if string(b) == string(JSON_TRUE) {
+				value = true
+			} else {
+				e := fmt.Errorf("Expected \"true\" but found: %s", string(b))
+				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		case JSON_FALSE[0]:
+			b := make([]byte, len(JSON_FALSE))
+			_, err := p.reader.Read(b)
+			if err != nil {
+				return false, NewTProtocolException(err)
+			}
+			if string(b) == string(JSON_FALSE) {
+				value = false
+			} else {
+				e := fmt.Errorf("Expected \"false\" but found: %s", string(b))
+				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		case JSON_NULL[0]:
+			b := make([]byte, len(JSON_NULL))
+			_, err := p.reader.Read(b)
+			if err != nil {
+				return false, NewTProtocolException(err)
+			}
+			if string(b) == string(JSON_NULL) {
+				value = false
+			} else {
+				e := fmt.Errorf("Expected \"null\" but found: %s", string(b))
+				return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		default:
+			e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f))
+			return value, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	}
+	return value, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadByte(ctx context.Context) (int8, error) {
+	v, err := p.ReadI64(ctx)
+	return int8(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI16(ctx context.Context) (int16, error) {
+	v, err := p.ReadI64(ctx)
+	return int16(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI32(ctx context.Context) (int32, error) {
+	v, err := p.ReadI64(ctx)
+	return int32(v), err
+}
+
+func (p *TSimpleJSONProtocol) ReadI64(ctx context.Context) (int64, error) {
+	v, _, err := p.ParseI64()
+	return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadDouble(ctx context.Context) (float64, error) {
+	v, _, err := p.ParseF64()
+	return v, err
+}
+
+func (p *TSimpleJSONProtocol) ReadString(ctx context.Context) (string, error) {
+	var v string
+	if err := p.ParsePreValue(); err != nil {
+		return v, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 && f[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+		value, err := p.ParseStringBody()
+		v = value
+		if err != nil {
+			return v, err
+		}
+	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
+		b := make([]byte, len(JSON_NULL))
+		_, err := p.reader.Read(b)
+		if err != nil {
+			return v, NewTProtocolException(err)
+		}
+		if string(b) != string(JSON_NULL) {
+			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	} else {
+		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ReadBinary(ctx context.Context) ([]byte, error) {
+	var v []byte
+	if err := p.ParsePreValue(); err != nil {
+		return nil, err
+	}
+	f, _ := p.reader.Peek(1)
+	if len(f) > 0 && f[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+		value, err := p.ParseBase64EncodedBody()
+		v = value
+		if err != nil {
+			return v, err
+		}
+	} else if len(f) > 0 && f[0] == JSON_NULL[0] {
+		b := make([]byte, len(JSON_NULL))
+		_, err := p.reader.Read(b)
+		if err != nil {
+			return v, NewTProtocolException(err)
+		}
+		if string(b) != string(JSON_NULL) {
+			e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
+			return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	} else {
+		e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
+		return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+
+	return v, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) {
+	return NewTProtocolException(p.writer.Flush())
+}
+
+func (p *TSimpleJSONProtocol) Skip(ctx context.Context, fieldType TType) (err error) {
+	return SkipDefaultDepth(ctx, p, fieldType)
+}
+
+func (p *TSimpleJSONProtocol) Transport() TTransport {
+	return p.trans
+}
+
+func (p *TSimpleJSONProtocol) OutputPreValue() error {
+	cxt, ok := p.dumpContext.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	switch cxt {
+	case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		if _, e := p.write(JSON_COMMA); e != nil {
+			return NewTProtocolException(e)
+		}
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		if _, e := p.write(JSON_COLON); e != nil {
+			return NewTProtocolException(e)
+		}
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputPostValue() error {
+	cxt, ok := p.dumpContext.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	switch cxt {
+	case _CONTEXT_IN_LIST_FIRST:
+		p.dumpContext.pop()
+		p.dumpContext.push(_CONTEXT_IN_LIST)
+	case _CONTEXT_IN_OBJECT_FIRST:
+		p.dumpContext.pop()
+		p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
+	case _CONTEXT_IN_OBJECT_NEXT_KEY:
+		p.dumpContext.pop()
+		p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		p.dumpContext.pop()
+		p.dumpContext.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputBool(value bool) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	var v string
+	if value {
+		v = string(JSON_TRUE)
+	} else {
+		v = string(JSON_FALSE)
+	}
+	cxt, ok := p.dumpContext.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	switch cxt {
+	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		v = jsonQuote(v)
+	}
+	if e := p.OutputStringData(v); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputNull() error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_NULL); e != nil {
+		return NewTProtocolException(e)
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputF64(value float64) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	var v string
+	if math.IsNaN(value) {
+		v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE)
+	} else if math.IsInf(value, 1) {
+		v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE)
+	} else if math.IsInf(value, -1) {
+		v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE)
+	} else {
+		cxt, ok := p.dumpContext.peek()
+		if !ok {
+			return errEmptyJSONContextStack
+		}
+		v = strconv.FormatFloat(value, 'g', -1, 64)
+		switch cxt {
+		case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+			v = string(JSON_QUOTE) + v + string(JSON_QUOTE)
+		}
+	}
+	if e := p.OutputStringData(v); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputI64(value int64) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	cxt, ok := p.dumpContext.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	v := strconv.FormatInt(value, 10)
+	switch cxt {
+	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		v = jsonQuote(v)
+	}
+	if e := p.OutputStringData(v); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputString(s string) error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if e := p.OutputStringData(jsonQuote(s)); e != nil {
+		return e
+	}
+	return p.OutputPostValue()
+}
+
+func (p *TSimpleJSONProtocol) OutputStringData(s string) error {
+	_, e := p.write([]byte(s))
+	return NewTProtocolException(e)
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectBegin() error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_LBRACE); e != nil {
+		return NewTProtocolException(e)
+	}
+	p.dumpContext.push(_CONTEXT_IN_OBJECT_FIRST)
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputObjectEnd() error {
+	if _, e := p.write(JSON_RBRACE); e != nil {
+		return NewTProtocolException(e)
+	}
+	_, ok := p.dumpContext.pop()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	if e := p.OutputPostValue(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListBegin() error {
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_LBRACKET); e != nil {
+		return NewTProtocolException(e)
+	}
+	p.dumpContext.push(_CONTEXT_IN_LIST_FIRST)
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputListEnd() error {
+	if _, e := p.write(JSON_RBRACKET); e != nil {
+		return NewTProtocolException(e)
+	}
+	_, ok := p.dumpContext.pop()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	if e := p.OutputPostValue(); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
+	if e := p.OutputListBegin(); e != nil {
+		return e
+	}
+	if e := p.OutputI64(int64(elemType)); e != nil {
+		return e
+	}
+	if e := p.OutputI64(int64(size)); e != nil {
+		return e
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePreValue() error {
+	if e := p.readNonSignificantWhitespace(); e != nil {
+		return NewTProtocolException(e)
+	}
+	cxt, ok := p.parseContextStack.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	b, _ := p.reader.Peek(1)
+	switch cxt {
+	case _CONTEXT_IN_LIST:
+		if len(b) > 0 {
+			switch b[0] {
+			case JSON_RBRACKET[0]:
+				return nil
+			case JSON_COMMA[0]:
+				p.reader.ReadByte()
+				if e := p.readNonSignificantWhitespace(); e != nil {
+					return NewTProtocolException(e)
+				}
+				return nil
+			default:
+				e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b))
+				return NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		}
+	case _CONTEXT_IN_OBJECT_NEXT_KEY:
+		if len(b) > 0 {
+			switch b[0] {
+			case JSON_RBRACE[0]:
+				return nil
+			case JSON_COMMA[0]:
+				p.reader.ReadByte()
+				if e := p.readNonSignificantWhitespace(); e != nil {
+					return NewTProtocolException(e)
+				}
+				return nil
+			default:
+				e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b))
+				return NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		}
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		if len(b) > 0 {
+			switch b[0] {
+			case JSON_COLON[0]:
+				p.reader.ReadByte()
+				if e := p.readNonSignificantWhitespace(); e != nil {
+					return NewTProtocolException(e)
+				}
+				return nil
+			default:
+				e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b))
+				return NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		}
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ParsePostValue() error {
+	if e := p.readNonSignificantWhitespace(); e != nil {
+		return NewTProtocolException(e)
+	}
+	cxt, ok := p.parseContextStack.peek()
+	if !ok {
+		return errEmptyJSONContextStack
+	}
+	switch cxt {
+	case _CONTEXT_IN_LIST_FIRST:
+		p.parseContextStack.pop()
+		p.parseContextStack.push(_CONTEXT_IN_LIST)
+	case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY:
+		p.parseContextStack.pop()
+		p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_VALUE)
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		p.parseContextStack.pop()
+		p.parseContextStack.push(_CONTEXT_IN_OBJECT_NEXT_KEY)
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error {
+	for {
+		b, _ := p.reader.Peek(1)
+		if len(b) < 1 {
+			return nil
+		}
+		switch b[0] {
+		case ' ', '\r', '\n', '\t':
+			p.reader.ReadByte()
+			continue
+		default:
+			break
+		}
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) {
+	line, err := p.reader.ReadString(JSON_QUOTE)
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	l := len(line)
+	// count number of escapes to see if we need to keep going
+	i := 1
+	for ; i < l; i++ {
+		if line[l-i-1] != '\\' {
+			break
+		}
+	}
+	if i&0x01 == 1 {
+		v, ok := jsonUnquote(string(JSON_QUOTE) + line)
+		if !ok {
+			return "", NewTProtocolException(err)
+		}
+		return v, nil
+	}
+	s, err := p.ParseQuotedStringBody()
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	str := string(JSON_QUOTE) + line + s
+	v, ok := jsonUnquote(str)
+	if !ok {
+		e := fmt.Errorf("Unable to parse as JSON string %s", str)
+		return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) {
+	line, err := p.reader.ReadString(JSON_QUOTE)
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	l := len(line)
+	// count number of escapes to see if we need to keep going
+	i := 1
+	for ; i < l; i++ {
+		if line[l-i-1] != '\\' {
+			break
+		}
+	}
+	if i&0x01 == 1 {
+		return line, nil
+	}
+	s, err := p.ParseQuotedStringBody()
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	v := line + s
+	return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) {
+	line, err := p.reader.ReadBytes(JSON_QUOTE)
+	if err != nil {
+		return line, NewTProtocolException(err)
+	}
+	line2 := line[0 : len(line)-1]
+	l := len(line2)
+	if (l % 4) != 0 {
+		pad := 4 - (l % 4)
+		fill := [...]byte{'=', '=', '='}
+		line2 = append(line2, fill[:pad]...)
+		l = len(line2)
+	}
+	output := make([]byte, base64.StdEncoding.DecodedLen(l))
+	n, err := base64.StdEncoding.Decode(output, line2)
+	return output[0:n], NewTProtocolException(err)
+}
+
+func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return 0, false, err
+	}
+	var value int64
+	var isnull bool
+	if p.safePeekContains(JSON_NULL) {
+		p.reader.Read(make([]byte, len(JSON_NULL)))
+		isnull = true
+	} else {
+		num, err := p.readNumeric()
+		isnull = (num == nil)
+		if !isnull {
+			value = num.Int64()
+		}
+		if err != nil {
+			return value, isnull, err
+		}
+	}
+	return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return 0, false, err
+	}
+	var value float64
+	var isnull bool
+	if p.safePeekContains(JSON_NULL) {
+		p.reader.Read(make([]byte, len(JSON_NULL)))
+		isnull = true
+	} else {
+		num, err := p.readNumeric()
+		isnull = (num == nil)
+		if !isnull {
+			value = num.Float64()
+		}
+		if err != nil {
+			return value, isnull, err
+		}
+	}
+	return value, isnull, p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) {
+	if err := p.ParsePreValue(); err != nil {
+		return false, err
+	}
+	var b []byte
+	b, err := p.reader.Peek(1)
+	if err != nil {
+		return false, err
+	}
+	if len(b) > 0 && b[0] == JSON_LBRACE[0] {
+		p.reader.ReadByte()
+		p.parseContextStack.push(_CONTEXT_IN_OBJECT_FIRST)
+		return false, nil
+	} else if p.safePeekContains(JSON_NULL) {
+		return true, nil
+	}
+	e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b))
+	return false, NewTProtocolExceptionWithType(INVALID_DATA, e)
+}
+
+func (p *TSimpleJSONProtocol) ParseObjectEnd() error {
+	if isNull, err := p.readIfNull(); isNull || err != nil {
+		return err
+	}
+	cxt, _ := p.parseContextStack.peek()
+	if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) {
+		e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt)
+		return NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	line, err := p.reader.ReadString(JSON_RBRACE[0])
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	for _, char := range line {
+		switch char {
+		default:
+			e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line)
+			return NewTProtocolExceptionWithType(INVALID_DATA, e)
+		case ' ', '\n', '\r', '\t', '}':
+			break
+		}
+	}
+	p.parseContextStack.pop()
+	return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) {
+	if e := p.ParsePreValue(); e != nil {
+		return false, e
+	}
+	var b []byte
+	b, err = p.reader.Peek(1)
+	if err != nil {
+		return false, err
+	}
+	if len(b) >= 1 && b[0] == JSON_LBRACKET[0] {
+		p.parseContextStack.push(_CONTEXT_IN_LIST_FIRST)
+		p.reader.ReadByte()
+		isNull = false
+	} else if p.safePeekContains(JSON_NULL) {
+		isNull = true
+	} else {
+		err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b)
+	}
+	return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err)
+}
+
+func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
+	if isNull, e := p.ParseListBegin(); isNull || e != nil {
+		return VOID, 0, e
+	}
+	bElemType, _, err := p.ParseI64()
+	elemType = TType(bElemType)
+	if err != nil {
+		return elemType, size, err
+	}
+	nSize, _, err2 := p.ParseI64()
+	size = int(nSize)
+	return elemType, size, err2
+}
+
+func (p *TSimpleJSONProtocol) ParseListEnd() error {
+	if isNull, err := p.readIfNull(); isNull || err != nil {
+		return err
+	}
+	cxt, _ := p.parseContextStack.peek()
+	if cxt != _CONTEXT_IN_LIST {
+		e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt)
+		return NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	line, err := p.reader.ReadString(JSON_RBRACKET[0])
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	for _, char := range line {
+		switch char {
+		default:
+			e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line)
+			return NewTProtocolExceptionWithType(INVALID_DATA, e)
+		case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
+			break
+		}
+	}
+	p.parseContextStack.pop()
+	if cxt, ok := p.parseContextStack.peek(); !ok {
+		return errEmptyJSONContextStack
+	} else if cxt == _CONTEXT_IN_TOPLEVEL {
+		return nil
+	}
+	return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) {
+	e := p.readNonSignificantWhitespace()
+	if e != nil {
+		return nil, VOID, NewTProtocolException(e)
+	}
+	b, e := p.reader.Peek(1)
+	if len(b) > 0 {
+		c := b[0]
+		switch c {
+		case JSON_NULL[0]:
+			buf := make([]byte, len(JSON_NULL))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return nil, VOID, NewTProtocolException(e)
+			}
+			if string(JSON_NULL) != string(buf) {
+				e = mismatch(string(JSON_NULL), string(buf))
+				return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return nil, VOID, nil
+		case JSON_QUOTE:
+			p.reader.ReadByte()
+			v, e := p.ParseStringBody()
+			if e != nil {
+				return v, UTF8, NewTProtocolException(e)
+			}
+			if v == JSON_INFINITY {
+				return INFINITY, DOUBLE, nil
+			} else if v == JSON_NEGATIVE_INFINITY {
+				return NEGATIVE_INFINITY, DOUBLE, nil
+			} else if v == JSON_NAN {
+				return NAN, DOUBLE, nil
+			}
+			return v, UTF8, nil
+		case JSON_TRUE[0]:
+			buf := make([]byte, len(JSON_TRUE))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return true, BOOL, NewTProtocolException(e)
+			}
+			if string(JSON_TRUE) != string(buf) {
+				e := mismatch(string(JSON_TRUE), string(buf))
+				return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return true, BOOL, nil
+		case JSON_FALSE[0]:
+			buf := make([]byte, len(JSON_FALSE))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return false, BOOL, NewTProtocolException(e)
+			}
+			if string(JSON_FALSE) != string(buf) {
+				e := mismatch(string(JSON_FALSE), string(buf))
+				return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return false, BOOL, nil
+		case JSON_LBRACKET[0]:
+			_, e := p.reader.ReadByte()
+			return make([]interface{}, 0), LIST, NewTProtocolException(e)
+		case JSON_LBRACE[0]:
+			_, e := p.reader.ReadByte()
+			return make(map[string]interface{}), STRUCT, NewTProtocolException(e)
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]:
+			// assume numeric
+			v, e := p.readNumeric()
+			return v, DOUBLE, e
+		default:
+			e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c))
+			return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	}
+	e = fmt.Errorf("Cannot read a single element while parsing JSON.")
+	return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+
+}
+
+func (p *TSimpleJSONProtocol) readIfNull() (bool, error) {
+	cont := true
+	for cont {
+		b, _ := p.reader.Peek(1)
+		if len(b) < 1 {
+			return false, nil
+		}
+		switch b[0] {
+		default:
+			return false, nil
+		case JSON_NULL[0]:
+			cont = false
+			break
+		case ' ', '\n', '\r', '\t':
+			p.reader.ReadByte()
+			break
+		}
+	}
+	if p.safePeekContains(JSON_NULL) {
+		p.reader.Read(make([]byte, len(JSON_NULL)))
+		return true, nil
+	}
+	return false, nil
+}
+
+func (p *TSimpleJSONProtocol) readQuoteIfNext() {
+	b, _ := p.reader.Peek(1)
+	if len(b) > 0 && b[0] == JSON_QUOTE {
+		p.reader.ReadByte()
+	}
+}
+
+func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) {
+	isNull, err := p.readIfNull()
+	if isNull || err != nil {
+		return NUMERIC_NULL, err
+	}
+	hasDecimalPoint := false
+	nextCanBeSign := true
+	hasE := false
+	MAX_LEN := 40
+	buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN))
+	continueFor := true
+	inQuotes := false
+	for continueFor {
+		c, err := p.reader.ReadByte()
+		if err != nil {
+			if err == io.EOF {
+				break
+			}
+			return NUMERIC_NULL, NewTProtocolException(err)
+		}
+		switch c {
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+			buf.WriteByte(c)
+			nextCanBeSign = false
+		case '.':
+			if hasDecimalPoint {
+				e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String())
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			if hasE {
+				e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String())
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			buf.WriteByte(c)
+			hasDecimalPoint, nextCanBeSign = true, false
+		case 'e', 'E':
+			if hasE {
+				e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c)
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			buf.WriteByte(c)
+			hasE, nextCanBeSign = true, true
+		case '-', '+':
+			if !nextCanBeSign {
+				e := fmt.Errorf("Negative sign within number")
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			buf.WriteByte(c)
+			nextCanBeSign = false
+		case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]:
+			p.reader.UnreadByte()
+			continueFor = false
+		case JSON_NAN[0]:
+			if buf.Len() == 0 {
+				buffer := make([]byte, len(JSON_NAN))
+				buffer[0] = c
+				_, e := p.reader.Read(buffer[1:])
+				if e != nil {
+					return NUMERIC_NULL, NewTProtocolException(e)
+				}
+				if JSON_NAN != string(buffer) {
+					e := mismatch(JSON_NAN, string(buffer))
+					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+				}
+				if inQuotes {
+					p.readQuoteIfNext()
+				}
+				return NAN, nil
+			} else {
+				e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		case JSON_INFINITY[0]:
+			if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') {
+				buffer := make([]byte, len(JSON_INFINITY))
+				buffer[0] = c
+				_, e := p.reader.Read(buffer[1:])
+				if e != nil {
+					return NUMERIC_NULL, NewTProtocolException(e)
+				}
+				if JSON_INFINITY != string(buffer) {
+					e := mismatch(JSON_INFINITY, string(buffer))
+					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+				}
+				if inQuotes {
+					p.readQuoteIfNext()
+				}
+				return INFINITY, nil
+			} else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] {
+				buffer := make([]byte, len(JSON_NEGATIVE_INFINITY))
+				buffer[0] = JSON_NEGATIVE_INFINITY[0]
+				buffer[1] = c
+				_, e := p.reader.Read(buffer[2:])
+				if e != nil {
+					return NUMERIC_NULL, NewTProtocolException(e)
+				}
+				if JSON_NEGATIVE_INFINITY != string(buffer) {
+					e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer))
+					return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+				}
+				if inQuotes {
+					p.readQuoteIfNext()
+				}
+				return NEGATIVE_INFINITY, nil
+			} else {
+				e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String())
+				return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+		case JSON_QUOTE:
+			if !inQuotes {
+				inQuotes = true
+			} else {
+				break
+			}
+		default:
+			e := fmt.Errorf("Unable to parse number starting with character '%c'", c)
+			return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	}
+	if buf.Len() == 0 {
+		e := fmt.Errorf("Unable to parse number from empty string ''")
+		return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return NewNumericFromJSONString(buf.String(), false), nil
+}
+
+// Safely peeks into the buffer, reading only what is necessary
+func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool {
+	for i := 0; i < len(b); i++ {
+		a, _ := p.reader.Peek(i + 1)
+		if len(a) < (i+1) || a[i] != b[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Reset the context stack to its initial state.
+func (p *TSimpleJSONProtocol) resetContextStack() {
+	p.parseContextStack = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
+	p.dumpContext = jsonContextStack{_CONTEXT_IN_TOPLEVEL}
+}
+
+func (p *TSimpleJSONProtocol) write(b []byte) (int, error) {
+	n, err := p.writer.Write(b)
+	if err != nil {
+		p.writer.Reset(p.trans) // THRIFT-3735
+	}
+	return n, err
+}
+
+// SetTConfiguration implements TConfigurationSetter for propagation.
+func (p *TSimpleJSONProtocol) SetTConfiguration(conf *TConfiguration) {
+	PropagateTConfiguration(p.trans, conf)
+}
+
+var _ TConfigurationSetter = (*TSimpleJSONProtocol)(nil)
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go b/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go
new file mode 100644
index 0000000..563cbfc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/simple_server.go
@@ -0,0 +1,332 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"sync"
+	"sync/atomic"
+	"time"
+)
+
+// ErrAbandonRequest is a special error server handler implementations can
+// return to indicate that the request has been abandoned.
+//
+// TSimpleServer will check for this error, and close the client connection
+// instead of writing the response/error back to the client.
+//
+// It shall only be used when the server handler implementation knows that the
+// client already abandoned the request (by checking that the passed in context
+// is already canceled, for example).
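+//
+// A minimal usage sketch (the handler type and method below are hypothetical,
+// not part of this package): a handler can notice that the caller has gone
+// away and abandon the request instead of writing a reply.
+//
+//	func (h *myHandler) DoWork(ctx context.Context, req *Request) (*Reply, error) {
+//		if ctx.Err() != nil {
+//			return nil, thrift.ErrAbandonRequest
+//		}
+//		// ... normal processing ...
+//		return &Reply{}, nil
+//	}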
+var ErrAbandonRequest = errors.New("request abandoned")
+
+// ServerConnectivityCheckInterval defines the ticker interval used by the
+// connectivity check in thrift-compiled TProcessorFunc implementations.
+//
+// It's defined as a variable instead of a constant so that thrift server
+// implementations can change its value to control the behavior.
+//
+// If it's changed to <=0, the feature will be disabled.
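+//
+// For example (a sketch; tune the value to your deployment), a server can
+// adjust or disable the check before calling Serve:
+//
+//	thrift.ServerConnectivityCheckInterval = 50 * time.Millisecond // check less often
+//	thrift.ServerConnectivityCheckInterval = 0                     // disable the check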
+var ServerConnectivityCheckInterval = time.Millisecond * 5
+
+/*
+ * This is not a typical TSimpleServer as it does not block after accepting a socket.
+ * It is more like a TThreadedServer that handles each connection in its own goroutine.
+ * This works as long as the Go client implements some form of connection pooling.
+ */
+type TSimpleServer struct {
+	closed int32
+	wg     sync.WaitGroup
+	mu     sync.Mutex
+
+	processorFactory       TProcessorFactory
+	serverTransport        TServerTransport
+	inputTransportFactory  TTransportFactory
+	outputTransportFactory TTransportFactory
+	inputProtocolFactory   TProtocolFactory
+	outputProtocolFactory  TProtocolFactory
+
+	// Headers to auto forward in THeaderProtocol
+	forwardHeaders []string
+
+	logger Logger
+}
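+
+// A minimal end-to-end sketch, assuming `processor` is a TProcessor and
+// `serverTransport` is a TServerTransport obtained elsewhere (neither is
+// constructed in this file):
+//
+//	server := thrift.NewTSimpleServer4(processor, serverTransport,
+//		thrift.NewTTransportFactory(),
+//		thrift.NewTBinaryProtocolFactoryDefault())
+//	go func() { _ = server.Serve() }()
+//	// ... later, during shutdown:
+//	_ = server.Stop()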
+
+func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
+	return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
+}
+
+func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
+		serverTransport,
+		transportFactory,
+		protocolFactory,
+	)
+}
+
+func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
+		serverTransport,
+		inputTransportFactory,
+		outputTransportFactory,
+		inputProtocolFactory,
+		outputProtocolFactory,
+	)
+}
+
+func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
+	return NewTSimpleServerFactory6(processorFactory,
+		serverTransport,
+		NewTTransportFactory(),
+		NewTTransportFactory(),
+		NewTBinaryProtocolFactoryDefault(),
+		NewTBinaryProtocolFactoryDefault(),
+	)
+}
+
+func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory6(processorFactory,
+		serverTransport,
+		transportFactory,
+		transportFactory,
+		protocolFactory,
+		protocolFactory,
+	)
+}
+
+func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+	return &TSimpleServer{
+		processorFactory:       processorFactory,
+		serverTransport:        serverTransport,
+		inputTransportFactory:  inputTransportFactory,
+		outputTransportFactory: outputTransportFactory,
+		inputProtocolFactory:   inputProtocolFactory,
+		outputProtocolFactory:  outputProtocolFactory,
+	}
+}
+
+func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
+	return p.processorFactory
+}
+
+func (p *TSimpleServer) ServerTransport() TServerTransport {
+	return p.serverTransport
+}
+
+func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
+	return p.inputTransportFactory
+}
+
+func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
+	return p.outputTransportFactory
+}
+
+func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
+	return p.inputProtocolFactory
+}
+
+func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
+	return p.outputProtocolFactory
+}
+
+func (p *TSimpleServer) Listen() error {
+	return p.serverTransport.Listen()
+}
+
+// SetForwardHeaders sets the list of header keys that will be auto-forwarded
+// while using THeaderProtocol.
+//
+// "forward" means that when the server is also a client to other upstream
+// thrift servers, the context object the user gets in the processor functions
+// will have both read and write headers set, with the write headers being
+// forwarded. Users can always override the write headers by calling
+// SetWriteHeaderList before calling thrift client functions.
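+//
+// For example (a sketch; the header names are assumptions):
+//
+//	server.SetForwardHeaders([]string{"x-request-id", "x-tenant"})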
+func (p *TSimpleServer) SetForwardHeaders(headers []string) {
+	size := len(headers)
+	if size == 0 {
+		p.forwardHeaders = nil
+		return
+	}
+
+	keys := make([]string, size)
+	copy(keys, headers)
+	p.forwardHeaders = keys
+}
+
+// SetLogger sets the logger used by this TSimpleServer.
+//
+// If no logger was set before Serve is called, a default logger using the
+// standard log library will be used.
+func (p *TSimpleServer) SetLogger(logger Logger) {
+	p.logger = logger
+}
+
+func (p *TSimpleServer) innerAccept() (int32, error) {
+	client, err := p.serverTransport.Accept()
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	closed := atomic.LoadInt32(&p.closed)
+	if closed != 0 {
+		return closed, nil
+	}
+	if err != nil {
+		return 0, err
+	}
+	if client != nil {
+		p.wg.Add(1)
+		go func() {
+			defer p.wg.Done()
+			if err := p.processRequests(client); err != nil {
+				p.logger(fmt.Sprintf("error processing request: %v", err))
+			}
+		}()
+	}
+	return 0, nil
+}
+
+func (p *TSimpleServer) AcceptLoop() error {
+	for {
+		closed, err := p.innerAccept()
+		if err != nil {
+			return err
+		}
+		if closed != 0 {
+			return nil
+		}
+	}
+}
+
+func (p *TSimpleServer) Serve() error {
+	p.logger = fallbackLogger(p.logger)
+
+	err := p.Listen()
+	if err != nil {
+		return err
+	}
+	p.AcceptLoop()
+	return nil
+}
+
+func (p *TSimpleServer) Stop() error {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if atomic.LoadInt32(&p.closed) != 0 {
+		return nil
+	}
+	atomic.StoreInt32(&p.closed, 1)
+	p.serverTransport.Interrupt()
+	p.wg.Wait()
+	return nil
+}
+
+// If err is actually EOF, return nil, otherwise return err as-is.
+func treatEOFErrorsAsNil(err error) error {
+	if err == nil {
+		return nil
+	}
+	if errors.Is(err, io.EOF) {
+		return nil
+	}
+	var te TTransportException
+	if errors.As(err, &te) && te.TypeId() == END_OF_FILE {
+		return nil
+	}
+	return err
+}
+
+func (p *TSimpleServer) processRequests(client TTransport) (err error) {
+	defer func() {
+		err = treatEOFErrorsAsNil(err)
+	}()
+
+	processor := p.processorFactory.GetProcessor(client)
+	inputTransport, err := p.inputTransportFactory.GetTransport(client)
+	if err != nil {
+		return err
+	}
+	inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport)
+	var outputTransport TTransport
+	var outputProtocol TProtocol
+
+	// For THeaderProtocol, we must use the same protocol instance for
+	// input and output so that the response is written in the same
+	// dialect that the request was detected in.
+	headerProtocol, ok := inputProtocol.(*THeaderProtocol)
+	if ok {
+		outputProtocol = inputProtocol
+	} else {
+		oTrans, err := p.outputTransportFactory.GetTransport(client)
+		if err != nil {
+			return err
+		}
+		outputTransport = oTrans
+		outputProtocol = p.outputProtocolFactory.GetProtocol(outputTransport)
+	}
+
+	if inputTransport != nil {
+		defer inputTransport.Close()
+	}
+	if outputTransport != nil {
+		defer outputTransport.Close()
+	}
+	for {
+		if atomic.LoadInt32(&p.closed) != 0 {
+			return nil
+		}
+
+		ctx := SetResponseHelper(
+			defaultCtx,
+			TResponseHelper{
+				THeaderResponseHelper: NewTHeaderResponseHelper(outputProtocol),
+			},
+		)
+		if headerProtocol != nil {
+			// We need to call ReadFrame here, otherwise we won't
+			// get any headers on the AddReadTHeaderToContext call.
+			//
+			// ReadFrame is safe to call multiple times, so it
+			// won't break when it's called again later, when we
+			// actually start to read the message.
+			if err := headerProtocol.ReadFrame(ctx); err != nil {
+				return err
+			}
+			ctx = AddReadTHeaderToContext(ctx, headerProtocol.GetReadHeaders())
+			ctx = SetWriteHeaderList(ctx, p.forwardHeaders)
+		}
+
+		ok, err := processor.Process(ctx, inputProtocol, outputProtocol)
+		if errors.Is(err, ErrAbandonRequest) {
+			return client.Close()
+		}
+		if errors.As(err, new(TTransportException)) && err != nil {
+			return err
+		}
+		var tae TApplicationException
+		if errors.As(err, &tae) && tae.TypeId() == UNKNOWN_METHOD {
+			continue
+		}
+		if !ok {
+			break
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
new file mode 100644
index 0000000..ba2738a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport.go
@@ -0,0 +1,70 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"context"
+	"errors"
+	"io"
+)
+
+var errTransportInterrupted = errors.New("Transport Interrupted")
+
+type Flusher interface {
+	Flush() (err error)
+}
+
+type ContextFlusher interface {
+	Flush(ctx context.Context) (err error)
+}
+
+type ReadSizeProvider interface {
+	RemainingBytes() (num_bytes uint64)
+}
+
+// Encapsulates the I/O layer
+type TTransport interface {
+	io.ReadWriteCloser
+	ContextFlusher
+	ReadSizeProvider
+
+	// Opens the transport for communication
+	Open() error
+
+	// Returns true if the transport is open
+	IsOpen() bool
+}
+
+type stringWriter interface {
+	WriteString(s string) (n int, err error)
+}
+
+// This is an "enhanced" transport with extra capabilities. You need to use one of these
+// to construct a protocol.
+// Notably, TSocket does not implement this interface, and it is always a mistake to use
+// TSocket directly in a protocol.
+type TRichTransport interface {
+	io.ReadWriter
+	io.ByteReader
+	io.ByteWriter
+	stringWriter
+	ContextFlusher
+	ReadSizeProvider
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
new file mode 100644
index 0000000..0a3f076
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_exception.go
@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+	"errors"
+	"io"
+)
+
+type timeoutable interface {
+	Timeout() bool
+}
+
+// Thrift Transport exception
+type TTransportException interface {
+	TException
+	TypeId() int
+	Err() error
+}
+
+const (
+	UNKNOWN_TRANSPORT_EXCEPTION = 0
+	NOT_OPEN                    = 1
+	ALREADY_OPEN                = 2
+	TIMED_OUT                   = 3
+	END_OF_FILE                 = 4
+)
+
+type tTransportException struct {
+	typeId int
+	err    error
+	msg    string
+}
+
+var _ TTransportException = (*tTransportException)(nil)
+
+func (tTransportException) TExceptionType() TExceptionType {
+	return TExceptionTypeTransport
+}
+
+func (p *tTransportException) TypeId() int {
+	return p.typeId
+}
+
+func (p *tTransportException) Error() string {
+	return p.msg
+}
+
+func (p *tTransportException) Err() error {
+	return p.err
+}
+
+func (p *tTransportException) Unwrap() error {
+	return p.err
+}
+
+func (p *tTransportException) Timeout() bool {
+	return p.typeId == TIMED_OUT
+}
+
+func NewTTransportException(t int, e string) TTransportException {
+	return &tTransportException{
+		typeId: t,
+		err:    errors.New(e),
+		msg:    e,
+	}
+}
+
+func NewTTransportExceptionFromError(e error) TTransportException {
+	if e == nil {
+		return nil
+	}
+
+	if t, ok := e.(TTransportException); ok {
+		return t
+	}
+
+	te := &tTransportException{
+		typeId: UNKNOWN_TRANSPORT_EXCEPTION,
+		err:    e,
+		msg:    e.Error(),
+	}
+
+	if isTimeoutError(e) {
+		te.typeId = TIMED_OUT
+		return te
+	}
+
+	if errors.Is(e, io.EOF) {
+		te.typeId = END_OF_FILE
+		return te
+	}
+
+	return te
+}
+
+func prependTTransportException(prepend string, e TTransportException) TTransportException {
+	return &tTransportException{
+		typeId: e.TypeId(),
+		err:    e,
+		msg:    prepend + e.Error(),
+	}
+}
+
+// isTimeoutError returns true when err is an error caused by a timeout.
+//
+// Note that this also includes TTransportException wrapped timeout errors.
+func isTimeoutError(err error) bool {
+	var t timeoutable
+	if errors.As(err, &t) {
+		return t.Timeout()
+	}
+	return false
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
new file mode 100644
index 0000000..c805807
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/transport_factory.go
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Factory interface used to create wrapped instances of Transports.
+// This is used primarily in servers, which get Transports from
+// a ServerTransport and then may want to mutate them (e.g. create
+// a BufferedTransport from the underlying base transport).
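+//
+// A custom factory simply implements GetTransport; for example (a sketch,
+// where loggingTransport is a hypothetical wrapper type, not part of this
+// package):
+//
+//	type loggingFactory struct{}
+//
+//	func (loggingFactory) GetTransport(trans TTransport) (TTransport, error) {
+//		return &loggingTransport{inner: trans}, nil
+//	}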
+type TTransportFactory interface {
+	GetTransport(trans TTransport) (TTransport, error)
+}
+
+type tTransportFactory struct{}
+
+// Return a wrapped instance of the base Transport.
+func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
+	return trans, nil
+}
+
+func NewTTransportFactory() TTransportFactory {
+	return &tTransportFactory{}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/thrift/type.go b/vendor/github.com/uber/jaeger-client-go/thrift/type.go
new file mode 100644
index 0000000..4292ffc
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/thrift/type.go
@@ -0,0 +1,69 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// Type constants in the Thrift protocol
+type TType byte
+
+const (
+	STOP   = 0
+	VOID   = 1
+	BOOL   = 2
+	BYTE   = 3
+	I08    = 3
+	DOUBLE = 4
+	I16    = 6
+	I32    = 8
+	I64    = 10
+	STRING = 11
+	UTF7   = 11
+	STRUCT = 12
+	MAP    = 13
+	SET    = 14
+	LIST   = 15
+	UTF8   = 16
+	UTF16  = 17
+	//BINARY = 18   wrong and unused
+)
+
+var typeNames = map[int]string{
+	STOP:   "STOP",
+	VOID:   "VOID",
+	BOOL:   "BOOL",
+	BYTE:   "BYTE",
+	DOUBLE: "DOUBLE",
+	I16:    "I16",
+	I32:    "I32",
+	I64:    "I64",
+	STRING: "STRING",
+	STRUCT: "STRUCT",
+	MAP:    "MAP",
+	SET:    "SET",
+	LIST:   "LIST",
+	UTF8:   "UTF8",
+	UTF16:  "UTF16",
+}
+
+func (p TType) String() string {
+	if s, ok := typeNames[int(p)]; ok {
+		return s
+	}
+	return "Unknown"
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer.go b/vendor/github.com/uber/jaeger-client-go/tracer.go
new file mode 100644
index 0000000..9a627be
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/tracer.go
@@ -0,0 +1,493 @@
+// Copyright (c) 2017-2018 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"fmt"
+	"io"
+	"math/rand"
+	"os"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+	"github.com/opentracing/opentracing-go/ext"
+
+	"github.com/uber/jaeger-client-go/internal/baggage"
+	"github.com/uber/jaeger-client-go/internal/throttler"
+	"github.com/uber/jaeger-client-go/log"
+	"github.com/uber/jaeger-client-go/utils"
+)
+
+// Tracer implements opentracing.Tracer.
+type Tracer struct {
+	serviceName string
+	hostIPv4    uint32 // this is for zipkin endpoint conversion
+
+	sampler  SamplerV2
+	reporter Reporter
+	metrics  Metrics
+	logger   log.DebugLogger
+
+	timeNow      func() time.Time
+	randomNumber func() uint64
+
+	options struct {
+		gen128Bit                   bool // whether to generate 128bit trace IDs
+		zipkinSharedRPCSpan         bool
+		highTraceIDGenerator        func() uint64 // custom high trace ID generator
+		maxTagValueLength           int
+		noDebugFlagOnForcedSampling bool
+		maxLogsPerSpan              int
+		// more options to come
+	}
+	// allocator of Span objects
+	spanAllocator SpanAllocator
+
+	injectors  map[interface{}]Injector
+	extractors map[interface{}]Extractor
+
+	observer compositeObserver
+
+	tags    []Tag
+	process Process
+
+	baggageRestrictionManager baggage.RestrictionManager
+	baggageSetter             *baggageSetter
+
+	debugThrottler throttler.Throttler
+}
+
+// NewTracer creates a Tracer implementation that reports tracing to Jaeger.
+// The returned io.Closer can be used in shutdown hooks to ensure that the internal
+// queue of the Reporter is drained and all buffered spans are submitted to collectors.
+// TODO (breaking change) return *Tracer only, without closer.
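+//
+// A typical usage sketch (the const sampler and null reporter below are just
+// placeholders; pick whatever sampler and reporter your deployment needs):
+//
+//	tracer, closer := jaeger.NewTracer(
+//		"my-service",
+//		jaeger.NewConstSampler(true),
+//		jaeger.NewNullReporter(),
+//	)
+//	defer closer.Close()
+//	opentracing.SetGlobalTracer(tracer)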
+func NewTracer(
+	serviceName string,
+	sampler Sampler,
+	reporter Reporter,
+	options ...TracerOption,
+) (opentracing.Tracer, io.Closer) {
+	t := &Tracer{
+		serviceName:   serviceName,
+		sampler:       samplerV1toV2(sampler),
+		reporter:      reporter,
+		injectors:     make(map[interface{}]Injector),
+		extractors:    make(map[interface{}]Extractor),
+		metrics:       *NewNullMetrics(),
+		spanAllocator: simpleSpanAllocator{},
+	}
+
+	for _, option := range options {
+		option(t)
+	}
+
+	// register default injectors/extractors unless they are already provided via options
+	textPropagator := NewTextMapPropagator(getDefaultHeadersConfig(), t.metrics)
+	t.addCodec(opentracing.TextMap, textPropagator, textPropagator)
+
+	httpHeaderPropagator := NewHTTPHeaderPropagator(getDefaultHeadersConfig(), t.metrics)
+	t.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
+
+	binaryPropagator := NewBinaryPropagator(t)
+	t.addCodec(opentracing.Binary, binaryPropagator, binaryPropagator)
+
+	// TODO remove after TChannel supports OpenTracing
+	interopPropagator := &jaegerTraceContextPropagator{tracer: t}
+	t.addCodec(SpanContextFormat, interopPropagator, interopPropagator)
+
+	zipkinPropagator := &zipkinPropagator{tracer: t}
+	t.addCodec(ZipkinSpanFormat, zipkinPropagator, zipkinPropagator)
+
+	if t.baggageRestrictionManager != nil {
+		t.baggageSetter = newBaggageSetter(t.baggageRestrictionManager, &t.metrics)
+	} else {
+		t.baggageSetter = newBaggageSetter(baggage.NewDefaultRestrictionManager(0), &t.metrics)
+	}
+	if t.debugThrottler == nil {
+		t.debugThrottler = throttler.DefaultThrottler{}
+	}
+
+	if t.randomNumber == nil {
+		seedGenerator := utils.NewRand(time.Now().UnixNano())
+		pool := sync.Pool{
+			New: func() interface{} {
+				return rand.NewSource(seedGenerator.Int63())
+			},
+		}
+
+		t.randomNumber = func() uint64 {
+			generator := pool.Get().(rand.Source)
+			number := uint64(generator.Int63())
+			pool.Put(generator)
+			return number
+		}
+	}
+	if t.timeNow == nil {
+		t.timeNow = time.Now
+	}
+	if t.logger == nil {
+		t.logger = log.NullLogger
+	}
+	// Set tracer-level tags
+	t.tags = append(t.tags, Tag{key: JaegerClientVersionTagKey, value: JaegerClientVersion})
+	if hostname, err := os.Hostname(); err == nil {
+		t.tags = append(t.tags, Tag{key: TracerHostnameTagKey, value: hostname})
+	}
+	if ipval, ok := t.getTag(TracerIPTagKey); ok {
+		ipv4, err := utils.ParseIPToUint32(ipval.(string))
+		if err != nil {
+			t.hostIPv4 = 0
+			t.logger.Error("Unable to convert the externally provided ip to uint32: " + err.Error())
+		} else {
+			t.hostIPv4 = ipv4
+		}
+	} else if ip, err := utils.HostIP(); err == nil {
+		t.tags = append(t.tags, Tag{key: TracerIPTagKey, value: ip.String()})
+		t.hostIPv4 = utils.PackIPAsUint32(ip)
+	} else {
+		t.logger.Error("Unable to determine this host's IP address: " + err.Error())
+	}
+
+	if t.options.gen128Bit {
+		if t.options.highTraceIDGenerator == nil {
+			t.options.highTraceIDGenerator = t.randomNumber
+		}
+	} else if t.options.highTraceIDGenerator != nil {
+		t.logger.Error("Overriding high trace ID generator but not generating " +
+			"128 bit trace IDs, consider enabling the \"Gen128Bit\" option")
+	}
+	if t.options.maxTagValueLength == 0 {
+		t.options.maxTagValueLength = DefaultMaxTagValueLength
+	}
+	t.process = Process{
+		Service: serviceName,
+		UUID:    strconv.FormatUint(t.randomNumber(), 16),
+		Tags:    t.tags,
+	}
+	if throttler, ok := t.debugThrottler.(ProcessSetter); ok {
+		throttler.SetProcess(t.process)
+	}
+
+	return t, t
+}
+
+// addCodec registers an injector and extractor for the given propagation format if not already defined.
+func (t *Tracer) addCodec(format interface{}, injector Injector, extractor Extractor) {
+	if _, ok := t.injectors[format]; !ok {
+		t.injectors[format] = injector
+	}
+	if _, ok := t.extractors[format]; !ok {
+		t.extractors[format] = extractor
+	}
+}
+
+// StartSpan implements StartSpan() method of opentracing.Tracer.
+func (t *Tracer) StartSpan(
+	operationName string,
+	options ...opentracing.StartSpanOption,
+) opentracing.Span {
+	sso := opentracing.StartSpanOptions{}
+	for _, o := range options {
+		o.Apply(&sso)
+	}
+	return t.startSpanWithOptions(operationName, sso)
+}
+
+func (t *Tracer) startSpanWithOptions(
+	operationName string,
+	options opentracing.StartSpanOptions,
+) opentracing.Span {
+	if options.StartTime.IsZero() {
+		options.StartTime = t.timeNow()
+	}
+
+	// Predicate that reports whether the given span context is an empty reference
+	// or may be used as a parent / debug ID / baggage items source.
+	isEmptyReference := func(ctx SpanContext) bool {
+		return !ctx.IsValid() && !ctx.isDebugIDContainerOnly() && len(ctx.baggage) == 0
+	}
+
+	var references []Reference
+	var parent SpanContext
+	var hasParent bool // need this because `parent` is a value, not reference
+	var ctx SpanContext
+	var isSelfRef bool
+	for _, ref := range options.References {
+		ctxRef, ok := ref.ReferencedContext.(SpanContext)
+		if !ok {
+			t.logger.Error(fmt.Sprintf(
+				"Reference contains invalid type of SpanReference: %s",
+				reflect.ValueOf(ref.ReferencedContext)))
+			continue
+		}
+		if isEmptyReference(ctxRef) {
+			continue
+		}
+
+		if ref.Type == selfRefType {
+			isSelfRef = true
+			ctx = ctxRef
+			continue
+		}
+
+		if ctxRef.IsValid() {
+			// we don't want empty context that contains only debug-id or baggage
+			references = append(references, Reference{Type: ref.Type, Context: ctxRef})
+		}
+
+		if !hasParent {
+			parent = ctxRef
+			hasParent = ref.Type == opentracing.ChildOfRef
+		}
+	}
+	if !hasParent && !isEmptyReference(parent) {
+		// If ChildOfRef wasn't found but a FollowFromRef exists, use the context from
+		// the FollowFromRef as the parent
+		hasParent = true
+	}
+
+	rpcServer := false
+	if v, ok := options.Tags[ext.SpanKindRPCServer.Key]; ok {
+		rpcServer = (v == ext.SpanKindRPCServerEnum || v == string(ext.SpanKindRPCServerEnum))
+	}
+
+	var internalTags []Tag
+	newTrace := false
+	if !isSelfRef {
+		if !hasParent || !parent.IsValid() {
+			newTrace = true
+			ctx.traceID.Low = t.randomID()
+			if t.options.gen128Bit {
+				ctx.traceID.High = t.options.highTraceIDGenerator()
+			}
+			ctx.spanID = SpanID(ctx.traceID.Low)
+			ctx.parentID = 0
+			ctx.samplingState = &samplingState{
+				localRootSpan: ctx.spanID,
+			}
+			if hasParent && parent.isDebugIDContainerOnly() && t.isDebugAllowed(operationName) {
+				ctx.samplingState.setDebugAndSampled()
+				internalTags = append(internalTags, Tag{key: JaegerDebugHeader, value: parent.debugID})
+			}
+		} else {
+			ctx.traceID = parent.traceID
+			if rpcServer && t.options.zipkinSharedRPCSpan {
+				// Support Zipkin's one-span-per-RPC model
+				ctx.spanID = parent.spanID
+				ctx.parentID = parent.parentID
+			} else {
+				ctx.spanID = SpanID(t.randomID())
+				ctx.parentID = parent.spanID
+			}
+			ctx.samplingState = parent.samplingState
+			if parent.remote {
+				ctx.samplingState.setFinal()
+				ctx.samplingState.localRootSpan = ctx.spanID
+			}
+		}
+		if hasParent {
+			// copy baggage items
+			if l := len(parent.baggage); l > 0 {
+				ctx.baggage = make(map[string]string, len(parent.baggage))
+				for k, v := range parent.baggage {
+					ctx.baggage[k] = v
+				}
+			}
+		}
+	}
+
+	sp := t.newSpan()
+	sp.context = ctx
+	sp.tracer = t
+	sp.operationName = operationName
+	sp.startTime = options.StartTime
+	sp.duration = 0
+	sp.references = references
+	sp.firstInProcess = rpcServer || sp.context.parentID == 0
+
+	if !sp.context.isSamplingFinalized() {
+		decision := t.sampler.OnCreateSpan(sp)
+		sp.applySamplingDecision(decision, false)
+	}
+	sp.observer = t.observer.OnStartSpan(sp, operationName, options)
+
+	if tagsTotalLength := len(options.Tags) + len(internalTags); tagsTotalLength > 0 {
+		if sp.tags == nil || cap(sp.tags) < tagsTotalLength {
+			sp.tags = make([]Tag, 0, tagsTotalLength)
+		}
+		sp.tags = append(sp.tags, internalTags...)
+		for k, v := range options.Tags {
+			sp.setTagInternal(k, v, false)
+		}
+	}
+	t.emitNewSpanMetrics(sp, newTrace)
+	return sp
+}
+
+// Inject implements Inject() method of opentracing.Tracer
+func (t *Tracer) Inject(ctx opentracing.SpanContext, format interface{}, carrier interface{}) error {
+	c, ok := ctx.(SpanContext)
+	if !ok {
+		return opentracing.ErrInvalidSpanContext
+	}
+	if injector, ok := t.injectors[format]; ok {
+		return injector.Inject(c, carrier)
+	}
+	return opentracing.ErrUnsupportedFormat
+}
+
+// Extract implements Extract() method of opentracing.Tracer
+func (t *Tracer) Extract(
+	format interface{},
+	carrier interface{},
+) (opentracing.SpanContext, error) {
+	if extractor, ok := t.extractors[format]; ok {
+		spanCtx, err := extractor.Extract(carrier)
+		if err != nil {
+			return nil, err // ensure returned spanCtx is nil
+		}
+		spanCtx.remote = true
+		return spanCtx, nil
+	}
+	return nil, opentracing.ErrUnsupportedFormat
+}
+
+// Close releases all resources used by the Tracer and flushes any remaining buffered spans.
+func (t *Tracer) Close() error {
+	t.logger.Debugf("closing tracer")
+	t.reporter.Close()
+	t.sampler.Close()
+	if mgr, ok := t.baggageRestrictionManager.(io.Closer); ok {
+		_ = mgr.Close()
+	}
+	if throttler, ok := t.debugThrottler.(io.Closer); ok {
+		_ = throttler.Close()
+	}
+	return nil
+}
+
+// Tags returns a slice of tracer-level tags.
+func (t *Tracer) Tags() []opentracing.Tag {
+	tags := make([]opentracing.Tag, len(t.tags))
+	for i, tag := range t.tags {
+		tags[i] = opentracing.Tag{Key: tag.key, Value: tag.value}
+	}
+	return tags
+}
+
+// getTag returns the value of the specified tag; if the tag does not exist, it returns nil.
+// TODO only used by tests, move there.
+func (t *Tracer) getTag(key string) (interface{}, bool) {
+	for _, tag := range t.tags {
+		if tag.key == key {
+			return tag.value, true
+		}
+	}
+	return nil, false
+}
+
+// newSpan returns an instance of a clean Span object.
+// If options.PoolSpans is true, the spans are retrieved from an object pool.
+func (t *Tracer) newSpan() *Span {
+	return t.spanAllocator.Get()
+}
+
+// emitNewSpanMetrics generates metrics on the number of started spans and traces.
+// newTrace param: we cannot simply check for parentID==0 because in the Zipkin
+// model the server-side RPC span has the exact same trace/span/parent IDs as the
+// calling client-side span, but obviously the server-side span is
+// no longer a root span of the trace.
+func (t *Tracer) emitNewSpanMetrics(sp *Span, newTrace bool) {
+	if !sp.context.isSamplingFinalized() {
+		t.metrics.SpansStartedDelayedSampling.Inc(1)
+		if newTrace {
+			t.metrics.TracesStartedDelayedSampling.Inc(1)
+		}
+		// joining a trace is not possible, because sampling decision inherited from upstream is final
+	} else if sp.context.IsSampled() {
+		t.metrics.SpansStartedSampled.Inc(1)
+		if newTrace {
+			t.metrics.TracesStartedSampled.Inc(1)
+		} else if sp.firstInProcess {
+			t.metrics.TracesJoinedSampled.Inc(1)
+		}
+	} else {
+		t.metrics.SpansStartedNotSampled.Inc(1)
+		if newTrace {
+			t.metrics.TracesStartedNotSampled.Inc(1)
+		} else if sp.firstInProcess {
+			t.metrics.TracesJoinedNotSampled.Inc(1)
+		}
+	}
+}
+
+func (t *Tracer) reportSpan(sp *Span) {
+	ctx := sp.SpanContext()
+
+	if !ctx.isSamplingFinalized() {
+		t.metrics.SpansFinishedDelayedSampling.Inc(1)
+	} else if ctx.IsSampled() {
+		t.metrics.SpansFinishedSampled.Inc(1)
+	} else {
+		t.metrics.SpansFinishedNotSampled.Inc(1)
+	}
+
+	// Note: if the reporter is processing Span asynchronously then it needs to Retain() the span,
+	// and then Release() it when no longer needed.
+	// Otherwise, the span may be reused for another trace and its data may be overwritten.
+	if ctx.IsSampled() {
+		t.reporter.Report(sp)
+	}
+
+	sp.Release()
+}
+
+// randomID generates a random trace/span ID, using tracer.random() generator.
+// It never returns 0.
+func (t *Tracer) randomID() uint64 {
+	val := t.randomNumber()
+	for val == 0 {
+		val = t.randomNumber()
+	}
+	return val
+}
+
+// (NB) span must hold the lock before making this call
+func (t *Tracer) setBaggage(sp *Span, key, value string) {
+	t.baggageSetter.setBaggage(sp, key, value)
+}
+
+// (NB) span must hold the lock before making this call
+func (t *Tracer) isDebugAllowed(operation string) bool {
+	return t.debugThrottler.IsAllowed(operation)
+}
+
+// Sampler returns the sampler given to the tracer at creation.
+func (t *Tracer) Sampler() SamplerV2 {
+	return t.sampler
+}
+
+// SelfRef creates an opentracing compliant SpanReference from a jaeger
+// SpanContext. This is a factory function in order to encapsulate jaeger specific
+// types.
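+//
+// For example (a sketch), a span can be resumed from a previously captured
+// jaeger SpanContext by passing a self reference to StartSpan:
+//
+//	span := tracer.StartSpan("operation", jaeger.SelfRef(capturedCtx))
+//	defer span.Finish()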
+func SelfRef(ctx SpanContext) opentracing.SpanReference {
+	return opentracing.SpanReference{
+		Type:              selfRefType,
+		ReferencedContext: ctx,
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/tracer_options.go b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
new file mode 100644
index 0000000..f0734b7
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/tracer_options.go
@@ -0,0 +1,182 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"time"
+
+	"github.com/opentracing/opentracing-go"
+
+	"github.com/uber/jaeger-client-go/internal/baggage"
+	"github.com/uber/jaeger-client-go/internal/throttler"
+	"github.com/uber/jaeger-client-go/log"
+)
+
+// TracerOption is a function that sets some option on the tracer
+type TracerOption func(tracer *Tracer)
+
+// TracerOptions is a factory for all available TracerOption's
+var TracerOptions tracerOptions
+
+type tracerOptions struct{}
+
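+// TracerOptions follows the functional-options pattern: each method returns a
+// TracerOption that mutates the Tracer under construction. A usage sketch,
+// assuming the package's usual NewTracer constructor (not shown in this file)
+// and pre-built sampler/reporter values:
+//
+//	tracer, closer := NewTracer(
+//		"my-service",
+//		sampler,
+//		reporter,
+//		TracerOptions.Logger(log.StdLogger),
+//		TracerOptions.MaxTagValueLength(256),
+//	)
+//	defer closer.Close()
+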
+// Metrics creates a TracerOption that initializes Metrics on the tracer,
+// which is used to emit statistics.
+func (tracerOptions) Metrics(m *Metrics) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.metrics = *m
+	}
+}
+
+// Logger creates a TracerOption that gives the tracer a Logger.
+func (tracerOptions) Logger(logger Logger) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.logger = log.DebugLogAdapter(logger)
+	}
+}
+
+func (tracerOptions) CustomHeaderKeys(headerKeys *HeadersConfig) TracerOption {
+	return func(tracer *Tracer) {
+		if headerKeys == nil {
+			return
+		}
+		textPropagator := NewTextMapPropagator(headerKeys.ApplyDefaults(), tracer.metrics)
+		tracer.addCodec(opentracing.TextMap, textPropagator, textPropagator)
+
+		httpHeaderPropagator := NewHTTPHeaderPropagator(headerKeys.ApplyDefaults(), tracer.metrics)
+		tracer.addCodec(opentracing.HTTPHeaders, httpHeaderPropagator, httpHeaderPropagator)
+	}
+}
+
+// TimeNow creates a TracerOption that gives the tracer a function
+// used to generate timestamps for spans.
+func (tracerOptions) TimeNow(timeNow func() time.Time) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.timeNow = timeNow
+	}
+}
+
+// RandomNumber creates a TracerOption that gives the tracer
+// a thread-safe random number generator function for generating trace IDs.
+func (tracerOptions) RandomNumber(randomNumber func() uint64) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.randomNumber = randomNumber
+	}
+}
+
+// PoolSpans creates a TracerOption that tells the tracer whether it should use
+// an object pool to minimize span allocations.
+// This should be used with care, only if the service is not running any async tasks
+// that can access parent spans after those spans have been finished.
+func (tracerOptions) PoolSpans(poolSpans bool) TracerOption {
+	return func(tracer *Tracer) {
+		if poolSpans {
+			tracer.spanAllocator = newSyncPollSpanAllocator()
+		} else {
+			tracer.spanAllocator = simpleSpanAllocator{}
+		}
+	}
+}
+
+// Deprecated: HostIPv4 creates a TracerOption that identifies the current service/process.
+// If not set, the factory method will obtain the current IP address.
+// The TracerOption is deprecated; the tracer will attempt to automatically detect the IP.
+func (tracerOptions) HostIPv4(hostIPv4 uint32) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.hostIPv4 = hostIPv4
+	}
+}
+
+func (tracerOptions) Injector(format interface{}, injector Injector) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.injectors[format] = injector
+	}
+}
+
+func (tracerOptions) Extractor(format interface{}, extractor Extractor) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.extractors[format] = extractor
+	}
+}
+
+func (t tracerOptions) Observer(observer Observer) TracerOption {
+	return t.ContribObserver(&oldObserver{obs: observer})
+}
+
+func (tracerOptions) ContribObserver(observer ContribObserver) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.observer.append(observer)
+	}
+}
+
+func (tracerOptions) Gen128Bit(gen128Bit bool) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.gen128Bit = gen128Bit
+	}
+}
+
+func (tracerOptions) NoDebugFlagOnForcedSampling(noDebugFlagOnForcedSampling bool) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.noDebugFlagOnForcedSampling = noDebugFlagOnForcedSampling
+	}
+}
+
+func (tracerOptions) HighTraceIDGenerator(highTraceIDGenerator func() uint64) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.highTraceIDGenerator = highTraceIDGenerator
+	}
+}
+
+func (tracerOptions) MaxTagValueLength(maxTagValueLength int) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.maxTagValueLength = maxTagValueLength
+	}
+}
+
+// MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero
+// value). If a span has more logs than this value, logs are dropped as
+// necessary (and replaced with a log describing how many were dropped).
+//
+// About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
+// half are the newest logs.
+func (tracerOptions) MaxLogsPerSpan(maxLogsPerSpan int) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.maxLogsPerSpan = maxLogsPerSpan
+	}
+}
+
+func (tracerOptions) ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.options.zipkinSharedRPCSpan = zipkinSharedRPCSpan
+	}
+}
+
+func (tracerOptions) Tag(key string, value interface{}) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.tags = append(tracer.tags, Tag{key: key, value: value})
+	}
+}
+
+func (tracerOptions) BaggageRestrictionManager(mgr baggage.RestrictionManager) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.baggageRestrictionManager = mgr
+	}
+}
+
+func (tracerOptions) DebugThrottler(throttler throttler.Throttler) TracerOption {
+	return func(tracer *Tracer) {
+		tracer.debugThrottler = throttler
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/transport.go b/vendor/github.com/uber/jaeger-client-go/transport.go
new file mode 100644
index 0000000..c5f5b19
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"io"
+)
+
+// Transport abstracts the method of sending spans out of process.
+// Implementations are NOT required to be thread-safe; the RemoteReporter
+// is expected to only call methods on the Transport from the same go-routine.
+type Transport interface {
+	// Append converts the span to the wire representation and adds it
+	// to the sender's internal buffer. If the buffer exceeds its designated
+	// size, the transport should call Flush() and return the number of spans
+	// flushed; otherwise it returns 0. If an error is returned, the returned
+	// number of spans is treated as failed spans and reported to metrics accordingly.
+	Append(span *Span) (int, error)
+
+	// Flush submits the internal buffer to the remote server. It returns the
+	// number of spans flushed. If an error is returned, the returned number of
+	// spans is treated as failed spans and reported to metrics accordingly.
+	Flush() (int, error)
+
+	io.Closer
+}
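+
+// A sketch of how a reporter typically drives a Transport from a single
+// goroutine (the sender variable and span channel are illustrative only):
+//
+//	for span := range spansToSend {
+//		if flushed, err := sender.Append(span); err != nil {
+//			// 'flushed' spans are counted as failed
+//		} else if flushed > 0 {
+//			// a full batch was sent as a side effect of Append
+//		}
+//	}
+//	sender.Flush() // send whatever is still buffered
+//	sender.Close()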
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/doc.go b/vendor/github.com/uber/jaeger-client-go/transport/doc.go
new file mode 100644
index 0000000..6b961fb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport/doc.go
@@ -0,0 +1,23 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package transport defines various transports that can be used with
+// RemoteReporter to send spans out of process. Transport is responsible
+// for serializing the spans into a specific format suitable for sending
+// to the tracing backend. Examples may include Thrift over UDP, Thrift
+// or JSON over HTTP, Thrift over Kafka, etc.
+//
+// Implementations are NOT required to be thread-safe; the RemoteReporter
+// is expected to only call methods on the Transport from the same go-routine.
+package transport
diff --git a/vendor/github.com/uber/jaeger-client-go/transport/http.go b/vendor/github.com/uber/jaeger-client-go/transport/http.go
new file mode 100644
index 0000000..1d6f14d
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport/http.go
@@ -0,0 +1,175 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package transport
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"time"
+
+	"github.com/uber/jaeger-client-go/thrift"
+
+	"github.com/uber/jaeger-client-go"
+	j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+)
+
+// Default timeout for an HTTP request.
+const defaultHTTPTimeout = time.Second * 5
+
+// HTTPTransport implements Transport by forwarding spans to an HTTP server.
+type HTTPTransport struct {
+	url             string
+	client          *http.Client
+	batchSize       int
+	spans           []*j.Span
+	process         *j.Process
+	httpCredentials *HTTPBasicAuthCredentials
+	headers         map[string]string
+}
+
+// HTTPBasicAuthCredentials stores credentials for HTTP basic auth.
+type HTTPBasicAuthCredentials struct {
+	username string
+	password string
+}
+
+// HTTPOption sets a parameter for the HTTPTransport.
+type HTTPOption func(c *HTTPTransport)
+
+// HTTPTimeout sets the maximum timeout for an HTTP request.
+func HTTPTimeout(duration time.Duration) HTTPOption {
+	return func(c *HTTPTransport) { c.client.Timeout = duration }
+}
+
+// HTTPBatchSize sets the maximum batch size, after which a collect will be
+// triggered. The default batch size is 100 spans.
+func HTTPBatchSize(n int) HTTPOption {
+	return func(c *HTTPTransport) { c.batchSize = n }
+}
+
+// HTTPBasicAuth sets the credentials required to perform HTTP basic auth
+func HTTPBasicAuth(username string, password string) HTTPOption {
+	return func(c *HTTPTransport) {
+		c.httpCredentials = &HTTPBasicAuthCredentials{username: username, password: password}
+	}
+}
+
+// HTTPRoundTripper configures the underlying Transport on the *http.Client
+// that is used.
+func HTTPRoundTripper(transport http.RoundTripper) HTTPOption {
+	return func(c *HTTPTransport) {
+		c.client.Transport = transport
+	}
+}
+
+// HTTPHeaders defines the HTTP headers that will be attached to the jaeger client's HTTP request
+func HTTPHeaders(headers map[string]string) HTTPOption {
+	return func(c *HTTPTransport) {
+		c.headers = headers
+	}
+}
+
+// NewHTTPTransport returns a new HTTP-backed transport. url should be an HTTP
+// URL of the collector that handles POST requests, typically something like:
+//     http://hostname:14268/api/traces?format=jaeger.thrift
+func NewHTTPTransport(url string, options ...HTTPOption) *HTTPTransport {
+	c := &HTTPTransport{
+		url:       url,
+		client:    &http.Client{Timeout: defaultHTTPTimeout},
+		batchSize: 100,
+		spans:     []*j.Span{},
+	}
+
+	for _, option := range options {
+		option(c)
+	}
+	return c
+}
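+
+// A usage sketch (the URL matches the example above; option values are
+// illustrative only):
+//
+//	sender := NewHTTPTransport(
+//		"http://jaeger-collector:14268/api/traces?format=jaeger.thrift",
+//		HTTPBatchSize(50),
+//		HTTPBasicAuth("user", "secret"),
+//		HTTPTimeout(3*time.Second),
+//	)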
+
+// Append implements Transport.
+func (c *HTTPTransport) Append(span *jaeger.Span) (int, error) {
+	if c.process == nil {
+		c.process = jaeger.BuildJaegerProcessThrift(span)
+	}
+	jSpan := jaeger.BuildJaegerThrift(span)
+	c.spans = append(c.spans, jSpan)
+	if len(c.spans) >= c.batchSize {
+		return c.Flush()
+	}
+	return 0, nil
+}
+
+// Flush implements Transport.
+func (c *HTTPTransport) Flush() (int, error) {
+	count := len(c.spans)
+	if count == 0 {
+		return 0, nil
+	}
+	err := c.send(c.spans)
+	c.spans = c.spans[:0]
+	return count, err
+}
+
+// Close implements Transport.
+func (c *HTTPTransport) Close() error {
+	return nil
+}
+
+func (c *HTTPTransport) send(spans []*j.Span) error {
+	batch := &j.Batch{
+		Spans:   spans,
+		Process: c.process,
+	}
+	body, err := serializeThrift(batch)
+	if err != nil {
+		return err
+	}
+	req, err := http.NewRequest("POST", c.url, body)
+	if err != nil {
+		return err
+	}
+	req.Header.Set("Content-Type", "application/x-thrift")
+	for k, v := range c.headers {
+		req.Header.Set(k, v)
+	}
+
+	if c.httpCredentials != nil {
+		req.SetBasicAuth(c.httpCredentials.username, c.httpCredentials.password)
+	}
+
+	resp, err := c.client.Do(req)
+	if err != nil {
+		return err
+	}
+	io.Copy(ioutil.Discard, resp.Body)
+	resp.Body.Close()
+	if resp.StatusCode >= http.StatusBadRequest {
+		return fmt.Errorf("error from collector: %d", resp.StatusCode)
+	}
+	return nil
+}
+
+func serializeThrift(obj thrift.TStruct) (*bytes.Buffer, error) {
+	t := thrift.NewTMemoryBuffer()
+	p := thrift.NewTBinaryProtocolTransport(t)
+	if err := obj.Write(context.Background(), p); err != nil {
+		return nil, err
+	}
+	return t.Buffer, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/transport_udp.go b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
new file mode 100644
index 0000000..0000412
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/transport_udp.go
@@ -0,0 +1,194 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/uber/jaeger-client-go/internal/reporterstats"
+	"github.com/uber/jaeger-client-go/log"
+	"github.com/uber/jaeger-client-go/thrift"
+	j "github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+	"github.com/uber/jaeger-client-go/utils"
+)
+
+// Empirically obtained constant for how many bytes in the message are used for envelope.
+// The total datagram size is:
+// sizeof(Span) * numSpans + processByteSize + emitBatchOverhead <= maxPacketSize
+//
+// Note that due to the use of Compact Thrift protocol, overhead grows with the number of spans
+// in the batch, because the length of the list is encoded as varint32, as well as SeqId.
+//
+// There is a unit test `TestEmitBatchOverhead` that validates this number; it fails at <68.
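+//
+// For example, with the default UDPPacketMaxLength of 65000 bytes this leaves
+// 65000 - 70 = 64930 bytes (maxSpanBytes) for the serialized Process plus spans.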
+const emitBatchOverhead = 70
+
+var errSpanTooLarge = errors.New("span is too large")
+
+type udpSender struct {
+	client          *utils.AgentClientUDP
+	maxPacketSize   int                   // max size of datagram in bytes
+	maxSpanBytes    int                   // max number of bytes to record spans (excluding envelope) in the datagram
+	byteBufferSize  int                   // current number of span bytes accumulated in the buffer
+	spanBuffer      []*j.Span             // spans buffered before a flush
+	thriftBuffer    *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
+	thriftProtocol  thrift.TProtocol
+	process         *j.Process
+	processByteSize int
+
+	// reporterStats provides access to stats that are only known to Reporter
+	reporterStats reporterstats.ReporterStats
+
+	// The following counters are always non-negative, but we need to send them in signed i64 Thrift fields,
+	// so we keep them as signed. At 10k QPS, overflow happens in about 300 million years.
+	batchSeqNo           int64
+	tooLargeDroppedSpans int64
+	failedToEmitSpans    int64
+}
+
+// UDPTransportParams allows specifying options for initializing a UDPTransport. An instance of this struct should
+// be passed to NewUDPTransportWithParams.
+type UDPTransportParams struct {
+	utils.AgentClientUDPParams
+}
+
+// NewUDPTransportWithParams creates a Transport that submits spans to jaeger-agent over UDP.
+// TODO: (breaking change) move to transport/ package.
+func NewUDPTransportWithParams(params UDPTransportParams) (Transport, error) {
+	if len(params.HostPort) == 0 {
+		params.HostPort = fmt.Sprintf("%s:%d", DefaultUDPSpanServerHost, DefaultUDPSpanServerPort)
+	}
+
+	if params.Logger == nil {
+		params.Logger = log.StdLogger
+	}
+
+	if params.MaxPacketSize == 0 {
+		params.MaxPacketSize = utils.UDPPacketMaxLength
+	}
+
+	protocolFactory := thrift.NewTCompactProtocolFactory()
+
+	// Each span is first written to thriftBuffer to determine its size in bytes.
+	thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
+	thriftProtocol := protocolFactory.GetProtocol(thriftBuffer)
+
+	client, err := utils.NewAgentClientUDPWithParams(params.AgentClientUDPParams)
+	if err != nil {
+		return nil, err
+	}
+
+	return &udpSender{
+		client:         client,
+		maxSpanBytes:   params.MaxPacketSize - emitBatchOverhead,
+		thriftBuffer:   thriftBuffer,
+		thriftProtocol: thriftProtocol,
+	}, nil
+}
+
+// NewUDPTransport creates a Transport that submits spans to jaeger-agent over UDP.
+// TODO: (breaking change) move to transport/ package.
+func NewUDPTransport(hostPort string, maxPacketSize int) (Transport, error) {
+	return NewUDPTransportWithParams(UDPTransportParams{
+		AgentClientUDPParams: utils.AgentClientUDPParams{
+			HostPort:      hostPort,
+			MaxPacketSize: maxPacketSize,
+		},
+	})
+}
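+
+// A usage sketch (the host:port is illustrative; empty hostPort and zero
+// maxPacketSize fall back to the defaults applied above):
+//
+//	sender, err := NewUDPTransport("jaeger-agent:6831", 0)
+//	if err != nil {
+//		// handle error
+//	}
+//	// sender is then typically handed to a remote reporter, which calls
+//	// Append/Flush/Close on it from a single goroutine.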
+
+// SetReporterStats implements reporterstats.Receiver.
+func (s *udpSender) SetReporterStats(rs reporterstats.ReporterStats) {
+	s.reporterStats = rs
+}
+
+func (s *udpSender) calcSizeOfSerializedThrift(thriftStruct thrift.TStruct) int {
+	s.thriftBuffer.Reset()
+	_ = thriftStruct.Write(context.Background(), s.thriftProtocol)
+	return s.thriftBuffer.Len()
+}
+
+func (s *udpSender) Append(span *Span) (int, error) {
+	if s.process == nil {
+		s.process = BuildJaegerProcessThrift(span)
+		s.processByteSize = s.calcSizeOfSerializedThrift(s.process)
+		s.byteBufferSize += s.processByteSize
+	}
+	jSpan := BuildJaegerThrift(span)
+	spanSize := s.calcSizeOfSerializedThrift(jSpan)
+	if spanSize > s.maxSpanBytes {
+		s.tooLargeDroppedSpans++
+		return 1, errSpanTooLarge
+	}
+
+	s.byteBufferSize += spanSize
+	if s.byteBufferSize <= s.maxSpanBytes {
+		s.spanBuffer = append(s.spanBuffer, jSpan)
+		if s.byteBufferSize < s.maxSpanBytes {
+			return 0, nil
+		}
+		return s.Flush()
+	}
+	// the latest span did not fit in the buffer
+	n, err := s.Flush()
+	s.spanBuffer = append(s.spanBuffer, jSpan)
+	s.byteBufferSize = spanSize + s.processByteSize
+	return n, err
+}
+
+func (s *udpSender) Flush() (int, error) {
+	n := len(s.spanBuffer)
+	if n == 0 {
+		return 0, nil
+	}
+	s.batchSeqNo++
+	batchSeqNo := int64(s.batchSeqNo)
+	err := s.client.EmitBatch(context.Background(), &j.Batch{
+		Process: s.process,
+		Spans:   s.spanBuffer,
+		SeqNo:   &batchSeqNo,
+		Stats:   s.makeStats(),
+	})
+	s.resetBuffers()
+	if err != nil {
+		s.failedToEmitSpans += int64(n)
+	}
+	return n, err
+}
+
+func (s *udpSender) Close() error {
+	return s.client.Close()
+}
+
+func (s *udpSender) resetBuffers() {
+	for i := range s.spanBuffer {
+		s.spanBuffer[i] = nil
+	}
+	s.spanBuffer = s.spanBuffer[:0]
+	s.byteBufferSize = s.processByteSize
+}
+
+func (s *udpSender) makeStats() *j.ClientStats {
+	var dropped int64
+	if s.reporterStats != nil {
+		dropped = s.reporterStats.SpansDroppedFromQueue()
+	}
+	return &j.ClientStats{
+		FullQueueDroppedSpans: dropped,
+		TooLargeDroppedSpans:  s.tooLargeDroppedSpans,
+		FailedToEmitSpans:     s.failedToEmitSpans,
+	}
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/http_json.go b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go
new file mode 100644
index 0000000..237211f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/http_json.go
@@ -0,0 +1,54 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+)
+
+// GetJSON makes an HTTP call to the specified URL and parses the returned JSON into `out`.
+func GetJSON(url string, out interface{}) error {
+	resp, err := http.Get(url)
+	if err != nil {
+		return err
+	}
+	return ReadJSON(resp, out)
+}
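+
+// A usage sketch (the URL and target struct are illustrative only):
+//
+//	var out struct {
+//		StrategyType string `json:"strategyType"`
+//	}
+//	if err := GetJSON("http://localhost:5778/sampling?service=my-service", &out); err != nil {
+//		// handle error
+//	}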
+
+// ReadJSON reads JSON from http.Response and parses it into `out`
+func ReadJSON(resp *http.Response, out interface{}) error {
+	defer resp.Body.Close()
+
+	if resp.StatusCode >= 400 {
+		body, err := ioutil.ReadAll(resp.Body)
+		if err != nil {
+			return err
+		}
+
+		return fmt.Errorf("StatusCode: %d, Body: %s", resp.StatusCode, body)
+	}
+
+	if out == nil {
+		io.Copy(ioutil.Discard, resp.Body)
+		return nil
+	}
+
+	decoder := json.NewDecoder(resp.Body)
+	return decoder.Decode(out)
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/localip.go b/vendor/github.com/uber/jaeger-client-go/utils/localip.go
new file mode 100644
index 0000000..b51af77
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/localip.go
@@ -0,0 +1,84 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"errors"
+	"net"
+)
+
+// This code is borrowed from https://github.com/uber/tchannel-go/blob/dev/localip.go
+
+// scoreAddr scores how likely the given addr is to be a remote address and returns the
+// IP to use when listening. Any address which receives a negative score should not be used.
+// Scores are calculated as:
+// -1 for any unknown IP addresses.
+// +300 for IPv4 addresses.
+// +100 for non-local addresses, with an extra +100 for "up" interfaces.
+func scoreAddr(iface net.Interface, addr net.Addr) (int, net.IP) {
+	var ip net.IP
+	if netAddr, ok := addr.(*net.IPNet); ok {
+		ip = netAddr.IP
+	} else if netIP, ok := addr.(*net.IPAddr); ok {
+		ip = netIP.IP
+	} else {
+		return -1, nil
+	}
+
+	var score int
+	if ip.To4() != nil {
+		score += 300
+	}
+	if iface.Flags&net.FlagLoopback == 0 && !ip.IsLoopback() {
+		score += 100
+		if iface.Flags&net.FlagUp != 0 {
+			score += 100
+		}
+	}
+	return score, ip
+}
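+
+// For example, under this scoring an "up", non-loopback interface carrying an
+// IPv4 address scores 300 + 100 + 100 = 500, a loopback IPv4 address scores
+// only 300, and a non-IP address is rejected with -1.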
+
+// HostIP tries to find an IP that can be used by other machines to reach this machine.
+func HostIP() (net.IP, error) {
+	interfaces, err := net.Interfaces()
+	if err != nil {
+		return nil, err
+	}
+
+	bestScore := -1
+	var bestIP net.IP
+	// Select the highest scoring IP as the best IP.
+	for _, iface := range interfaces {
+		addrs, err := iface.Addrs()
+		if err != nil {
+			// Skip this interface if there is an error.
+			continue
+		}
+
+		for _, addr := range addrs {
+			score, ip := scoreAddr(iface, addr)
+			if score > bestScore {
+				bestScore = score
+				bestIP = ip
+			}
+		}
+	}
+
+	if bestScore == -1 {
+		return nil, errors.New("no addresses to listen on")
+	}
+
+	return bestIP, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rand.go b/vendor/github.com/uber/jaeger-client-go/utils/rand.go
new file mode 100644
index 0000000..9875f7f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/rand.go
@@ -0,0 +1,46 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"math/rand"
+	"sync"
+)
+
+// lockedSource allows a random number generator to be used by multiple goroutines concurrently.
+// The code is very similar to math/rand.lockedSource, which is unfortunately not exposed.
+type lockedSource struct {
+	mut sync.Mutex
+	src rand.Source
+}
+
+// NewRand returns a rand.Rand that is threadsafe.
+func NewRand(seed int64) *rand.Rand {
+	return rand.New(&lockedSource{src: rand.NewSource(seed)})
+}
+
+func (r *lockedSource) Int63() (n int64) {
+	r.mut.Lock()
+	n = r.src.Int63()
+	r.mut.Unlock()
+	return
+}
+
+// Seed implements Seed() of Source
+func (r *lockedSource) Seed(seed int64) {
+	r.mut.Lock()
+	r.src.Seed(seed)
+	r.mut.Unlock()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
new file mode 100644
index 0000000..bf2f131
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/rate_limiter.go
@@ -0,0 +1,112 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"sync"
+	"time"
+)
+
+// RateLimiter is a filter used to check if a message that is worth itemCost units is within the rate limits.
+//
+// TODO (breaking change) remove this interface in favor of public struct below
+//
+// Deprecated, use ReconfigurableRateLimiter.
+type RateLimiter interface {
+	CheckCredit(itemCost float64) bool
+}
+
+// ReconfigurableRateLimiter is a rate limiter based on the leaky bucket algorithm, formulated in terms of a
+// credits balance that is replenished every time the CheckCredit() method is called (tick) by an amount proportional
+// to the time elapsed since the last tick, up to a maximum of maxBalance. A call to CheckCredit() takes the cost
+// of an item we want to pay for with the balance. If the balance exceeds the cost of the item, the item is "purchased"
+// and the balance reduced, indicated by a returned value of true. Otherwise the balance is unchanged and false is returned.
+//
+// This can be used to limit the rate of messages emitted by a service by instantiating the rate limiter with the
+// max number of messages a service is allowed to emit per second, and calling CheckCredit(1.0) for each message
+// to determine if the message is within the rate limit.
+//
+// It can also be used to limit the rate of traffic in bytes, by setting creditsPerSecond to the desired throughput
+// as bytes/second, and calling CheckCredit() with the actual message size.
+//
+// TODO (breaking change) rename to RateLimiter once the interface is removed
+type ReconfigurableRateLimiter struct {
+	lock sync.Mutex
+
+	creditsPerSecond float64
+	balance          float64
+	maxBalance       float64
+	lastTick         time.Time
+
+	timeNow func() time.Time
+}
+
+// NewRateLimiter creates a new ReconfigurableRateLimiter.
+func NewRateLimiter(creditsPerSecond, maxBalance float64) *ReconfigurableRateLimiter {
+	return &ReconfigurableRateLimiter{
+		creditsPerSecond: creditsPerSecond,
+		balance:          maxBalance,
+		maxBalance:       maxBalance,
+		lastTick:         time.Now(),
+		timeNow:          time.Now,
+	}
+}
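+
+// A usage sketch for message rate limiting, as described above (numbers are
+// illustrative):
+//
+//	limiter := NewRateLimiter(100, 100) // ~100 credits per second, burst of 100
+//	if limiter.CheckCredit(1.0) {
+//		// within the rate limit: emit the message
+//	} else {
+//		// over the limit: drop or delay the message
+//	}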
+
+// CheckCredit tries to reduce the current balance by itemCost provided that the current balance
+// is not less than itemCost.
+func (rl *ReconfigurableRateLimiter) CheckCredit(itemCost float64) bool {
+	rl.lock.Lock()
+	defer rl.lock.Unlock()
+
+	// if we have enough credits to pay for current item, then reduce balance and allow
+	if rl.balance >= itemCost {
+		rl.balance -= itemCost
+		return true
+	}
+	// otherwise check if balance can be increased due to time elapsed, and try again
+	rl.updateBalance()
+	if rl.balance >= itemCost {
+		rl.balance -= itemCost
+		return true
+	}
+	return false
+}
+
+// updateBalance recalculates current balance based on time elapsed. Must be called while holding a lock.
+func (rl *ReconfigurableRateLimiter) updateBalance() {
+	// calculate how much time passed since the last tick, and update current tick
+	currentTime := rl.timeNow()
+	elapsedTime := currentTime.Sub(rl.lastTick)
+	rl.lastTick = currentTime
+	// calculate how much credit we have accumulated since the last tick
+	rl.balance += elapsedTime.Seconds() * rl.creditsPerSecond
+	if rl.balance > rl.maxBalance {
+		rl.balance = rl.maxBalance
+	}
+}
+
+// Update changes the main parameters of the rate limiter in-place, while retaining
+// the current accumulated balance (pro-rated to the new maxBalance value). Using this method
+// instead of creating a new rate limiter helps to avoid thundering herd when sampling
+// strategies are updated.
+func (rl *ReconfigurableRateLimiter) Update(creditsPerSecond, maxBalance float64) {
+	rl.lock.Lock()
+	defer rl.lock.Unlock()
+
+	rl.updateBalance() // get up to date balance
+	rl.balance = rl.balance * maxBalance / rl.maxBalance
+	rl.creditsPerSecond = creditsPerSecond
+	rl.maxBalance = maxBalance
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go b/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go
new file mode 100644
index 0000000..0dffc7f
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/reconnecting_udp_conn.go
@@ -0,0 +1,189 @@
+// Copyright (c) 2020 The Jaeger Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"fmt"
+	"net"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"github.com/uber/jaeger-client-go/log"
+)
+
+// reconnectingUDPConn is an implementation of udpConn that resolves hostPort every resolveTimeout; if the resolved address
+// differs from the current conn's, the new address is dialed and the conn is swapped.
+type reconnectingUDPConn struct {
+	hostPort    string
+	resolveFunc resolveFunc
+	dialFunc    dialFunc
+	logger      log.Logger
+	bufferBytes int64
+
+	connMtx   sync.RWMutex
+	conn      *net.UDPConn
+	destAddr  *net.UDPAddr
+	closeChan chan struct{}
+}
+
+type resolveFunc func(network string, hostPort string) (*net.UDPAddr, error)
+type dialFunc func(network string, laddr, raddr *net.UDPAddr) (*net.UDPConn, error)
+
+// newReconnectingUDPConn returns a new udpConn that resolves hostPort every resolveTimeout; if the resolved address
+// differs from the current conn's, the new address is dialed and the conn is swapped.
+func newReconnectingUDPConn(hostPort string, resolveTimeout time.Duration, resolveFunc resolveFunc, dialFunc dialFunc, logger log.Logger) (*reconnectingUDPConn, error) {
+	conn := &reconnectingUDPConn{
+		hostPort:    hostPort,
+		resolveFunc: resolveFunc,
+		dialFunc:    dialFunc,
+		logger:      logger,
+		closeChan:   make(chan struct{}),
+	}
+
+	if err := conn.attemptResolveAndDial(); err != nil {
+		logger.Error(fmt.Sprintf("failed resolving destination address on connection startup, with err: %q. retrying in %s", err.Error(), resolveTimeout))
+	}
+
+	go conn.reconnectLoop(resolveTimeout)
+
+	return conn, nil
+}
+
+func (c *reconnectingUDPConn) reconnectLoop(resolveTimeout time.Duration) {
+	ticker := time.NewTicker(resolveTimeout)
+	defer ticker.Stop()
+
+	for {
+		select {
+		case <-c.closeChan:
+			return
+		case <-ticker.C:
+			if err := c.attemptResolveAndDial(); err != nil {
+				c.logger.Error(err.Error())
+			}
+		}
+	}
+}
+
+func (c *reconnectingUDPConn) attemptResolveAndDial() error {
+	newAddr, err := c.resolveFunc("udp", c.hostPort)
+	if err != nil {
+		return fmt.Errorf("failed to resolve new addr for host %q, with err: %w", c.hostPort, err)
+	}
+
+	c.connMtx.RLock()
+	curAddr := c.destAddr
+	c.connMtx.RUnlock()
+
+	// don't attempt to dial if an addr was successfully dialed previously and the resolved addr is the same as the current conn
+	if curAddr != nil && newAddr.String() == curAddr.String() {
+		return nil
+	}
+
+	if err := c.attemptDialNewAddr(newAddr); err != nil {
+		return fmt.Errorf("failed to dial newly resolved addr '%s', with err: %w", newAddr, err)
+	}
+
+	return nil
+}
+
+func (c *reconnectingUDPConn) attemptDialNewAddr(newAddr *net.UDPAddr) error {
+	connUDP, err := c.dialFunc(newAddr.Network(), nil, newAddr)
+	if err != nil {
+		return err
+	}
+
+	if bufferBytes := int(atomic.LoadInt64(&c.bufferBytes)); bufferBytes != 0 {
+		if err = connUDP.SetWriteBuffer(bufferBytes); err != nil {
+			return err
+		}
+	}
+
+	c.connMtx.Lock()
+	c.destAddr = newAddr
+	// store prev to close later
+	prevConn := c.conn
+	c.conn = connUDP
+	c.connMtx.Unlock()
+
+	if prevConn != nil {
+		return prevConn.Close()
+	}
+
+	return nil
+}
+
+// Write calls net.udpConn.Write; if it fails, an attempt is made to connect to a new addr, and if that succeeds the write is retried before returning.
+func (c *reconnectingUDPConn) Write(b []byte) (int, error) {
+	var bytesWritten int
+	var err error
+
+	c.connMtx.RLock()
+	if c.conn == nil {
+		// if the connection is not initialized, indicate this with err in order to hook into the retry logic
+		err = fmt.Errorf("UDP connection not yet initialized, an address has not been resolved")
+	} else {
+		bytesWritten, err = c.conn.Write(b)
+	}
+	c.connMtx.RUnlock()
+
+	if err == nil {
+		return bytesWritten, nil
+	}
+
+	// attempt to resolve and dial a new address in case that's the problem; if resolve and dial succeed, try the write again
+	if reconnErr := c.attemptResolveAndDial(); reconnErr == nil {
+		c.connMtx.RLock()
+		defer c.connMtx.RUnlock()
+		return c.conn.Write(b)
+	}
+
+	// return original error if reconn fails
+	return bytesWritten, err
+}
+
+// Close stops the reconnectLoop, then closes the connection via net.udpConn's implementation.
+func (c *reconnectingUDPConn) Close() error {
+	close(c.closeChan)
+
+	// acquire rw lock before closing conn to ensure calls to Write drain
+	c.connMtx.Lock()
+	defer c.connMtx.Unlock()
+
+	if c.conn != nil {
+		return c.conn.Close()
+	}
+
+	return nil
+}
+
+// SetWriteBuffer defers to the net.udpConn SetWriteBuffer implementation wrapped with an RLock. If no conn is currently held
+// when SetWriteBuffer is called, bufferBytes is stored to be set on new conns.
+func (c *reconnectingUDPConn) SetWriteBuffer(bytes int) error {
+	var err error
+
+	c.connMtx.RLock()
+	if c.conn != nil {
+		err = c.conn.SetWriteBuffer(bytes)
+	}
+	c.connMtx.RUnlock()
+
+	if err == nil {
+		atomic.StoreInt64(&c.bufferBytes, int64(bytes))
+	}
+
+	return err
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
new file mode 100644
index 0000000..4c59ae9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/udp_client.go
@@ -0,0 +1,149 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net"
+	"time"
+
+	"github.com/uber/jaeger-client-go/log"
+	"github.com/uber/jaeger-client-go/thrift"
+
+	"github.com/uber/jaeger-client-go/thrift-gen/agent"
+	"github.com/uber/jaeger-client-go/thrift-gen/jaeger"
+	"github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+)
+
+// UDPPacketMaxLength is the max size of a UDP packet we want to send, synced with jaeger-agent.
+const UDPPacketMaxLength = 65000
+
+// AgentClientUDP is a UDP client to Jaeger agent that implements agent.Agent interface.
+type AgentClientUDP struct {
+	agent.Agent
+	io.Closer
+
+	connUDP       udpConn
+	client        *agent.AgentClient
+	maxPacketSize int                   // max size of datagram in bytes
+	thriftBuffer  *thrift.TMemoryBuffer // buffer used to calculate byte size of a span
+}
+
+type udpConn interface {
+	Write([]byte) (int, error)
+	SetWriteBuffer(int) error
+	Close() error
+}
+
+// AgentClientUDPParams allows specifying options for initializing an AgentClientUDP. An instance of this struct should
+// be passed to NewAgentClientUDPWithParams.
+type AgentClientUDPParams struct {
+	HostPort                   string
+	MaxPacketSize              int
+	Logger                     log.Logger
+	DisableAttemptReconnecting bool
+	AttemptReconnectInterval   time.Duration
+}
+
+// NewAgentClientUDPWithParams creates a client that sends spans to Jaeger Agent over UDP.
+func NewAgentClientUDPWithParams(params AgentClientUDPParams) (*AgentClientUDP, error) {
+	// validate hostport
+	if _, _, err := net.SplitHostPort(params.HostPort); err != nil {
+		return nil, err
+	}
+
+	if params.MaxPacketSize == 0 {
+		params.MaxPacketSize = UDPPacketMaxLength
+	}
+
+	if params.Logger == nil {
+		params.Logger = log.StdLogger
+	}
+
+	if !params.DisableAttemptReconnecting && params.AttemptReconnectInterval == 0 {
+		params.AttemptReconnectInterval = time.Second * 30
+	}
+
+	thriftBuffer := thrift.NewTMemoryBufferLen(params.MaxPacketSize)
+	protocolFactory := thrift.NewTCompactProtocolFactory()
+	client := agent.NewAgentClientFactory(thriftBuffer, protocolFactory)
+
+	var connUDP udpConn
+	var err error
+
+	if params.DisableAttemptReconnecting {
+		destAddr, err := net.ResolveUDPAddr("udp", params.HostPort)
+		if err != nil {
+			return nil, err
+		}
+
+		connUDP, err = net.DialUDP(destAddr.Network(), nil, destAddr)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		// host is a hostname; set up a resolver loop in case the host record changes during operation
+		connUDP, err = newReconnectingUDPConn(params.HostPort, params.AttemptReconnectInterval, net.ResolveUDPAddr, net.DialUDP, params.Logger)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	if err := connUDP.SetWriteBuffer(params.MaxPacketSize); err != nil {
+		return nil, err
+	}
+
+	return &AgentClientUDP{
+		connUDP:       connUDP,
+		client:        client,
+		maxPacketSize: params.MaxPacketSize,
+		thriftBuffer:  thriftBuffer,
+	}, nil
+}
+
+// NewAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP.
+func NewAgentClientUDP(hostPort string, maxPacketSize int) (*AgentClientUDP, error) {
+	return NewAgentClientUDPWithParams(AgentClientUDPParams{
+		HostPort:      hostPort,
+		MaxPacketSize: maxPacketSize,
+	})
+}
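+
+// A usage sketch (the host:port is illustrative; a zero maxPacketSize falls
+// back to UDPPacketMaxLength):
+//
+//	client, err := NewAgentClientUDP("127.0.0.1:6831", 0)
+//	if err != nil {
+//		// handle error
+//	}
+//	defer client.Close()
+//	err = client.EmitBatch(context.Background(), &jaeger.Batch{Process: proc, Spans: spans})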
+
+// EmitZipkinBatch implements EmitZipkinBatch() of Agent interface
+func (a *AgentClientUDP) EmitZipkinBatch(context.Context, []*zipkincore.Span) error {
+	return errors.New("Not implemented")
+}
+
+// EmitBatch implements EmitBatch() of Agent interface
+func (a *AgentClientUDP) EmitBatch(ctx context.Context, batch *jaeger.Batch) error {
+	a.thriftBuffer.Reset()
+	if err := a.client.EmitBatch(ctx, batch); err != nil {
+		return err
+	}
+	if a.thriftBuffer.Len() > a.maxPacketSize {
+		return fmt.Errorf("data does not fit within one UDP packet; size %d, max %d, spans %d",
+			a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans))
+	}
+	_, err := a.connUDP.Write(a.thriftBuffer.Bytes())
+	return err
+}
+
+// Close implements Close() of io.Closer and closes the underlying UDP connection.
+func (a *AgentClientUDP) Close() error {
+	return a.connUDP.Close()
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/utils/utils.go b/vendor/github.com/uber/jaeger-client-go/utils/utils.go
new file mode 100644
index 0000000..ac3c325
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/utils/utils.go
@@ -0,0 +1,87 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package utils
+
+import (
+	"encoding/binary"
+	"errors"
+	"net"
+	"strconv"
+	"strings"
+	"time"
+)
+
+var (
+	// ErrEmptyIP an error for empty ip strings
+	ErrEmptyIP = errors.New("empty string given for ip")
+
+	// ErrNotHostColonPort an error for invalid host port string
+	ErrNotHostColonPort = errors.New("expecting host:port")
+
+	// ErrNotFourOctets an error for the wrong number of octets after splitting a string
+	ErrNotFourOctets = errors.New("Wrong number of octets")
+)
+
+// ParseIPToUint32 converts a string ip (e.g. "x.y.z.w") to a uint32
+func ParseIPToUint32(ip string) (uint32, error) {
+	if ip == "" {
+		return 0, ErrEmptyIP
+	}
+
+	if ip == "localhost" {
+		return 127<<24 | 1, nil
+	}
+
+	octets := strings.Split(ip, ".")
+	if len(octets) != 4 {
+		return 0, ErrNotFourOctets
+	}
+
+	var intIP uint32
+	for i := 0; i < 4; i++ {
+		octet, err := strconv.Atoi(octets[i])
+		if err != nil {
+			return 0, err
+		}
+		intIP = (intIP << 8) | uint32(octet)
+	}
+
+	return intIP, nil
+}
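+
+// For example, ParseIPToUint32("10.1.2.3") returns 0x0A010203
+// (i.e. 10<<24 | 1<<16 | 2<<8 | 3), and ParseIPToUint32("localhost")
+// returns 127<<24 | 1.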
+
+// ParsePort converts a port number from string to uint16.
+func ParsePort(portString string) (uint16, error) {
+	port, err := strconv.ParseUint(portString, 10, 16)
+	return uint16(port), err
+}
+
+// PackIPAsUint32 packs an IPv4 as uint32
+func PackIPAsUint32(ip net.IP) uint32 {
+	if ipv4 := ip.To4(); ipv4 != nil {
+		return binary.BigEndian.Uint32(ipv4)
+	}
+	return 0
+}
+
+// TimeToMicrosecondsSinceEpochInt64 converts Go time.Time to a long
+// representing time since epoch in microseconds, which is the format expected
+// in Jaeger spans encoded as Thrift.
+func TimeToMicrosecondsSinceEpochInt64(t time.Time) int64 {
+	// ^^^ Passing time.Time by value is faster than passing a pointer!
+	// BenchmarkTimeByValue-8	2000000000	         1.37 ns/op
+	// BenchmarkTimeByPtr-8  	2000000000	         1.98 ns/op
+
+	return t.UnixNano() / 1000
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin.go b/vendor/github.com/uber/jaeger-client-go/zipkin.go
new file mode 100644
index 0000000..98cab4b
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin.go
@@ -0,0 +1,77 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"github.com/opentracing/opentracing-go"
+)
+
+// ZipkinSpanFormat is an OpenTracing carrier format constant
+const ZipkinSpanFormat = "zipkin-span-format"
+
+// ExtractableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
+// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
+type ExtractableZipkinSpan interface {
+	TraceID() uint64
+	SpanID() uint64
+	ParentID() uint64
+	Flags() byte
+}
+
+// InjectableZipkinSpan is a type of Carrier used for integration with Zipkin-aware
+// RPC frameworks (like TChannel). It does not support baggage, only trace IDs.
+type InjectableZipkinSpan interface {
+	SetTraceID(traceID uint64)
+	SetSpanID(spanID uint64)
+	SetParentID(parentID uint64)
+	SetFlags(flags byte)
+}
+
+type zipkinPropagator struct {
+	tracer *Tracer
+}
+
+func (p *zipkinPropagator) Inject(
+	ctx SpanContext,
+	abstractCarrier interface{},
+) error {
+	carrier, ok := abstractCarrier.(InjectableZipkinSpan)
+	if !ok {
+		return opentracing.ErrInvalidCarrier
+	}
+
+	carrier.SetTraceID(ctx.TraceID().Low) // TODO this cannot work with 128bit IDs
+	carrier.SetSpanID(uint64(ctx.SpanID()))
+	carrier.SetParentID(uint64(ctx.ParentID()))
+	carrier.SetFlags(ctx.samplingState.flags())
+	return nil
+}
+
+func (p *zipkinPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) {
+	carrier, ok := abstractCarrier.(ExtractableZipkinSpan)
+	if !ok {
+		return emptyContext, opentracing.ErrInvalidCarrier
+	}
+	if carrier.TraceID() == 0 {
+		return emptyContext, opentracing.ErrSpanContextNotFound
+	}
+	var ctx SpanContext
+	ctx.traceID.Low = carrier.TraceID()
+	ctx.spanID = SpanID(carrier.SpanID())
+	ctx.parentID = SpanID(carrier.ParentID())
+	ctx.samplingState = &samplingState{}
+	ctx.samplingState.setFlags(carrier.Flags())
+	return ctx, nil
+}
diff --git a/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
new file mode 100644
index 0000000..73aeb00
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-client-go/zipkin_thrift_span.go
@@ -0,0 +1,329 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package jaeger
+
+import (
+	"encoding/binary"
+	"fmt"
+	"time"
+
+	"github.com/opentracing/opentracing-go/ext"
+
+	"github.com/uber/jaeger-client-go/internal/spanlog"
+	z "github.com/uber/jaeger-client-go/thrift-gen/zipkincore"
+	"github.com/uber/jaeger-client-go/utils"
+)
+
+const (
+	// Zipkin UI does not work well with non-string tag values
+	allowPackedNumbers = false
+)
+
+var specialTagHandlers = map[string]func(*zipkinSpan, interface{}){
+	string(ext.SpanKind):     setSpanKind,
+	string(ext.PeerHostIPv4): setPeerIPv4,
+	string(ext.PeerPort):     setPeerPort,
+	string(ext.PeerService):  setPeerService,
+	TracerIPTagKey:           removeTag,
+}
+
+// BuildZipkinThrift builds a Thrift span based on the internal span.
+// TODO: (breaking change) move to transport/zipkin and make private.
+func BuildZipkinThrift(s *Span) *z.Span {
+	span := &zipkinSpan{Span: s}
+	span.handleSpecialTags()
+	parentID := int64(span.context.parentID)
+	var ptrParentID *int64
+	if parentID != 0 {
+		ptrParentID = &parentID
+	}
+	traceIDHigh := int64(span.context.traceID.High)
+	var ptrTraceIDHigh *int64
+	if traceIDHigh != 0 {
+		ptrTraceIDHigh = &traceIDHigh
+	}
+	timestamp := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime)
+	duration := span.duration.Nanoseconds() / int64(time.Microsecond)
+	endpoint := &z.Endpoint{
+		ServiceName: span.tracer.serviceName,
+		Ipv4:        int32(span.tracer.hostIPv4)}
+	thriftSpan := &z.Span{
+		TraceID:           int64(span.context.traceID.Low),
+		TraceIDHigh:       ptrTraceIDHigh,
+		ID:                int64(span.context.spanID),
+		ParentID:          ptrParentID,
+		Name:              span.operationName,
+		Timestamp:         &timestamp,
+		Duration:          &duration,
+		Debug:             span.context.IsDebug(),
+		Annotations:       buildAnnotations(span, endpoint),
+		BinaryAnnotations: buildBinaryAnnotations(span, endpoint)}
+	return thriftSpan
+}
+
+func buildAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.Annotation {
+	// automatically adding 2 Zipkin CoreAnnotations
+	annotations := make([]*z.Annotation, 0, 2+len(span.logs))
+	var startLabel, endLabel string
+	if span.spanKind == string(ext.SpanKindRPCClientEnum) {
+		startLabel, endLabel = z.CLIENT_SEND, z.CLIENT_RECV
+	} else if span.spanKind == string(ext.SpanKindRPCServerEnum) {
+		startLabel, endLabel = z.SERVER_RECV, z.SERVER_SEND
+	}
+	if !span.startTime.IsZero() && startLabel != "" {
+		start := &z.Annotation{
+			Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(span.startTime),
+			Value:     startLabel,
+			Host:      endpoint}
+		annotations = append(annotations, start)
+		if span.duration != 0 {
+			endTs := span.startTime.Add(span.duration)
+			end := &z.Annotation{
+				Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(endTs),
+				Value:     endLabel,
+				Host:      endpoint}
+			annotations = append(annotations, end)
+		}
+	}
+	for _, log := range span.logs {
+		anno := &z.Annotation{
+			Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp),
+			Host:      endpoint}
+		if content, err := spanlog.MaterializeWithJSON(log.Fields); err == nil {
+			anno.Value = truncateString(string(content), span.tracer.options.maxTagValueLength)
+		} else {
+			anno.Value = err.Error()
+		}
+		annotations = append(annotations, anno)
+	}
+	return annotations
+}
+
+func buildBinaryAnnotations(span *zipkinSpan, endpoint *z.Endpoint) []*z.BinaryAnnotation {
+	// automatically adding local component or server/client address tag, and client version
+	annotations := make([]*z.BinaryAnnotation, 0, 2+len(span.tags))
+
+	if span.peerDefined() && span.isRPC() {
+		peer := z.Endpoint{
+			Ipv4:        span.peer.Ipv4,
+			Port:        span.peer.Port,
+			ServiceName: span.peer.ServiceName}
+		label := z.CLIENT_ADDR
+		if span.isRPCClient() {
+			label = z.SERVER_ADDR
+		}
+		anno := &z.BinaryAnnotation{
+			Key:            label,
+			Value:          []byte{1},
+			AnnotationType: z.AnnotationType_BOOL,
+			Host:           &peer}
+		annotations = append(annotations, anno)
+	}
+	if !span.isRPC() {
+		componentName := endpoint.ServiceName
+		for _, tag := range span.tags {
+			if tag.key == string(ext.Component) {
+				componentName = stringify(tag.value)
+				break
+			}
+		}
+		local := &z.BinaryAnnotation{
+			Key:            z.LOCAL_COMPONENT,
+			Value:          []byte(componentName),
+			AnnotationType: z.AnnotationType_STRING,
+			Host:           endpoint}
+		annotations = append(annotations, local)
+	}
+	for _, tag := range span.tags {
+		// "Special tags" are already handled by this point; we'd be double-reporting the
+		// tags if we didn't skip them here
+		if _, ok := specialTagHandlers[tag.key]; ok {
+			continue
+		}
+		if anno := buildBinaryAnnotation(tag.key, tag.value, span.tracer.options.maxTagValueLength, nil); anno != nil {
+			annotations = append(annotations, anno)
+		}
+	}
+	return annotations
+}
+
+func buildBinaryAnnotation(key string, val interface{}, maxTagValueLength int, endpoint *z.Endpoint) *z.BinaryAnnotation {
+	bann := &z.BinaryAnnotation{Key: key, Host: endpoint}
+	if value, ok := val.(string); ok {
+		bann.Value = []byte(truncateString(value, maxTagValueLength))
+		bann.AnnotationType = z.AnnotationType_STRING
+	} else if value, ok := val.([]byte); ok {
+		if len(value) > maxTagValueLength {
+			value = value[:maxTagValueLength]
+		}
+		bann.Value = value
+		bann.AnnotationType = z.AnnotationType_BYTES
+	} else if value, ok := val.(int32); ok && allowPackedNumbers {
+		bann.Value = int32ToBytes(value)
+		bann.AnnotationType = z.AnnotationType_I32
+	} else if value, ok := val.(int64); ok && allowPackedNumbers {
+		bann.Value = int64ToBytes(value)
+		bann.AnnotationType = z.AnnotationType_I64
+	} else if value, ok := val.(int); ok && allowPackedNumbers {
+		bann.Value = int64ToBytes(int64(value))
+		bann.AnnotationType = z.AnnotationType_I64
+	} else if value, ok := val.(bool); ok {
+		bann.Value = []byte{boolToByte(value)}
+		bann.AnnotationType = z.AnnotationType_BOOL
+	} else {
+		value := stringify(val)
+		bann.Value = []byte(truncateString(value, maxTagValueLength))
+		bann.AnnotationType = z.AnnotationType_STRING
+	}
+	return bann
+}
+
+func stringify(value interface{}) string {
+	if s, ok := value.(string); ok {
+		return s
+	}
+	return fmt.Sprintf("%+v", value)
+}
+
+func truncateString(value string, maxLength int) string {
+	// we ignore the problem of utf8 runes possibly being sliced in the middle,
+	// as it is rather expensive to iterate through each tag just to find rune
+	// boundaries.
+	if len(value) > maxLength {
+		return value[:maxLength]
+	}
+	return value
+}
+
+func boolToByte(b bool) byte {
+	if b {
+		return 1
+	}
+	return 0
+}
+
+// int32ToBytes converts int32 to bytes.
+func int32ToBytes(i int32) []byte {
+	buf := make([]byte, 4)
+	binary.BigEndian.PutUint32(buf, uint32(i))
+	return buf
+}
+
+// int64ToBytes converts int64 to bytes.
+func int64ToBytes(i int64) []byte {
+	buf := make([]byte, 8)
+	binary.BigEndian.PutUint64(buf, uint64(i))
+	return buf
+}
+
+type zipkinSpan struct {
+	*Span
+
+	// peer points to the peer service participating in this span,
+	// e.g. the Client if this span is a server span,
+	// or Server if this span is a client span
+	peer struct {
+		Ipv4        int32
+		Port        int16
+		ServiceName string
+	}
+
+	// used to distinguish local vs. RPC Server vs. RPC Client spans
+	spanKind string
+}
+
+func (s *zipkinSpan) handleSpecialTags() {
+	s.Lock()
+	defer s.Unlock()
+	if s.firstInProcess {
+		// append the process tags
+		s.tags = append(s.tags, s.tracer.tags...)
+	}
+	filteredTags := make([]Tag, 0, len(s.tags))
+	for _, tag := range s.tags {
+		if handler, ok := specialTagHandlers[tag.key]; ok {
+			handler(s, tag.value)
+		} else {
+			filteredTags = append(filteredTags, tag)
+		}
+	}
+	s.tags = filteredTags
+}
+
+func setSpanKind(s *zipkinSpan, value interface{}) {
+	if val, ok := value.(string); ok {
+		s.spanKind = val
+		return
+	}
+	if val, ok := value.(ext.SpanKindEnum); ok {
+		s.spanKind = string(val)
+	}
+}
+
+func setPeerIPv4(s *zipkinSpan, value interface{}) {
+	if val, ok := value.(string); ok {
+		if ip, err := utils.ParseIPToUint32(val); err == nil {
+			s.peer.Ipv4 = int32(ip)
+			return
+		}
+	}
+	if val, ok := value.(uint32); ok {
+		s.peer.Ipv4 = int32(val)
+		return
+	}
+	if val, ok := value.(int32); ok {
+		s.peer.Ipv4 = val
+	}
+}
+
+func setPeerPort(s *zipkinSpan, value interface{}) {
+	if val, ok := value.(string); ok {
+		if port, err := utils.ParsePort(val); err == nil {
+			s.peer.Port = int16(port)
+			return
+		}
+	}
+	if val, ok := value.(uint16); ok {
+		s.peer.Port = int16(val)
+		return
+	}
+	if val, ok := value.(int); ok {
+		s.peer.Port = int16(val)
+	}
+}
+
+func setPeerService(s *zipkinSpan, value interface{}) {
+	if val, ok := value.(string); ok {
+		s.peer.ServiceName = val
+	}
+}
+
+func removeTag(s *zipkinSpan, value interface{}) {}
+
+func (s *zipkinSpan) peerDefined() bool {
+	return s.peer.ServiceName != "" || s.peer.Ipv4 != 0 || s.peer.Port != 0
+}
+
+func (s *zipkinSpan) isRPC() bool {
+	s.RLock()
+	defer s.RUnlock()
+	return s.spanKind == string(ext.SpanKindRPCClientEnum) || s.spanKind == string(ext.SpanKindRPCServerEnum)
+}
+
+func (s *zipkinSpan) isRPCClient() bool {
+	s.RLock()
+	defer s.RUnlock()
+	return s.spanKind == string(ext.SpanKindRPCClientEnum)
+}
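
For reference, the numeric tag values above are packed big-endian before being attached as Zipkin binary annotations. A minimal, standalone sketch of that packing (not part of the vendored package, shown only to illustrate the byte layout) is:

```
package main

import (
	"encoding/binary"
	"fmt"
)

// int64ToBytes mirrors the helper above: 8 bytes, most-significant byte first.
func int64ToBytes(i int64) []byte {
	buf := make([]byte, 8)
	binary.BigEndian.PutUint64(buf, uint64(i))
	return buf
}

func main() {
	// 0x0102030405060708 encodes as 01 02 03 04 05 06 07 08.
	fmt.Printf("% x\n", int64ToBytes(0x0102030405060708))
}
```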
diff --git a/vendor/github.com/uber/jaeger-lib/LICENSE b/vendor/github.com/uber/jaeger-lib/LICENSE
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/counter.go b/vendor/github.com/uber/jaeger-lib/metrics/counter.go
new file mode 100644
index 0000000..2a6a43e
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/counter.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Counter tracks the number of times an event has occurred
+type Counter interface {
+	// Inc adds the given value to the counter.
+	Inc(int64)
+}
+
+// NullCounter counter that does nothing
+var NullCounter Counter = nullCounter{}
+
+type nullCounter struct{}
+
+func (nullCounter) Inc(int64) {}
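
The Counter interface above is the extension point for metrics backends. A hypothetical in-memory implementation (names are illustrative only, not part of the library) might look like this:

```
package main

import (
	"fmt"
	"sync/atomic"
)

// Counter mirrors the jaeger-lib interface shape.
type Counter interface {
	Inc(int64)
}

// atomicCounter is a hypothetical backend implementation used only to show
// how the interface is satisfied.
type atomicCounter struct{ value int64 }

func (c *atomicCounter) Inc(delta int64) { atomic.AddInt64(&c.value, delta) }

func main() {
	ac := &atomicCounter{}
	var c Counter = ac
	c.Inc(1)
	c.Inc(2)
	fmt.Println(atomic.LoadInt64(&ac.value)) // 3
}
```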
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/factory.go b/vendor/github.com/uber/jaeger-lib/metrics/factory.go
new file mode 100644
index 0000000..0ead061
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/factory.go
@@ -0,0 +1,78 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"time"
+)
+
+// NSOptions defines the name and tags map associated with a factory namespace
+type NSOptions struct {
+	Name string
+	Tags map[string]string
+}
+
+// Options defines the information associated with a metric
+type Options struct {
+	Name string
+	Tags map[string]string
+	Help string
+}
+
+// TimerOptions defines the information associated with a metric
+type TimerOptions struct {
+	Name    string
+	Tags    map[string]string
+	Help    string
+	Buckets []time.Duration
+}
+
+// HistogramOptions defines the information associated with a metric
+type HistogramOptions struct {
+	Name    string
+	Tags    map[string]string
+	Help    string
+	Buckets []float64
+}
+
+// Factory creates new metrics
+type Factory interface {
+	Counter(metric Options) Counter
+	Timer(metric TimerOptions) Timer
+	Gauge(metric Options) Gauge
+	Histogram(metric HistogramOptions) Histogram
+
+	// Namespace returns a nested metrics factory.
+	Namespace(scope NSOptions) Factory
+}
+
+// NullFactory is a metrics factory that returns NullCounter, NullTimer, NullGauge, and NullHistogram.
+var NullFactory Factory = nullFactory{}
+
+type nullFactory struct{}
+
+func (nullFactory) Counter(options Options) Counter {
+	return NullCounter
+}
+func (nullFactory) Timer(options TimerOptions) Timer {
+	return NullTimer
+}
+func (nullFactory) Gauge(options Options) Gauge {
+	return NullGauge
+}
+func (nullFactory) Histogram(options HistogramOptions) Histogram {
+	return NullHistogram
+}
+func (nullFactory) Namespace(scope NSOptions) Factory { return NullFactory }
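
A common usage pattern for this Factory interface is to accept an optional factory and fall back to NullFactory, as Init does below. A small sketch, assuming the vendored github.com/uber/jaeger-lib/metrics package is on the import path and using hypothetical names:

```
package main

import (
	"github.com/uber/jaeger-lib/metrics"
)

// newRequestCounter is a hypothetical helper: accept a Factory, fall back to
// the no-op NullFactory when none is wired in, and scope names via Namespace.
func newRequestCounter(factory metrics.Factory) metrics.Counter {
	if factory == nil {
		factory = metrics.NullFactory
	}
	scoped := factory.Namespace(metrics.NSOptions{
		Name: "demo",
		Tags: map[string]string{"component": "example"},
	})
	return scoped.Counter(metrics.Options{Name: "requests", Help: "requests handled"})
}

func main() {
	c := newRequestCounter(nil) // no backend wired in, so every Inc is a no-op
	c.Inc(1)
}
```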
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/gauge.go b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
new file mode 100644
index 0000000..3c60639
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/gauge.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Gauge records instantaneous measurements of something as an int64 value
+type Gauge interface {
+	// Update the gauge to the value passed in.
+	Update(int64)
+}
+
+// NullGauge gauge that does nothing
+var NullGauge Gauge = nullGauge{}
+
+type nullGauge struct{}
+
+func (nullGauge) Update(int64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/histogram.go b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
new file mode 100644
index 0000000..d3bd617
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/histogram.go
@@ -0,0 +1,28 @@
+// Copyright (c) 2018 The Jaeger Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+// Histogram keeps track of a distribution of values.
+type Histogram interface {
+	// Records the value passed in.
+	Record(float64)
+}
+
+// NullHistogram that does nothing
+var NullHistogram Histogram = nullHistogram{}
+
+type nullHistogram struct{}
+
+func (nullHistogram) Record(float64) {}
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/keys.go b/vendor/github.com/uber/jaeger-lib/metrics/keys.go
new file mode 100644
index 0000000..c24445a
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/keys.go
@@ -0,0 +1,35 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"sort"
+)
+
+// GetKey converts name+tags into a single string of the form
+// "name|tag1=value1|...|tagN=valueN", where tag names are
+// sorted alphabetically.
+func GetKey(name string, tags map[string]string, tagsSep string, tagKVSep string) string {
+	keys := make([]string, 0, len(tags))
+	for k := range tags {
+		keys = append(keys, k)
+	}
+	sort.Strings(keys)
+	key := name
+	for _, k := range keys {
+		key = key + tagsSep + k + tagKVSep + tags[k]
+	}
+	return key
+}
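
A short usage sketch of GetKey, assuming the vendored import path; the tag values are illustrative only:

```
package main

import (
	"fmt"

	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	// Tag keys are sorted, so the key is stable regardless of map iteration order.
	key := metrics.GetKey("requests",
		map[string]string{"result": "ok", "method": "GET"}, "|", "=")
	fmt.Println(key) // requests|method=GET|result=ok
}
```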
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/metrics.go b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
new file mode 100644
index 0000000..0df0c66
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/metrics.go
@@ -0,0 +1,137 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"fmt"
+	"reflect"
+	"strconv"
+	"strings"
+)
+
+// MustInit initializes the fields of the passed-in metrics struct using the passed-in factory.
+//
+// It uses reflection to initialize a struct containing metrics fields
+// by assigning new Counter/Gauge/Timer values with the metric name retrieved
+// from the `metric` tag and stats tags retrieved from the `tags` tag.
+//
+// Note: all fields of the struct must be exported, have a `metric` tag, and be
+// of type Counter, Gauge, Timer, or Histogram.
+//
+// Errors during Init lead to a panic.
+func MustInit(metrics interface{}, factory Factory, globalTags map[string]string) {
+	if err := Init(metrics, factory, globalTags); err != nil {
+		panic(err.Error())
+	}
+}
+
+// Init does the same as MustInit, but returns an error instead of
+// panicking.
+func Init(m interface{}, factory Factory, globalTags map[string]string) error {
+	// Allow user to opt out of reporting metrics by passing in nil.
+	if factory == nil {
+		factory = NullFactory
+	}
+
+	counterPtrType := reflect.TypeOf((*Counter)(nil)).Elem()
+	gaugePtrType := reflect.TypeOf((*Gauge)(nil)).Elem()
+	timerPtrType := reflect.TypeOf((*Timer)(nil)).Elem()
+	histogramPtrType := reflect.TypeOf((*Histogram)(nil)).Elem()
+
+	v := reflect.ValueOf(m).Elem()
+	t := v.Type()
+	for i := 0; i < t.NumField(); i++ {
+		tags := make(map[string]string)
+		for k, v := range globalTags {
+			tags[k] = v
+		}
+		var buckets []float64
+		field := t.Field(i)
+		metric := field.Tag.Get("metric")
+		if metric == "" {
+			return fmt.Errorf("Field %s is missing a tag 'metric'", field.Name)
+		}
+		if tagString := field.Tag.Get("tags"); tagString != "" {
+			tagPairs := strings.Split(tagString, ",")
+			for _, tagPair := range tagPairs {
+				tag := strings.Split(tagPair, "=")
+				if len(tag) != 2 {
+					return fmt.Errorf(
+						"Field [%s]: Tag [%s] is not of the form key=value in 'tags' string [%s]",
+						field.Name, tagPair, tagString)
+				}
+				tags[tag[0]] = tag[1]
+			}
+		}
+		if bucketString := field.Tag.Get("buckets"); bucketString != "" {
+			if field.Type.AssignableTo(timerPtrType) {
+				// TODO: Parse timer duration buckets
+				return fmt.Errorf(
+					"Field [%s]: Buckets are not currently initialized for timer metrics",
+					field.Name)
+			} else if field.Type.AssignableTo(histogramPtrType) {
+				bucketValues := strings.Split(bucketString, ",")
+				for _, bucket := range bucketValues {
+					b, err := strconv.ParseFloat(bucket, 64)
+					if err != nil {
+						return fmt.Errorf(
+							"Field [%s]: Bucket [%s] could not be converted to float64 in 'buckets' string [%s]",
+							field.Name, bucket, bucketString)
+					}
+					buckets = append(buckets, b)
+				}
+			} else {
+				return fmt.Errorf(
+					"Field [%s]: Buckets should only be defined for Timer and Histogram metric types",
+					field.Name)
+			}
+		}
+		help := field.Tag.Get("help")
+		var obj interface{}
+		if field.Type.AssignableTo(counterPtrType) {
+			obj = factory.Counter(Options{
+				Name: metric,
+				Tags: tags,
+				Help: help,
+			})
+		} else if field.Type.AssignableTo(gaugePtrType) {
+			obj = factory.Gauge(Options{
+				Name: metric,
+				Tags: tags,
+				Help: help,
+			})
+		} else if field.Type.AssignableTo(timerPtrType) {
+			// TODO: Add buckets once parsed (see TODO above)
+			obj = factory.Timer(TimerOptions{
+				Name: metric,
+				Tags: tags,
+				Help: help,
+			})
+		} else if field.Type.AssignableTo(histogramPtrType) {
+			obj = factory.Histogram(HistogramOptions{
+				Name:    metric,
+				Tags:    tags,
+				Help:    help,
+				Buckets: buckets,
+			})
+		} else {
+			return fmt.Errorf(
+				"Field %s is not a pointer to timer, gauge, or counter",
+				field.Name)
+		}
+		v.Field(i).Set(reflect.ValueOf(obj))
+	}
+	return nil
+}
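
A minimal sketch of how Init is driven by struct tags, assuming the vendored import path; the struct, field names, tags, and buckets are hypothetical and chosen only to exercise the `metric`, `tags`, and `buckets` tags described above:

```
package main

import (
	"fmt"

	"github.com/uber/jaeger-lib/metrics"
)

// queueMetrics is a hypothetical metrics bundle. Every field is exported,
// carries a `metric` tag, and has one of the supported types, as Init requires.
type queueMetrics struct {
	Enqueued metrics.Counter   `metric:"enqueued" tags:"queue=main"`
	Depth    metrics.Gauge     `metric:"depth"`
	Latency  metrics.Histogram `metric:"latency" buckets:"0.1,0.5,1"`
}

func main() {
	var m queueMetrics
	// NullFactory keeps the sketch self-contained; a real Factory would come
	// from a metrics backend adapter.
	if err := metrics.Init(&m, metrics.NullFactory, map[string]string{"service": "demo"}); err != nil {
		fmt.Println("init failed:", err)
		return
	}
	m.Enqueued.Inc(1)
	m.Depth.Update(3)
	m.Latency.Record(0.25)
}
```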
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
new file mode 100644
index 0000000..4a8abdb
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/stopwatch.go
@@ -0,0 +1,43 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"time"
+)
+
+// StartStopwatch begins recording the execution time of an event, returning
+// a Stopwatch that should be used to stop recording the time for
+// that event. Multiple events can occur simultaneously, each
+// represented by a different active Stopwatch.
+func StartStopwatch(timer Timer) Stopwatch {
+	return Stopwatch{t: timer, start: time.Now()}
+}
+
+// A Stopwatch tracks the execution time of a specific event
+type Stopwatch struct {
+	t     Timer
+	start time.Time
+}
+
+// Stop stops the stopwatch and records the elapsed time
+func (s Stopwatch) Stop() {
+	s.t.Record(s.ElapsedTime())
+}
+
+// ElapsedTime returns the amount of elapsed time (in time.Duration)
+func (s Stopwatch) ElapsedTime() time.Duration {
+	return time.Since(s.start)
+}
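
A short usage sketch of StartStopwatch and Stop, assuming the vendored import path; printTimer is a hypothetical Timer standing in for a real backend:

```
package main

import (
	"fmt"
	"time"

	"github.com/uber/jaeger-lib/metrics"
)

// printTimer is a hypothetical Timer that just prints what it records.
type printTimer struct{}

func (printTimer) Record(d time.Duration) { fmt.Println("observed:", d) }

func main() {
	sw := metrics.StartStopwatch(printTimer{})
	time.Sleep(10 * time.Millisecond) // the operation being measured
	sw.Stop()                         // records the elapsed time on the Timer
}
```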
diff --git a/vendor/github.com/uber/jaeger-lib/metrics/timer.go b/vendor/github.com/uber/jaeger-lib/metrics/timer.go
new file mode 100644
index 0000000..e18d222
--- /dev/null
+++ b/vendor/github.com/uber/jaeger-lib/metrics/timer.go
@@ -0,0 +1,33 @@
+// Copyright (c) 2017 Uber Technologies, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metrics
+
+import (
+	"time"
+)
+
+// Timer accumulates observations about how long some operation took,
+// and also maintains a histogram of percentiles.
+type Timer interface {
+	// Records the time passed in.
+	Record(time.Duration)
+}
+
+// NullTimer timer that does nothing
+var NullTimer Timer = nullTimer{}
+
+type nullTimer struct{}
+
+func (nullTimer) Record(time.Duration) {}